code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def set_object_metadata(self, container, obj, metadata, clear=False,
        extra_info=None, prefix=None):
    """Update the metadata on `obj` in `container` with the given dict.

    With clear=True only the passed metadata is retained; otherwise the
    values update the existing metadata. `prefix` overrides the standard
    'X-Object-Meta-' header prefix. `extra_info` is accepted only for
    backwards compatibility.
    """
    target = container
    return target.set_object_metadata(obj, metadata, clear=clear,
            prefix=prefix)
3.597867
6.509856
0.55268
def get(self, include_meta=False, chunk_size=None):
    """Fetch this object's contents from storage.

    With include_meta=True a 2-tuple of (metadata dict, content bytes) is
    returned; otherwise only the bytes. If `chunk_size` is set, fully read
    the contents before making another request.
    """
    mgr = self.manager
    return mgr.fetch(obj=self, include_meta=include_meta,
            chunk_size=chunk_size)
4.943524
5.38529
0.917968
def download(self, directory, structure=True):
    """Write this object into the (existing) `directory`.

    Nested folder structure implied by the object name is recreated unless
    structure=False.
    """
    mgr = self.manager
    return mgr.download(self, directory, structure=structure)
6.140723
8.93663
0.687141
def copy(self, new_container, new_obj_name=None, extra_info=None):
    """Copy this object to `new_container`, optionally under a new name.

    Copying within the same container requires a different name.
    """
    src_container = self.container
    return src_container.copy_object(self, new_container,
            new_obj_name=new_obj_name)
4.159849
3.624659
1.147652
def move(self, new_container, new_obj_name=None, extra_info=None):
    """Copy this object to `new_container`, then delete the original.

    After a successful move this storage_object reference is no longer
    valid.
    """
    src_container = self.container
    return src_container.move_object(self, new_container,
            new_obj_name=new_obj_name)
3.994631
3.558875
1.122442
def change_content_type(self, new_ctype, guess=False):
    """Copy this object onto itself with a new content-type.

    `guess` requires a CDN-enabled container; with guess, new_ctype may be
    None. A failed PUT raises a swift exception.
    """
    cont = self.container
    cont.change_object_content_type(self, new_ctype=new_ctype, guess=guess)
5.78123
4.347228
1.329866
def remove_metadata_key(self, key, prefix=None):
    """Delete `key` from this object's metadata; a missing key is a no-op."""
    mgr = self.manager
    mgr.remove_metadata_key(self, key, prefix=prefix)
4.695585
4.943568
0.949837
def get_temp_url(self, seconds, method="GET"):
    """Return a URL granting access to this object for `seconds` seconds.

    Only GET and PUT are supported; other methods raise
    InvalidTemporaryURLMethod.
    """
    cont = self.container
    return cont.get_temp_url(self, seconds=seconds, method=method)
6.724138
6.839798
0.98309
def get(self, obj):
    """Return a loaded StorageObject describing `obj`.

    Overrides the base behavior: Swift uses HEAD for object info and GET
    only for downloading contents.
    """
    name = utils.get_name(obj)
    uri = "/%s/%s" % (self.uri_base, name)
    resp, resp_body = self.api.method_head(uri)
    headers = resp.headers
    try:
        content_length = int(headers.get("content-length"))
    except (TypeError, ValueError):
        # Missing or non-numeric header; leave the size unknown.
        content_length = None
    data = {
        "name": name,
        "bytes": content_length,
        "content_type": headers.get("content-type"),
        "hash": headers.get("etag"),
        "last_modified": headers.get("last-modified"),
        "timestamp": headers.get("x-timestamp"),
    }
    return StorageObject(self, data, loaded=True)
2.666875
2.416943
1.103409
def create(self, file_or_path=None, data=None, obj_name=None,
        content_type=None, etag=None, content_encoding=None,
        content_length=None, ttl=None, chunked=False, metadata=None,
        chunk_size=None, headers=None, return_none=False):
    """Create or replace a storage object in this container.

    The content is either a byte stream (`data`) or a disk file
    (`file_or_path`, an open file-like object or an absolute path). With
    `data` you must supply `obj_name`; with a file, the file's name is
    used when `obj_name` is omitted. `ttl` (seconds) makes the object
    temporary via the X-Delete-After header. `chunked`/`chunk_size`
    stream the upload without a known total size. Returns the created
    StorageObject unless `return_none` is True.
    """
    # First make sure that there is a content source.
    if (data, file_or_path) == (None, None):
        raise exc.NoContentSpecified("You must specify either a file path, "
                "an open file-like object, or a stream of bytes when "
                "creating an object.")
    src = data if data else file_or_path
    if src is file_or_path:
        # Derive/validate the object name from the file when needed.
        obj_name = _validate_file_or_path(file_or_path, obj_name)
    if not obj_name:
        raise exc.MissingName("No name for the object to be created has "
                "been specified, and none can be inferred from context")
    if chunk_size:
        # An explicit chunk size implies a chunked transfer.
        chunked = True
    if chunked:
        chunk_size = chunk_size or DEFAULT_CHUNKSIZE
    headers = headers or {}
    if metadata:
        metadata = _massage_metakeys(metadata, OBJECT_META_PREFIX)
        # NOTE(review): this REPLACES any caller-supplied `headers` dict
        # rather than merging the metadata into it — confirm whether
        # `headers.update(metadata)` was intended.
        headers = metadata
    if ttl is not None:
        # Swift deletes the object `ttl` seconds after creation.
        headers["X-Delete-After"] = ttl
    if src is data:
        self._upload(obj_name, data, content_type, content_encoding,
                content_length, etag, chunked, chunk_size, headers)
    elif hasattr(file_or_path, "read"):
        # Already an open file-like object; never chunked here.
        self._upload(obj_name, file_or_path, content_type,
                content_encoding, content_length, etag, False, chunk_size,
                headers)
    else:
        # Need to wrap the call in a context manager
        with open(file_or_path, "rb") as ff:
            self._upload(obj_name, ff, content_type, content_encoding,
                    content_length, etag, False, chunk_size, headers)
    if return_none:
        return
    return self.get(obj_name)
2.828344
2.851002
0.992053
def _upload(self, obj_name, content, content_type, content_encoding,
        content_length, etag, chunked, chunk_size, headers):
    """Upload `content` for `obj_name`, working around the 5GB maximum
    file size by segmenting large files and writing a manifest object.
    """
    if content_type is not None:
        headers["Content-Type"] = content_type
    if content_encoding is not None:
        headers["Content-Encoding"] = content_encoding
    if isinstance(content, six.string_types):
        fsize = len(content)
    else:
        if chunked:
            # Streaming upload; the total size is unknown.
            fsize = None
        elif content_length is None:
            fsize = get_file_size(content)
        else:
            fsize = content_length
    if fsize is None or fsize <= MAX_FILE_SIZE:
        # We can just upload it as-is.
        return self._store_object(obj_name, content=content, etag=etag,
                chunked=chunked, chunk_size=chunk_size, headers=headers)
    # Files larger than MAX_FILE_SIZE must be segmented
    # and uploaded separately.
    num_segments = int(math.ceil(float(fsize) / MAX_FILE_SIZE))
    # Zero-pad segment numbers so names sort in upload order.
    digits = int(math.log10(num_segments)) + 1
    # NOTE: This could be greatly improved with threading or other
    # async design.
    for segment in range(num_segments):
        sequence = str(segment + 1).zfill(digits)
        seg_name = "%s.%s" % (obj_name, sequence)
        with utils.SelfDeletingTempfile() as tmpname:
            with open(tmpname, "wb") as tmp:
                tmp.write(content.read(MAX_FILE_SIZE))
            with open(tmpname, "rb") as tmp:
                # We have to calculate the etag for each segment
                etag = utils.get_checksum(tmp)
                self._store_object(seg_name, content=tmp, etag=etag,
                        chunked=False, headers=headers)
    # Upload the manifest
    headers.pop("ETag", "")
    # The manifest points Swift at all objects named '<obj_name>.<seq>'.
    headers["X-Object-Manifest"] = "%s/%s." % (self.name, obj_name)
    self._store_object(obj_name, content=None, headers=headers)
2.485116
2.460336
1.010072
head_etag = headers.pop("ETag", "") if chunked: headers.pop("Content-Length", "") headers["Transfer-Encoding"] = "chunked" elif etag is None and content is not None: etag = utils.get_checksum(content) if etag: headers["ETag"] = etag if not headers.get("Content-Type"): headers["Content-Type"] = None uri = "/%s/%s" % (self.uri_base, obj_name) resp, resp_body = self.api.method_put(uri, data=content, headers=headers)
def _store_object(self, obj_name, content, etag=None, chunked=False, chunk_size=None, headers=None)
Handles the low-level creation of a storage object and the uploading of the contents of that object.
2.701857
2.803168
0.963858
def fetch(self, obj, include_meta=False, chunk_size=None, size=None,
        extra_info=None):
    """Fetch `obj` from storage.

    With `chunk_size`, returns a generator of chunks (and `include_meta`
    is ignored). With `size`, only the leading bytes are requested. With
    include_meta=True, returns (metadata dict, content bytes).
    `extra_info` is accepted only for backwards compatibility and ignored.
    """
    uri = "/%s/%s" % (self.uri_base, utils.get_name(obj))
    if chunk_size:
        # Need the total size of the object
        if not isinstance(obj, StorageObject):
            obj = self.get(obj)
        obj_size = obj.total_bytes
        return self._fetch_chunker(uri, chunk_size, size, obj_size)
    headers = {}
    if size:
        # NOTE(review): HTTP Range 'bytes=0-N' is inclusive, so this
        # requests size+1 bytes — confirm whether 'size - 1' was intended.
        headers = {"Range": "bytes=0-%s" % size}
    resp, resp_body = self.api.method_get(uri, headers=headers,
            raw_content=True)
    if include_meta:
        meta_resp, meta_body = self.api.method_head(uri)
        return (meta_resp.headers, resp_body)
    return resp_body
3.249452
3.381046
0.961079
pos = 0 total_bytes = 0 size = size or obj_size max_size = min(size, obj_size) while True: endpos = min(obj_size, pos + chunk_size - 1) headers = {"Range": "bytes=%s-%s" % (pos, endpos)} resp, resp_body = self.api.method_get(uri, headers=headers, raw_content=True) pos = endpos + 1 if not resp_body: # End of file return yield resp_body total_bytes += len(resp_body) if total_bytes >= max_size: return
def _fetch_chunker(self, uri, chunk_size, size, obj_size)
Returns a generator that returns an object in chunks.
2.799528
2.702543
1.035887
def delete_all_objects(self, nms, async_=False):
    """Bulk-delete the named objects (or every object when `nms` is None).

    Blocks until done unless async_=True, in which case an object tracking
    the deletion's progress is returned; its 'results' dict reports
    deleted/not_found counts, HTTP status, and errors.
    """
    names = nms
    if names is None:
        names = self.api.list_object_names(self.name, full_listing=True)
    return self.api.bulk_delete(self.name, names, async_=async_)
3.43675
3.883682
0.884921
def download(self, obj, directory, structure=True):
    """Fetch `obj` and write it under the (existing) `directory`.

    With structure=True (the default) any folder hierarchy implied by the
    object name (e.g. "foo/bar/baz.txt") is recreated below `directory`.

    Raises exc.FolderNotFound if `directory` does not exist.
    """
    if not os.path.isdir(directory):
        raise exc.FolderNotFound("The directory '%s' does not exist." %
                directory)
    obj_name = utils.get_name(obj)
    path, fname = os.path.split(obj_name)
    if structure:
        # Mirror the pseudo-folder path inside the target directory.
        fullpath = os.path.join(directory, path)
        if not os.path.exists(fullpath):
            os.makedirs(fullpath)
        target = os.path.join(fullpath, fname)
    else:
        target = os.path.join(directory, fname)
    with open(target, "wb") as dl:
        content = self.fetch(obj)
        try:
            dl.write(content)
        except UnicodeEncodeError:
            # Text content: encode with the configured encoding first.
            encoding = pyrax.get_encoding()
            dl.write(content.encode(encoding))
2.478063
2.451976
1.010639
def purge(self, obj, email_addresses=None):
    """Remove a CDN-enabled object from public access before its TTL
    expires (currently capped at 25 such requests; contact support for
    more). Each address in `email_addresses` gets a confirmation email.
    """
    container_name = utils.get_name(self.container)
    object_name = utils.get_name(obj)
    headers = {}
    if email_addresses:
        addresses = utils.coerce_to_list(email_addresses)
        headers["X-Purge-Email"] = ", ".join(addresses)
    uri = "/%s/%s" % (container_name, object_name)
    resp, resp_body = self.api.cdn_request(uri, method="DELETE",
            headers=headers)
3.360952
3.681709
0.912878
def get_metadata(self, obj, prefix=None):
    """Return the metadata for `obj` as a dict, with the metadata header
    prefix stripped and dashes converted to underscores.
    """
    uri = "/%s/%s" % (self.uri_base, utils.get_name(obj))
    resp, resp_body = self.api.method_head(uri)
    ret = {}
    # Add the metadata prefix, if needed.
    if prefix is None:
        prefix = OBJECT_META_PREFIX
    low_prefix = prefix.lower()
    for hkey, hval in list(resp.headers.items()):
        lowkey = hkey.lower()
        if lowkey.startswith(low_prefix):
            # Strip the prefix by length so it works regardless of the
            # header's letter case; the original used
            # hkey.replace(low_prefix, ""), which silently failed to strip
            # mixed-case headers such as 'X-Object-Meta-Foo'.
            cleaned = hkey[len(prefix):].replace("-", "_")
            ret[cleaned] = hval
    return ret
3.835661
3.565875
1.075658
def set_metadata(self, obj, metadata, clear=False, prefix=None):
    """Update (or, with clear=True, replace) the metadata on `obj`.

    Keys are normalized with the standard 'X-Object-Meta-' prefix unless
    a different `prefix` is supplied (pass "" for non-standard headers).
    """
    # Add the metadata prefix, if needed.
    if prefix is None:
        prefix = OBJECT_META_PREFIX
    massaged = _massage_metakeys(metadata, prefix)
    cname = utils.get_name(self.container)
    oname = utils.get_name(obj)
    new_meta = {}
    # Note that the API for object POST is the opposite of that for
    # container POST: for objects, all current metadata is deleted,
    # whereas for containers you need to set the values to an empty
    # string to delete them.
    if not clear:
        # Preserve existing metadata by re-posting it merged with the new.
        obj_meta = self.get_metadata(obj, prefix=prefix)
        new_meta = _massage_metakeys(obj_meta, prefix)
    utils.case_insensitive_update(new_meta, massaged)
    # Remove any empty values, since the object metadata API will
    # store them.
    to_pop = []
    for key, val in six.iteritems(new_meta):
        if not val:
            to_pop.append(key)
    for key in to_pop:
        new_meta.pop(key)
    uri = "/%s/%s" % (cname, oname)
    resp, resp_body = self.api.method_post(uri, headers=new_meta)
3.830444
3.913226
0.978846
def remove_metadata_key(self, obj, key):
    """Delete `key` from the object's metadata by setting it to an empty
    value; a missing key is a no-op.
    """
    return self.set_metadata(obj, {key: ""})
5.975452
5.733942
1.04212
ident = self.identity cdn_svc = ident.services.get("object_cdn") if cdn_svc: ep = cdn_svc.endpoints.get(self.region_name) if ep: self.cdn_management_url = ep.public_url
def _configure_cdn(self)
Initialize CDN-related endpoints, if available.
5.005318
4.361776
1.147541
self.list_containers = self.list_container_names self.get_all_containers = self.list self.get_container = self.get self.create_container = self.create self.delete_container = self.delete self.get_container_objects = self.list_container_objects self.get_container_object_names = self.list_container_object_names self.get_info = self.get_account_info
def _backwards_aliases(self)
In order to keep this backwards-compatible with previous versions, alias the old names to the new methods.
2.761735
2.374213
1.163221
def get(self, item):
    """Return the container named `item`; a non-string `item` is returned
    unchanged.
    """
    if not isinstance(item, six.string_types):
        return item
    return super(StorageClient, self).get(item)
4.897025
5.017166
0.976054
def _configure_manager(self):
    """Create the ContainerManager this client delegates container
    operations to.
    """
    manager = ContainerManager(self, resource_class=Container,
            response_key="", uri_base="")
    self._manager = manager
24.98414
11.66826
2.141205
def get_account_details(self):
    """Return non-metadata 'x-account-*' header info as a dict, with
    numeric values converted to ints.
    """
    headers = self._manager.get_account_headers()
    acct_prefix = "x-account-"
    meta_prefix = ACCOUNT_META_PREFIX.lower()
    details = {}
    for key, val in list(headers.items()):
        low = key.lower()
        if not low.startswith(acct_prefix) or low.startswith(meta_prefix):
            continue
        cleaned = key.replace(acct_prefix, "").replace("-", "_")
        try:
            # Most values are ints
            details[cleaned] = int(val)
        except ValueError:
            details[cleaned] = val
    return details
3.619965
3.498049
1.034853
def get_account_info(self):
    """Return a (container_count, bytes_used) tuple for the account, as
    reported by the account headers (values are the raw header strings).
    """
    headers = self._manager.get_account_headers()
    count = headers.get("x-account-container-count")
    total = headers.get("x-account-bytes-used")
    return (count, total)
5.769352
3.834273
1.504679
def set_account_metadata(self, metadata, clear=False, prefix=None,
        extra_info=None):
    """Update (or, with clear=True, replace) the account's metadata.

    `prefix` overrides the standard 'X-Account-Meta-' header prefix.
    `extra_info` is accepted only for backwards compatibility and ignored.
    """
    mgr = self._manager
    return mgr.set_account_metadata(metadata, clear=clear, prefix=prefix)
4.738512
7.30611
0.648568
def get_temp_url_key(self, cached=True):
    """Return the account's TempURL key, or None if unset.

    Returns the cached value unless cached=False or nothing is cached,
    in which case the account metadata is queried and the cache refreshed.
    """
    key_value = self._cached_temp_url_key
    if cached and key_value:
        return key_value
    key_value = self.get_account_metadata().get("temp_url_key")
    self._cached_temp_url_key = key_value
    return key_value
3.767442
3.62663
1.038827
def set_temp_url_key(self, key=None):
    """Set the account's Temporary URL key (a secret known to the owner).

    When no key is given, a random UUID hex string is generated; the key
    can later be retrieved with get_temp_url_key().
    """
    if key is None:
        key = uuid.uuid4().hex
    self.set_account_metadata({"Temp-Url-Key": key})
    self._cached_temp_url_key = key
3.976249
4.084096
0.973593
def get_temp_url(self, container, obj, seconds, method="GET", key=None,
        cached=True):
    """Return a URL granting access to `obj` for `seconds` seconds.

    Only GET and PUT are supported; others raise
    InvalidTemporaryURLMethod. Pass `key` directly to skip the metadata
    lookup, or cached=False to force a fresh key fetch.
    """
    mgr = self._manager
    return mgr.get_temp_url(container, obj, seconds, method=method,
            key=key, cached=cached)
2.795475
3.900559
0.716686
def list(self, limit=None, marker=None, end_marker=None, prefix=None):
    """List the account's containers, with pagination parameters (only
    the first 10,000 containers are returned by default).
    """
    mgr = self._manager
    return mgr.list(limit=limit, marker=marker, end_marker=end_marker,
            prefix=prefix)
3.075173
3.621628
0.849113
def make_container_public(self, container, ttl=None):
    """Enable CDN access for `container`, optionally setting its TTL."""
    mgr = self._manager
    return mgr.make_public(container, ttl=ttl)
6.633934
10.320889
0.642768
def list_containers_info(self, limit=None, marker=None):
    """Return a list of dicts describing each container, with keys:
    name, count (number of objects), and bytes (total size).
    """
    mgr = self._manager
    return mgr.list_containers_info(limit=limit, marker=marker)
4.032079
8.178046
0.493037
def list_container_subdirs(self, container, limit=None, marker=None,
        prefix=None, delimiter=None, full_listing=False):
    """Return the pseudo-subdirectories (names containing '/') that
    simulate a hierarchy within `container`.
    """
    mgr = self._manager
    return mgr.list_subdirs(container, limit=limit, marker=marker,
            prefix=prefix, delimiter=delimiter, full_listing=full_listing)
2.437113
2.826604
0.862205
def list_container_object_names(self, container, limit=None, marker=None,
        prefix=None, delimiter=None, full_listing=False):
    """Return the names of the objects in `container`, optionally limited
    by the pagination parameters.
    """
    mgr = self._manager
    return mgr.list_object_names(container, marker=marker, limit=limit,
            prefix=prefix, delimiter=delimiter, full_listing=full_listing)
2.511384
3.114272
0.806411
def get_container_metadata(self, container, prefix=None):
    """Return the metadata for `container` as a dict."""
    mgr = self._manager
    return mgr.get_metadata(container, prefix=prefix)
7.518631
6.615029
1.136598
def set_container_metadata(self, container, metadata, clear=False,
        prefix=None):
    """Update (or, with clear=True, replace) `container`'s metadata.

    `prefix` overrides the standard 'X-Container-Meta-' header prefix;
    pass a non-None prefix (e.g. "") for non-standard headers.
    """
    mgr = self._manager
    return mgr.set_metadata(container, metadata, clear=clear,
            prefix=prefix)
4.172832
7.441653
0.56074
def delete_container_metadata(self, container, prefix=None):
    """Remove all of the container's metadata.

    By default all metadata beginning with the standard
    'X-Container-Meta-' prefix is removed; supply `prefix` to remove
    metadata under a different prefix instead.
    """
    mgr = self._manager
    return mgr.delete_metadata(container, prefix=prefix)
6.922988
11.803676
0.586511
def get_object_metadata(self, container, obj, prefix=None):
    """Return the metadata for `obj` in `container` as a dict."""
    mgr = self._manager
    return mgr.get_object_metadata(container, obj, prefix=prefix)
4.140017
3.708154
1.116463
def remove_object_metadata_key(self, container, obj, key, prefix=None):
    """Delete `key` from the object's metadata by setting it to an empty
    value; a missing key is a no-op.
    """
    blanked = {key: ""}
    self.set_object_metadata(container, obj, blanked, prefix=prefix)
4.19534
4.410747
0.951163
def list_container_objects(self, container, limit=None, marker=None,
        prefix=None, delimiter=None, end_marker=None, full_listing=False):
    """Return StorageObjects for the objects in `container`.

    Pagination (marker/end_marker/limit) and filtering (prefix/delimiter)
    apply to the plain listing; with full_listing=True an iterator over
    ALL objects is returned and only `prefix` is honored.
    """
    mgr = self._manager
    if full_listing:
        return mgr.object_listing_iterator(container, prefix=prefix)
    return mgr.list_objects(container, limit=limit, marker=marker,
            prefix=prefix, delimiter=delimiter, end_marker=end_marker)
2.82057
2.839997
0.99316
def delete_object_in_seconds(self, cont, obj, seconds, extra_info=None):
    """Schedule `obj` in `cont` for deletion after `seconds` seconds.

    `extra_info` is accepted only for backwards compatibility and ignored.
    """
    mgr = self._manager
    return mgr.delete_object_in_seconds(cont, obj, seconds)
4.513969
6.028794
0.748735
def store_object(self, container, obj_name, data, content_type=None,
        etag=None, content_encoding=None, ttl=None, return_none=False,
        chunk_size=None, headers=None, metadata=None, extra_info=None):
    """Create `obj_name` in `container` populated with `data`.

    Returns a StorageObject reference unless return_none=True.
    `extra_info` is accepted only for backwards compatibility and ignored.
    """
    return self.create_object(container, obj_name=obj_name, data=data,
            content_type=content_type, etag=etag,
            content_encoding=content_encoding, ttl=ttl,
            return_none=return_none, chunk_size=chunk_size,
            headers=headers, metadata=metadata)
1.58558
1.751278
0.905384
def upload_file(self, container, file_or_path, obj_name=None,
        content_type=None, etag=None, content_encoding=None, ttl=None,
        content_length=None, return_none=False, headers=None,
        metadata=None, extra_info=None):
    """Upload a file (path or open file-like object) to `container`.

    When no `obj_name` is given the file's name is used. Returns a
    StorageObject reference unless return_none=True. `ttl` makes the
    object temporary (deleted after that many seconds). `extra_info` is
    accepted only for backwards compatibility and ignored.
    """
    # Fix: forward content_length — the parameter was previously accepted
    # but silently dropped from the create_object call.
    return self.create_object(container, file_or_path=file_or_path,
            obj_name=obj_name, content_type=content_type, etag=etag,
            content_encoding=content_encoding, ttl=ttl,
            content_length=content_length, headers=headers,
            metadata=metadata, return_none=return_none)
1.812768
2.138544
0.847665
def fetch_object(self, container, obj, include_meta=False,
        chunk_size=None, size=None, extra_info=None):
    """Fetch `obj` from `container`.

    With include_meta=True returns (metadata dict, content bytes); with
    `size`, only the leading bytes. If `chunk_size` is set, fully read
    the contents before making another request. `extra_info` is accepted
    only for backwards compatibility and ignored.
    """
    mgr = self._manager
    return mgr.fetch_object(container, obj, include_meta=include_meta,
            chunk_size=chunk_size, size=size)
2.567607
3.684901
0.696791
def fetch_partial(self, container, obj, size):
    """Return the first `size` bytes of `obj`; smaller objects are
    returned in full.
    """
    mgr = self._manager
    return mgr.fetch_partial(container, obj, size)
5.037082
5.515052
0.913334
def fetch_dlo(self, container, name, chunk_size=None, verbose=False):
    """Return [(segment_name, reader), ...] for the segments of the DLO
    (Dynamic Large Object) named `name`.

    Each reader can be iterated via read() to retrieve its contents —
    useful when transferring a DLO between regions or providers.
    """
    if chunk_size is None:
        chunk_size = DEFAULT_CHUNKSIZE

    class FetchChunker(object):
        # File-like wrapper that reads successive chunks from a generator.
        def __init__(self, gen, verbose=False):
            self.gen = gen
            self.verbose = verbose
            self.processed = 0
            self.count = 0
            self.interval = 100

        def read(self, size=None):
            self.count += 1
            if self.verbose and self.count > self.interval:
                self.count = 0
                print(".")
            chunk = next(self.gen)
            self.processed += len(chunk)
            return chunk

    segments = self.get_container_objects(container, prefix=name)
    job = []
    for seg in segments:
        if seg.name == name:
            # Skip the manifest object itself; only the segments hold data.
            continue
        gen = self.fetch_object(container, seg.name, chunk_size=chunk_size)
        job.append((seg.name, FetchChunker(gen, verbose=verbose)))
    return job
2.706154
2.67427
1.011922
def download_object(self, container, obj, directory, structure=True):
    """Write `obj` into the (existing) `directory`, recreating any nested
    folder structure implied by the object name unless structure=False.
    """
    mgr = self._manager
    return mgr.download_object(container, obj, directory,
            structure=structure)
4.830221
7.074878
0.682728
def delete(self, container, del_objects=False):
    """Delete `container`.

    A non-empty container fails unless del_objects=True, in which case
    every object is deleted first.
    """
    mgr = self._manager
    return mgr.delete(container, del_objects=del_objects)
5.171642
5.729047
0.902705
def copy_object(self, container, obj, new_container, new_obj_name=None,
        content_type=None, extra_info=None):
    """Copy `obj` to `new_container`, optionally renaming it and/or
    changing its content_type.

    Copying within the same container requires a different name.
    `extra_info` is accepted only for backwards compatibility and ignored.
    """
    mgr = self._manager
    return mgr.copy_object(container, obj, new_container,
            new_obj_name=new_obj_name, content_type=content_type)
2.284712
2.844857
0.803102
def move_object(self, container, obj, new_container, new_obj_name=None,
        new_reference=False, content_type=None, extra_info=None):
    """Copy `obj` to `new_container` and delete the source on success.

    With new_reference=True a reference to the moved object is returned;
    otherwise the moved object's etag. `extra_info` is accepted only for
    backwards compatibility and ignored.
    """
    mgr = self._manager
    return mgr.move_object(container, obj, new_container,
            new_obj_name=new_obj_name, new_reference=new_reference,
            content_type=content_type)
1.994057
2.419739
0.824079
return self._manager.change_object_content_type(container, obj, new_ctype, guess=guess)
def change_object_content_type(self, container, obj, new_ctype, guess=False, extra_info=None)
Copies object to itself, but applies a new content-type. The guess feature requires the container to be CDN-enabled. If not then the content-type must be supplied. If using guess with a CDN-enabled container, new_ctype can be set to None. Failure during the put will result in a swift exception. The 'extra_info' parameter is included for backwards compatibility. It is no longer used at all, and will not be modified with swiftclient info, since swiftclient is not used any more.
3.466753
4.818812
0.719421
if not os.path.isdir(folder_path): raise exc.FolderNotFound("No such folder: '%s'" % folder_path) ignore = utils.coerce_to_list(ignore) total_bytes = utils.folder_size(folder_path, ignore) upload_key = str(uuid.uuid4()) self.folder_upload_status[upload_key] = {"continue": True, "total_bytes": total_bytes, "uploaded": 0, } self._upload_folder_in_background(folder_path, container, ignore, upload_key, ttl) return (upload_key, total_bytes)
def upload_folder(self, folder_path, container=None, ignore=None, ttl=None)
Convenience method for uploading an entire folder, including any sub-folders, to Cloud Files. All files will be uploaded to objects with the same name as the file. In the case of nested folders, files will be named with the full path relative to the base folder. E.g., if the folder you specify contains a folder named 'docs', and 'docs' contains a file named 'install.html', that file will be uploaded to an object named 'docs/install.html'. If 'container' is specified, the folder's contents will be uploaded to that container. If it is not specified, a new container with the same name as the specified folder will be created, and the files uploaded to this new container. You can selectively ignore files by passing either a single pattern or a list of patterns; these will be applied to the individual folder and file names, and any names that match any of the 'ignore' patterns will not be uploaded. The patterns should be standard *nix-style shell patterns; e.g., '*pyc' will ignore all files ending in 'pyc', such as 'program.pyc' and 'abcpyc'. The upload will happen asynchronously; in other words, the call to upload_folder() will generate a UUID and return a 2-tuple of (UUID, total_bytes) immediately. Uploading will happen in the background; your app can call get_uploaded(uuid) to get the current status of the upload. When the upload is complete, the value returned by get_uploaded(uuid) will match the total_bytes for the upload. If you start an upload and need to cancel it, call cancel_folder_upload(uuid), passing the uuid returned by the initial call. It will then be up to you to either keep or delete the partially-uploaded content. If you specify a `ttl` parameter, the uploaded files will be deleted after that number of seconds.
3.144237
3.322566
0.946328
uploader = FolderUploader(folder_path, container, ignore, upload_key, self, ttl=ttl) uploader.start()
def _upload_folder_in_background(self, folder_path, container, ignore, upload_key, ttl=None)
Runs the folder upload in the background.
3.463044
3.121858
1.109289
cont = self.get_container(container) self._local_files = [] # Load a list of all the remote objects so we don't have to keep # hitting the service if verbose: log = logging.getLogger("pyrax") log.info("Loading remote object list (prefix=%s)", object_prefix) data = cont.get_objects(prefix=object_prefix, full_listing=True) self._remote_files = dict((d.name, d) for d in data) self._sync_summary = {"total": 0, "uploaded": 0, "ignored": 0, "older": 0, "duplicate": 0, "failed": 0, "failure_reasons": [], "deleted": 0, } self._sync_folder_to_container(folder_path, cont, prefix="", delete=delete, include_hidden=include_hidden, ignore=ignore, ignore_timestamps=ignore_timestamps, object_prefix=object_prefix, verbose=verbose) # Unset the _remote_files self._remote_files = None if verbose: # Log the summary summary = self._sync_summary log.info("Folder sync completed at %s" % time.ctime()) log.info(" Total files processed: %s" % summary["total"]) log.info(" Number Uploaded: %s" % summary["uploaded"]) log.info(" Number Ignored: %s" % summary["ignored"]) log.info(" Number Skipped (older): %s" % summary["older"]) log.info(" Number Skipped (dupe): %s" % summary["duplicate"]) log.info(" Number Deleted: %s" % summary["deleted"]) log.info(" Number Failed: %s" % summary["failed"]) if summary["failed"]: for reason in summary["failure_reasons"]: log.info(" Reason: %s" % reason)
def sync_folder_to_container(self, folder_path, container, delete=False, include_hidden=False, ignore=None, ignore_timestamps=False, object_prefix="", verbose=False)
Compares the contents of the specified folder, and checks to make sure that the corresponding object is present in the specified container. If there is no remote object matching the local file, it is created. If a matching object exists, the etag is examined to determine if the object in the container matches the local file; if they differ, the container is updated with the local file if the local file is newer when `ignore_timestamps` is False (default). If `ignore_timestamps` is True, the object is overwritten with the local file contents whenever the etags differ. NOTE: the timestamp of a remote object is the time it was uploaded, not the original modification time of the file stored in that object. Unless 'include_hidden' is True, files beginning with an initial period are ignored. If the 'delete' option is True, any objects in the container that do not have corresponding files in the local folder are deleted. You can selectively ignore files by passing either a single pattern or a list of patterns; these will be applied to the individual folder and file names, and any names that match any of the 'ignore' patterns will not be uploaded. The patterns should be standard *nix-style shell patterns; e.g., '*pyc' will ignore all files ending in 'pyc', such as 'program.pyc' and 'abcpyc'. If `object_prefix` is set it will be appended to the object name when it is checked and uploaded to the container. For example, if you use sync_folder_to_container("folderToSync/", myContainer, object_prefix="imgFolder") it will upload the files to the container/imgFolder/... instead of just container/... Set `verbose` to True to make it print what is going on. It will show which files are being uploaded and which ones are not and why.
2.564319
2.665746
0.961952
fnames = os.listdir(folder_path) ignore = utils.coerce_to_list(ignore) log = logging.getLogger("pyrax") if not include_hidden: ignore.append(".*") for fname in fnames: if utils.match_pattern(fname, ignore): self._sync_summary["ignored"] += 1 continue pth = os.path.join(folder_path, fname) if os.path.isdir(pth): subprefix = fname if prefix: subprefix = os.path.join(prefix, subprefix) self._sync_folder_to_container(pth, container, prefix=subprefix, delete=delete, include_hidden=include_hidden, ignore=ignore, ignore_timestamps=ignore_timestamps, object_prefix=object_prefix, verbose=verbose) continue self._local_files.append(os.path.join(object_prefix, prefix, fname)) local_etag = utils.get_checksum(pth) if object_prefix: prefix = os.path.join(object_prefix, prefix) object_prefix = "" fullname_with_prefix = os.path.join(prefix, fname) try: obj = self._remote_files[fullname_with_prefix] obj_etag = obj.etag except KeyError: obj = None obj_etag = None if local_etag != obj_etag: if not ignore_timestamps: if obj: obj_time_str = obj.last_modified[:19] else: obj_time_str = EARLY_DATE_STR local_mod = datetime.datetime.utcfromtimestamp( os.stat(pth).st_mtime) local_mod_str = local_mod.isoformat() if obj_time_str >= local_mod_str: # Remote object is newer self._sync_summary["older"] += 1 if verbose: log.info("%s NOT UPLOADED because remote object is " "newer", fullname_with_prefix) log.info(" Local: %s Remote: %s" % ( local_mod_str, obj_time_str)) continue try: container.upload_file(pth, obj_name=fullname_with_prefix, etag=local_etag, return_none=True) self._sync_summary["uploaded"] += 1 if verbose: log.info("%s UPLOADED", fullname_with_prefix) except Exception as e: # Record the failure, and move on self._sync_summary["failed"] += 1 self._sync_summary["failure_reasons"].append("%s" % e) if verbose: log.error("%s UPLOAD FAILED. 
Exception: %s" % (fullname_with_prefix, e)) else: self._sync_summary["duplicate"] += 1 if verbose: log.info("%s NOT UPLOADED because it already exists", fullname_with_prefix) if delete and not prefix: self._delete_objects_not_in_list(container, object_prefix)
def _sync_folder_to_container(self, folder_path, container, prefix, delete, include_hidden, ignore, ignore_timestamps, object_prefix, verbose)
This is the internal method that is called recursively to handle nested folder structures.
2.341596
2.347029
0.997685
objnames = set(cont.get_object_names(prefix=object_prefix, full_listing=True)) localnames = set(self._local_files) to_delete = list(objnames.difference(localnames)) self._sync_summary["deleted"] += len(to_delete) # We don't need to wait around for this to complete. Store the thread # reference in case it is needed at some point. self._thread = self.bulk_delete(cont, to_delete, async_=True)
def _delete_objects_not_in_list(self, cont, object_prefix="")
Finds all the objects in the specified container that are not present in the self._local_files list, and deletes them.
6.054939
5.148809
1.175988
deleter = BulkDeleter(self, container, object_names) deleter.start() if async_: return deleter while not deleter.completed: time.sleep(self.bulk_delete_interval) return deleter.results
def bulk_delete(self, container, object_names, async_=False)
Deletes multiple objects from a container in a single call. The bulk deletion call does not return until all of the specified objects have been processed. For large numbers of objects, this can take quite a while, so there is an 'async_' parameter to give you the option to have this call return immediately. If 'async_' is True, an object is returned with a 'completed' attribute that will be set to True as soon as the bulk deletion is complete, and a 'results' attribute that will contain a dictionary (described below) with the results of the bulk deletion. When deletion is complete the bulk deletion object's 'results' attribute will be populated with the information returned from the API call. In synchronous mode this is the value that is returned when the call completes. It is a dictionary with the following keys: deleted - the number of objects deleted not_found - the number of objects not found status - the HTTP return status code. '200 OK' indicates success errors - a list of any errors returned by the bulk delete call This isn't available in swiftclient yet, so it's using code patterned after the client code in that library.
2.998366
3.26588
0.918088
if not self.cdn_management_url: raise exc.NotCDNEnabled("CDN is not enabled for this service.") cdn_uri = "%s%s" % (self.cdn_management_url, uri) mthd = self.method_dict.get(method.upper()) try: resp, resp_body = mthd(cdn_uri, *args, **kwargs) except exc.NotFound as e: # This could be due to either the container does not exist, or that # the container exists but is not CDN-enabled. try: mgt_uri = "%s%s" % (self.management_url, uri) resp, resp_body = self.method_head(mgt_uri) except exc.NotFound: raise raise exc.NotCDNEnabled("This container is not CDN-enabled.") return resp, resp_body
def cdn_request(self, uri, method, *args, **kwargs)
If the service supports CDN, use this method to access CDN-specific URIs.
3.138474
3.05296
1.02801
if utils.match_pattern(dirname, self.ignore): return False good_names = (nm for nm in fnames if not utils.match_pattern(nm, self.ignore)) for fname in good_names: if self.client._should_abort_folder_upload(self.upload_key): return full_path = os.path.join(dirname, fname) obj_name = os.path.relpath(full_path, self.root_folder) obj_size = os.stat(full_path).st_size self.client.upload_file(self.container, full_path, obj_name=obj_name, return_none=True, ttl=self.ttl) self.client._update_progress(self.upload_key, obj_size)
def upload_files_in_folder(self, dirname, fnames)
Handles the iteration across files within a folder.
3.565443
3.446651
1.034466
root_path, folder_name = os.path.split(self.root_folder) self.root_folder = os.path.join(root_path, folder_name) for dirname, _, fnames in os.walk(self.root_folder): self.upload_files_in_folder(dirname, fnames)
def run(self)
Starts the uploading thread.
3.311018
3.067882
1.079252
for num, option in enumerate(options): if attr: print("%s: %s" % (num, getattr(option, attr))) else: print("%s: %s" % (num, option)) # Add an escape option escape_opt = num + 1 print("%s: I want to exit!" % escape_opt) choice = six.moves.input("Selection: ") try: ichoice = int(choice) if ichoice > escape_opt: raise ValueError except ValueError: print("Valid entries are the numbers 0-%s. Received '%s'." % (escape_opt, choice)) sys.exit() if ichoice == escape_opt: print("Bye!") sys.exit() return ichoice
def option_chooser(options, attr=None)
Given an iterable, enumerate its contents for a user to choose from. If the optional `attr` is not None, that attribute in each iterated object will be printed. This function will exit the program if the user chooses the escape option.
3.468849
3.413865
1.016106
@wraps(fnc) def _wrapped(self, queue, *args, **kwargs): if not isinstance(queue, Queue): # Must be the ID queue = self._manager.get(queue) return fnc(self, queue, *args, **kwargs) return _wrapped
def assure_queue(fnc)
Converts a queue ID or name passed as the 'queue' parameter to a Queue object.
2.972323
2.585547
1.149592
return self._message_manager.delete(msg_id, claim_id=claim_id)
def delete_message(self, msg_id, claim_id=None)
Deletes the message whose ID matches the supplied msg_id from the specified queue. If the message has been claimed, the ID of that claim must be passed as the 'claim_id' parameter.
4.376747
5.642648
0.775655
return self._message_manager.list(include_claimed=include_claimed, echo=echo, marker=marker, limit=limit)
def list(self, include_claimed=False, echo=False, marker=None, limit=None)
Returns a list of messages for this queue. By default only unclaimed messages are returned; if you want claimed messages included, pass `include_claimed=True`. Also, the requester's own messages are not returned by default; if you want them included, pass `echo=True`. The 'marker' and 'limit' parameters are used to control pagination of results. 'Marker' is the ID of the last message returned, while 'limit' controls the number of messages returned per request (default=20).
3.796596
4.366702
0.869443
if not isinstance(claim, QueueClaim): claim = self._claim_manager.get(claim) return claim.messages
def list_by_claim(self, claim)
Returns a list of all the messages from this queue that have been claimed by the specified claim. The claim can be either a claim ID or a QueueClaim object.
7.611574
4.526918
1.681403
return self._claim_manager.claim(ttl, grace, count=count)
def claim_messages(self, ttl, grace, count=None)
Claims up to `count` unclaimed messages from this queue. If count is not specified, the default is to claim 10 messages. The `ttl` parameter specifies how long the server should wait before releasing the claim. The ttl value MUST be between 60 and 43200 seconds. The `grace` parameter is the message grace period in seconds. The value of grace MUST be between 60 and 43200 seconds. The server extends the lifetime of claimed messages to be at least as long as the lifetime of the claim itself, plus a specified grace period to deal with crashed workers (up to 1209600 or 14 days including claim lifetime). If a claimed message would normally live longer than the grace period, its expiration will not be adjusted. Returns a QueueClaim object, whose 'messages' attribute contains the list of QueueMessage objects representing the claimed messages.
6.941947
10.530852
0.659201
return self._claim_manager.update(claim, ttl=ttl, grace=grace)
def update_claim(self, claim, ttl=None, grace=None)
Updates the specified claim with either a new TTL or grace period, or both.
4.738563
5.867446
0.807602
super(QueueMessage, self)._add_details(info) if self.href is None: return parsed = urllib.parse.urlparse(self.href) self.id = parsed.path.rsplit("/", 1)[-1] query = parsed.query if query: self.claim_id = query.split("claim_id=")[-1]
def _add_details(self, info)
The 'id' and 'claim_id' attributes are not supplied directly, but included as part of the 'href' value.
3.685663
2.768816
1.331133
msg_dicts = info.pop("messages", []) super(QueueClaim, self)._add_details(info) parsed = urllib.parse.urlparse(self.href) self.id = parsed.path.rsplit("/", 1)[-1] self.messages = [QueueMessage(self.manager._message_manager, item) for item in msg_dicts]
def _add_details(self, info)
The 'id' attribute is not supplied directly, but included as part of the 'href' value. Also, convert the dicts for messages into QueueMessage objects.
5.417894
3.91975
1.382204
return self._iterate_list(include_claimed=include_claimed, echo=echo, marker=marker, limit=limit)
def list(self, include_claimed=False, echo=False, marker=None, limit=None)
Need to form the URI differently, so we can't use the default list().
4.047204
3.606021
1.122346
ret = [] if limit is None: this_limit = MSG_LIMIT else: this_limit = min(MSG_LIMIT, limit) limit = limit - this_limit uri = "/%s?include_claimed=%s&echo=%s" % (self.uri_base, json.dumps(include_claimed), json.dumps(echo)) qs_parts = [] if marker is not None: qs_parts.append("marker=%s" % marker) if this_limit is not None: qs_parts.append("limit=%s" % this_limit) if qs_parts: uri = "%s&%s" % (uri, "&".join(qs_parts)) resp, resp_body = self._list(uri, return_raw=True) if not resp_body: return ret messages = resp_body.get(self.plural_response_key, []) ret = [QueueMessage(manager=self, info=item) for item in messages] marker = _parse_marker(resp_body) loop = 0 if ((limit is None) or limit > 0) and marker: loop += 1 ret.extend(self._iterate_list(include_claimed, echo, marker, limit)) return ret
def _iterate_list(self, include_claimed, echo, marker, limit)
Recursive method to work around the hard limit of 10 items per call.
2.763192
2.759976
1.001165
msg_id = utils.get_id(msg) if claim_id: uri = "/%s/%s?claim_id=%s" % (self.uri_base, msg_id, claim_id) else: uri = "/%s/%s" % (self.uri_base, msg_id) return self._delete(uri)
def delete(self, msg, claim_id=None)
Deletes the specified message from its queue. If the message has been claimed, the ID of that claim must be passed as the 'claim_id' parameter.
2.208487
2.235738
0.987811
ids = utils.coerce_to_list(ids) uri = "/%s?ids=%s" % (self.uri_base, ",".join(ids)) # The API is not consistent in how it returns message lists, so this # workaround is needed. curr_prkey = self.plural_response_key self.plural_response_key = "" # BROKEN: API returns a list, not a dict. ret = self._list(uri) self.plural_response_key = curr_prkey return ret
def list_by_ids(self, ids)
If you wish to retrieve a list of messages from this queue and know the IDs of those messages, you can pass in a list of those IDs, and only the matching messages will be returned. This avoids pulling down all the messages in a queue and filtering on the client side.
5.99085
5.994991
0.999309
ids = utils.coerce_to_list(ids) uri = "/%s?ids=%s" % (self.uri_base, ",".join(ids)) return self.api.method_delete(uri)
def delete_by_ids(self, ids)
Deletes the messages whose IDs are passed in from this queue.
3.796714
3.376478
1.12446
if count is None: qs = "" else: qs = "?limit=%s" % count uri = "/%s%s" % (self.uri_base, qs) body = {"ttl": ttl, "grace": grace, } resp, resp_body = self.api.method_post(uri, body=body) if resp.status_code == 204: # Nothing available to claim return None # Get the claim ID from the first message in the list. href = resp_body[0]["href"] claim_id = href.split("claim_id=")[-1] return self.get(claim_id)
def claim(self, ttl, grace, count=None)
Claims up to `count` unclaimed messages from this queue. If count is not specified, the default is to claim 10 messages. The `ttl` parameter specifies how long the server should wait before releasing the claim. The ttl value MUST be between 60 and 43200 seconds. The `grace` parameter is the message grace period in seconds. The value of grace MUST be between 60 and 43200 seconds. The server extends the lifetime of claimed messages to be at least as long as the lifetime of the claim itself, plus a specified grace period to deal with crashed workers (up to 1209600 or 14 days including claim lifetime). If a claimed message would normally live longer than the grace period, its expiration will not be adjusted. Returns a QueueClaim object, whose 'messages' attribute contains the list of QueueMessage objects representing the claimed messages.
3.295189
3.443487
0.956934
body = {} if ttl is not None: body["ttl"] = ttl if grace is not None: body["grace"] = grace if not body: raise exc.MissingClaimParameters("You must supply a value for " "'ttl' or 'grace' when calling 'update()'") uri = "/%s/%s" % (self.uri_base, utils.get_id(claim)) resp, resp_body = self.api.method_patch(uri, body=body)
def update(self, claim, ttl=None, grace=None)
Updates the specified claim with either a new TTL or grace period, or both.
3.249983
3.406473
0.954061
if self.api.queue_exists(id_): return Queue(self, {"queue": {"name": id_, "id_": id_}}, key="queue") raise exc.NotFound("The queue '%s' does not exist." % id_)
def get(self, id_)
Need to customize, since Queues are not returned with normal response bodies.
6.48847
5.082392
1.276657
uri = "/%s/%s/stats" % (self.uri_base, utils.get_id(queue)) resp, resp_body = self.api.method_get(uri) return resp_body.get("messages")
def get_stats(self, queue)
Returns the message stats for the specified queue.
5.46012
4.09938
1.331938
uri = "/%s/%s/metadata" % (self.uri_base, utils.get_id(queue)) resp, resp_body = self.api.method_get(uri) return resp_body
def get_metadata(self, queue)
Returns the metadata for the specified queue.
4.740748
3.793853
1.249587
uri = "/%s/%s/metadata" % (self.uri_base, utils.get_id(queue)) if clear: curr = {} else: curr = self.get_metadata(queue) curr.update(metadata) resp, resp_body = self.api.method_put(uri, body=curr)
def set_metadata(self, queue, metadata, clear=False)
Accepts a dictionary and adds that to the specified queue's metadata. If the 'clear' argument is passed as True, any existing metadata is replaced with the new metadata.
3.439104
3.479112
0.9885
self._manager = QueueManager(self, resource_class=Queue, response_key="queue", uri_base="queues")
def _configure_manager(self)
Create the manager to handle queues.
23.502357
14.511688
1.619547
if self.client_id is None: self.client_id = os.environ.get("CLOUD_QUEUES_ID") if self.client_id: dct["Client-ID"] = self.client_id
def _add_custom_headers(self, dct)
Add the Client-ID header required by Cloud Queues
4.165482
2.283811
1.823917
try: return super(QueueClient, self)._api_request(uri, method, **kwargs) except exc.BadRequest as e: if ((e.code == "400") and (e.message == 'The "Client-ID" header is required.')): raise exc.QueueClientIDNotDefined("You must supply a client ID " "to work with Queue messages.") else: raise
def _api_request(self, uri, method, **kwargs)
Any request that involves messages must define the client ID. This handles all failures due to lack of client ID and raises the appropriate exception.
4.999063
4.335226
1.153126
try: queue = self._manager.head(name) return True except exc.NotFound: return False
def queue_exists(self, name)
Returns True or False, depending on the existence of the named queue.
8.381172
8.223995
1.019112
if self.queue_exists(name): raise exc.DuplicateQueue("The queue '%s' already exists." % name) return self._manager.create(name)
def create(self, name)
Cloud Queues works differently, in that they use the name as the ID for the resource. So for create(), we need to check if a queue by that name exists already, and raise an exception if it does. If not, create the queue and return a reference object for it.
5.465692
4.213337
1.297236
return self._manager.set_metadata(queue, metadata, clear=clear)
def set_metadata(self, queue, metadata, clear=False)
Accepts a dictionary and adds that to the specified queue's metadata. If the 'clear' argument is passed as True, any existing metadata is replaced with the new metadata.
5.205521
5.702772
0.912805
return queue.delete_message(msg_id, claim_id=claim_id)
def delete_message(self, queue, msg_id, claim_id=None)
Deletes the message whose ID matches the supplied msg_id from the specified queue. If the message has been claimed, the ID of that claim must be passed as the 'claim_id' parameter.
3.630536
3.684683
0.985305
return queue.list(include_claimed=include_claimed, echo=echo, marker=marker, limit=limit)
def list_messages(self, queue, include_claimed=False, echo=False, marker=None, limit=None)
Returns a list of messages for the specified queue. By default only unclaimed messages are returned; if you want claimed messages included, pass `include_claimed=True`. Also, the requester's own messages are not returned by default; if you want them included, pass `echo=True`. The 'marker' and 'limit' parameters are used to control pagination of results. 'Marker' is the ID of the last message returned, while 'limit' controls the number of messages returned per request (default=20).
2.979385
4.370815
0.681654
return queue.claim_messages(ttl, grace, count=count)
def claim_messages(self, queue, ttl, grace, count=None)
Claims up to `count` unclaimed messages from the specified queue. If count is not specified, the default is to claim 10 messages. The `ttl` parameter specifies how long the server should wait before releasing the claim. The ttl value MUST be between 60 and 43200 seconds. The `grace` parameter is the message grace period in seconds. The value of grace MUST be between 60 and 43200 seconds. The server extends the lifetime of claimed messages to be at least as long as the lifetime of the claim itself, plus a specified grace period to deal with crashed workers (up to 1209600 or 14 days including claim lifetime). If a claimed message would normally live longer than the grace period, its expiration will not be adjusted. Returns a QueueClaim object, whose 'messages' attribute contains the list of QueueMessage objects representing the claimed messages.
5.572802
7.266302
0.766938
return queue.update_claim(claim, ttl=ttl, grace=grace)
def update_claim(self, queue, claim, ttl=None, grace=None)
Updates the specified claim with either a new TTL or grace period, or both.
3.822627
4.423171
0.864228
self._flavor_manager = CloudCDNFlavorManager(self, uri_base="flavors", resource_class=CloudCDNFlavor, response_key=None, plural_response_key="flavors") self._services_manager = CloudCDNServiceManager(self, uri_base="services", resource_class=CloudCDNService, response_key=None, plural_response_key="services")
def _configure_manager(self)
Creates the Manager instances to handle monitoring.
3.666284
3.387218
1.082388
return self._services_manager.list(limit=limit, marker=marker)
def list_services(self, limit=None, marker=None)
List CDN services.
5.97255
6.200324
0.963264
return self._services_manager.create(name, flavor_id, domains, origins, restrictions, caching)
def create_service(self, name, flavor_id, domains, origins, restrictions=None, caching=None)
Create a new CDN service. Arguments: name: The name of the service. flavor_id: The ID of the flavor to use for this service. domains: A list of dictionaries, each of which has a required key "domain" and optional key "protocol" (the default protocol is http). origins: A list of dictionaries, each of which has a required key "origin" which is the URL or IP address to pull origin content from. Optional keys include "port" to use a port other than the default of 80, and "ssl" to enable SSL, which is disabled by default. caching: An optional
4.023293
8.291578
0.485226
self._services_manager.delete_assets(service_id, url, all)
def delete_assets(self, service_id, url=None, all=False)
Delete CDN assets Arguments: service_id: The ID of the service to delete from. url: The URL at which to delete assets all: When True, delete all assets associated with the service_id. You cannot specify both url and all.
5.328862
6.053626
0.880276
matching_endpoints = [] # We don't always get a service catalog back ... if "serviceCatalog" not in self.catalog["access"]: return None # Full catalog ... catalog = self.catalog["access"]["serviceCatalog"] for service in catalog: if service.get("type") != service_type: continue endpoints = service["endpoints"] for endpoint in endpoints: if not filter_value or endpoint.get(attr) == filter_value: endpoint["serviceName"] = service.get("name") matching_endpoints.append(endpoint) if not matching_endpoints: raise exc.EndpointNotFound() elif len(matching_endpoints) > 1: raise exc.AmbiguousEndpoints(endpoints=matching_endpoints) else: return matching_endpoints[0][endpoint_type]
def url_for(self, attr=None, filter_value=None, service_type=None, endpoint_type="publicURL", service_name=None, volume_service_name=None)
Fetches the public URL from the given service for a particular endpoint attribute. If none given, returns the first. See tests for sample service catalog.
2.970479
2.896686
1.025475
return self.manager.get_usage(self, start=start, end=end)
def get_usage(self, start=None, end=None)
Return the usage records for this load balancer. You may optionally include a start datetime or an end datetime, or both, which will limit the records to those on or after the start time, and those before or on the end time. These times should be Python datetime.datetime objects, Python datetime.date objects, or strings in the format: "YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD".
4.694037
4.405926
1.065392
for (key, val) in six.iteritems(info): if key == "nodes": val = [Node(parent=self, **nd) for nd in val] elif key == "sessionPersistence": val = val['persistenceType'] elif key == "cluster": val = val['name'] elif key == "virtualIps": key = "virtual_ips" val = [VirtualIP(parent=self, **vip) for vip in val] setattr(self, key, val)
def _add_details(self, info)
Override the base behavior to add Nodes, VirtualIPs, etc.
3.575487
3.031291
1.179526
return self.manager.update(self, name=name, algorithm=algorithm, protocol=protocol, halfClosed=halfClosed, port=port, timeout=timeout, httpsRedirect=httpsRedirect)
def update(self, name=None, algorithm=None, protocol=None, halfClosed=None, port=None, timeout=None, httpsRedirect=None)
Provides a way to modify the following attributes of a load balancer: - name - algorithm - protocol - halfClosed - port - timeout - httpsRedirect
1.957939
1.957497
1.000226
return self.manager.update_node(node, diff=diff)
def update_node(self, node, diff=None)
Updates the node's attributes.
5.590097
5.266268
1.061491