_id: stringlengths (2-7)
title: stringlengths (1-88)
partition: stringclasses (3 values)
text: stringlengths (75-19.8k)
language: stringclasses (1 value)
meta_information: dict
q17900
assure_snapshot
train
def assure_snapshot(fnc):
    """
    Converts a snapshot ID passed as the snapshot to a
    CloudBlockStorageSnapshot object.
    """
    @wraps(fnc)
    def _wrapped(self, snapshot, *args, **kwargs):
        if not isinstance(snapshot, CloudBlockStorageSnapshot):
            # Must be the ID
            snapshot = self._snapshot_manager.get(snapshot)
        return fnc(self, snapshot, *args, **kwargs)
    return _wrapped
python
{ "resource": "" }
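A minimal sketch of how this decorator would be applied, assuming a client class that exposes the _snapshot_manager used above; delete_snapshot is a hypothetical method name:

    @assure_snapshot
    def delete_snapshot(self, snapshot):
        # By the time the body runs, 'snapshot' is a
        # CloudBlockStorageSnapshot, even if the caller passed only an ID.
        snapshot.delete()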
q17901
CloudBlockStorageSnapshot.delete
train
def delete(self):
    """
    Adds a check to make sure that the snapshot is able to be deleted.
    """
    if self.status not in ("available", "error"):
        raise exc.SnapshotNotAvailable("Snapshot must be in 'available' "
                "or 'error' status before deleting. Current status: %s"
                % self.status)
    # When there is more than one snapshot for a given volume, attempting
    # to delete them all will throw a 409 exception. This will help by
    # retrying such an error once after a RETRY_INTERVAL second delay.
    try:
        super(CloudBlockStorageSnapshot, self).delete()
    except exc.ClientException as e:
        if "Request conflicts with in-progress 'DELETE" in str(e):
            time.sleep(RETRY_INTERVAL)
            # Try again; if it fails, oh, well...
            super(CloudBlockStorageSnapshot, self).delete()
python
{ "resource": "" }
q17902
CloudBlockStorageSnapshot.update
train
def update(self, display_name=None, display_description=None):
    """
    Update the specified values on this snapshot. You may specify one or
    more values to update. If no values are specified as non-None, the
    call is a no-op; no exception will be raised.
    """
    return self.manager.update(self, display_name=display_name,
            display_description=display_description)
python
{ "resource": "" }
q17903
CloudBlockStorageVolume.attach_to_instance
train
def attach_to_instance(self, instance, mountpoint):
    """
    Attaches this volume to the cloud server instance at the specified
    mountpoint. This requires a call to the cloud servers API; it cannot
    be done directly.
    """
    instance_id = _resolve_id(instance)
    try:
        resp = self._nova_volumes.create_server_volume(instance_id,
                self.id, mountpoint)
    except Exception as e:
        raise exc.VolumeAttachmentFailed("%s" % e)
python
{ "resource": "" }
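A short usage sketch of the attach/detach pair, assuming cbs is an instantiated block-storage client; the volume and server IDs are illustrative only:

    vol = cbs.get("my-volume-id")          # hypothetical volume ID
    vol.attach_to_instance("my-server-id", mountpoint="/dev/xvdb")
    # ... later ...
    vol.detach()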
q17904
CloudBlockStorageVolume.detach
train
def detach(self):
    """
    Detaches this volume from any device it may be attached to. If it
    is not attached, nothing happens.
    """
    attachments = self.attachments
    if not attachments:
        # Not attached; no error needed, just return
        return
    # A volume can only be attached to one device at a time, but for some
    # reason this is a list instead of a singular value
    att = attachments[0]
    instance_id = att["server_id"]
    attachment_id = att["id"]
    try:
        self._nova_volumes.delete_server_volume(instance_id, attachment_id)
    except Exception as e:
        raise exc.VolumeDetachmentFailed("%s" % e)
python
{ "resource": "" }
q17905
CloudBlockStorageVolume.create_snapshot
train
def create_snapshot(self, name=None, description=None, force=False):
    """
    Creates a snapshot of this volume, with an optional name and
    description.

    Normally snapshots will not happen if the volume is attached. To
    override this default behavior, pass force=True.
    """
    name = name or ""
    description = description or ""
    # Note that passing in non-None values is required for the
    # _create_body method to distinguish between this and the request
    # to create an instance.
    return self.manager.create_snapshot(volume=self, name=name,
            description=description, force=force)
python
{ "resource": "" }
q17906
CloudBlockStorageVolume.list_snapshots
train
def list_snapshots(self):
    """
    Returns a list of all snapshots of this volume.
    """
    return [snap for snap in self.manager.list_snapshots()
            if snap.volume_id == self.id]
python
{ "resource": "" }
q17907
CloudBlockStorageManager._create_body
train
def _create_body(self, name, size=None, volume_type=None, description=None,
        metadata=None, snapshot_id=None, clone_id=None,
        availability_zone=None, image=None):
    """
    Used to create the dict required to create a new volume.
    """
    if not isinstance(size, six.integer_types):
        raise exc.InvalidSize("Volume sizes must be integers")
    if volume_type is None:
        volume_type = "SATA"
    if description is None:
        description = ""
    if metadata is None:
        metadata = {}
    if image is not None:
        image = utils.get_id(image)
    body = {"volume": {
            "size": size,
            "snapshot_id": snapshot_id,
            "source_volid": clone_id,
            "display_name": name,
            "display_description": description,
            "volume_type": volume_type,
            "metadata": metadata,
            "availability_zone": availability_zone,
            "imageRef": image,
            }}
    return body
python
{ "resource": "" }
q17908
CloudBlockStorageManager.create
train
def create(self, *args, **kwargs):
    """
    Catches errors that may be returned, and raises more informational
    exceptions.
    """
    try:
        return super(CloudBlockStorageManager, self).create(*args,
                **kwargs)
    except exc.BadRequest as e:
        msg = e.message
        if "Clones currently must be >= original volume size" in msg:
            raise exc.VolumeCloneTooSmall(msg)
        else:
            raise
python
{ "resource": "" }
q17909
CloudBlockStorageSnapshotManager._create_body
train
def _create_body(self, name, description=None, volume=None, force=False):
    """
    Used to create the dict required to create a new snapshot.
    """
    body = {"snapshot": {
            "display_name": name,
            "display_description": description,
            "volume_id": volume.id,
            "force": str(force).lower(),
            }}
    return body
python
{ "resource": "" }
q17910
CloudBlockStorageClient._configure_manager
train
def _configure_manager(self):
    """
    Creates the managers to handle volumes, volume types, and snapshots.
    """
    self._manager = CloudBlockStorageManager(self,
            resource_class=CloudBlockStorageVolume, response_key="volume",
            uri_base="volumes")
    self._types_manager = BaseManager(self,
            resource_class=CloudBlockStorageVolumeType,
            response_key="volume_type", uri_base="types")
    self._snapshot_manager = CloudBlockStorageSnapshotManager(self,
            resource_class=CloudBlockStorageSnapshot,
            response_key="snapshot", uri_base="snapshots")
python
{ "resource": "" }
q17911
CloudBlockStorageClient.create_snapshot
train
def create_snapshot(self, volume, name=None, description=None,
        force=False):
    """
    Creates a snapshot of the volume, with an optional name and
    description.

    Normally snapshots will not happen if the volume is attached. To
    override this default behavior, pass force=True.
    """
    return self._snapshot_manager.create(volume=volume, name=name,
            description=description, force=force)
python
{ "resource": "" }
q17912
CloudBlockStorageClient.update_snapshot
train
def update_snapshot(self, snapshot, display_name=None,
        display_description=None):
    """
    Update the specified values on the specified snapshot. You may
    specify one or more values to update.
    """
    return snapshot.update(display_name=display_name,
            display_description=display_description)
python
{ "resource": "" }
q17913
assure_check
train
def assure_check(fnc):
    """
    Converts a check ID passed as the check to a CloudMonitorCheck
    object.
    """
    @wraps(fnc)
    def _wrapped(self, check, *args, **kwargs):
        if not isinstance(check, CloudMonitorCheck):
            # Must be the ID
            check = self._check_manager.get(check)
        return fnc(self, check, *args, **kwargs)
    return _wrapped
python
{ "resource": "" }
q17914
assure_entity
train
def assure_entity(fnc):
    """
    Converts an entity ID passed as the entity to a CloudMonitorEntity
    object.
    """
    @wraps(fnc)
    def _wrapped(self, entity, *args, **kwargs):
        if not isinstance(entity, CloudMonitorEntity):
            # Must be the ID
            entity = self._entity_manager.get(entity)
        return fnc(self, entity, *args, **kwargs)
    return _wrapped
python
{ "resource": "" }
q17915
CloudMonitorEntity.get_check
train
def get_check(self, check):
    """
    Returns an instance of the specified check.
    """
    chk = self._check_manager.get(check)
    chk.set_entity(self)
    return chk
python
{ "resource": "" }
q17916
CloudMonitorEntity.list_checks
train
def list_checks(self, limit=None, marker=None, return_next=False):
    """
    Returns a list of the checks defined for this account. By default
    the number returned is limited to 100; you can define the number to
    return by optionally passing a value for the 'limit' parameter. The
    value for limit must be at least 1, and can be up to 1000.

    For pagination, you must also specify the 'marker' parameter. This
    is the ID of the first item to return. To get this, pass True for
    the 'return_next' parameter, and the response will be a 2-tuple,
    with the first element being the list of checks, and the second the
    ID of the next item. If there is no next item, the second element
    will be None.
    """
    checks = self._check_manager.list(limit=limit, marker=marker,
            return_next=return_next)
    for check in checks:
        check.set_entity(self)
    return checks
python
{ "resource": "" }
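A sketch of paging through checks with return_next, assuming entity is a CloudMonitorEntity instance:

    checks, next_marker = entity.list_checks(limit=100, return_next=True)
    while next_marker:
        page, next_marker = entity.list_checks(limit=100,
                marker=next_marker, return_next=True)
        checks.extend(page)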
q17917
CloudMonitorEntity.create_check
train
def create_check(self, label=None, name=None, check_type=None,
        disabled=False, metadata=None, details=None,
        monitoring_zones_poll=None, timeout=None, period=None,
        target_alias=None, target_hostname=None, target_receiver=None,
        test_only=False, include_debug=False):
    """
    Creates a check on this entity with the specified attributes. The
    'details' parameter should be a dict with the keys as the option
    name, and the value as the desired setting.
    """
    return self._check_manager.create_check(label=label, name=name,
            check_type=check_type, disabled=disabled, metadata=metadata,
            details=details, monitoring_zones_poll=monitoring_zones_poll,
            timeout=timeout, period=period, target_alias=target_alias,
            target_hostname=target_hostname,
            target_receiver=target_receiver, test_only=test_only,
            include_debug=include_debug)
python
{ "resource": "" }
q17918
CloudMonitorEntity.create_alarm
train
def create_alarm(self, check, notification_plan, criteria=None,
        disabled=False, label=None, name=None, metadata=None):
    """
    Creates an alarm that binds the check on this entity with a
    notification plan.
    """
    return self._alarm_manager.create(check, notification_plan,
            criteria=criteria, disabled=disabled, label=label, name=name,
            metadata=metadata)
python
{ "resource": "" }
q17919
CloudMonitorEntity.update_alarm
train
def update_alarm(self, alarm, criteria=None, disabled=False, label=None,
        name=None, metadata=None):
    """
    Updates an existing alarm on this entity.
    """
    return self._alarm_manager.update(alarm, criteria=criteria,
            disabled=disabled, label=label, name=name, metadata=metadata)
python
{ "resource": "" }
q17920
CloudMonitorEntity.list_alarms
train
def list_alarms(self, limit=None, marker=None, return_next=False):
    """
    Returns a list of all the alarms created on this entity.
    """
    return self._alarm_manager.list(limit=limit, marker=marker,
            return_next=return_next)
python
{ "resource": "" }
q17921
_PaginationManager.list
train
def list(self, limit=None, marker=None, return_next=False):
    """
    This is necessary to handle pagination correctly, as the Monitoring
    service defines 'marker' differently than most other services. For
    monitoring, 'marker' represents the first item in the next page,
    whereas other services define it as the ID of the last item in the
    current page.
    """
    kwargs = {}
    if return_next:
        kwargs["other_keys"] = "metadata"
    ret = super(_PaginationManager, self).list(limit=limit,
            marker=marker, **kwargs)
    if return_next:
        ents, meta = ret
        return (ents, meta[0].get("next_marker"))
    else:
        return ret
python
{ "resource": "" }
q17922
CloudMonitorNotificationManager.update_notification
train
def update_notification(self, notification, details):
    """
    Updates the specified notification with the supplied details.
    """
    if isinstance(notification, CloudMonitorNotification):
        nid = notification.id
        ntyp = notification.type
    else:
        # Supplied an ID
        nfcn = self.get(notification)
        nid = notification
        ntyp = nfcn.type
    uri = "/%s/%s" % (self.uri_base, nid)
    body = {"type": ntyp,
            "details": details}
    resp, resp_body = self.api.method_put(uri, body=body)
python
{ "resource": "" }
q17923
CloudMonitorNotificationManager.list_types
train
def list_types(self):
    """
    Returns a list of all available notification types.
    """
    uri = "/notification_types"
    resp, resp_body = self.api.method_get(uri)
    return [CloudMonitorNotificationType(self, info)
            for info in resp_body["values"]]
python
{ "resource": "" }
q17924
CloudMonitorNotificationManager.get_type
train
def get_type(self, notification_type_id):
    """
    Returns a CloudMonitorNotificationType object for the given ID.
    """
    uri = "/notification_types/%s" % utils.get_id(notification_type_id)
    resp, resp_body = self.api.method_get(uri)
    return CloudMonitorNotificationType(self, resp_body)
python
{ "resource": "" }
q17925
CloudMonitorAlarmManager.create
train
def create(self, check, notification_plan, criteria=None,
        disabled=False, label=None, name=None, metadata=None):
    """
    Creates an alarm that binds the check on the given entity with a
    notification plan.

    Note that the 'criteria' parameter, if supplied, should be a string
    representing the DSL for describing alerting conditions and their
    output states. Pyrax does not do any validation of these criteria
    statements; it is up to you as the developer to understand the
    language and correctly form the statement. This alarm language is
    documented online in the Cloud Monitoring section of
    http://docs.rackspace.com.
    """
    uri = "/%s" % self.uri_base
    body = {"check_id": utils.get_id(check),
            "notification_plan_id": utils.get_id(notification_plan),
            }
    if criteria:
        body["criteria"] = criteria
    if disabled is not None:
        body["disabled"] = disabled
    label_name = label or name
    if label_name:
        body["label"] = label_name
    if metadata:
        body["metadata"] = metadata
    resp, resp_body = self.api.method_post(uri, body=body)
    if resp.status_code == 201:
        alarm_id = resp.headers["x-object-id"]
        return self.get(alarm_id)
python
{ "resource": "" }
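A sketch of creating an alarm with a simple criteria statement; the threshold and label are illustrative, and the exact DSL grammar should be verified against the Cloud Monitoring documentation referenced above:

    criteria = ("if (metric['average'] > 90) "
            "{ return new AlarmStatus(CRITICAL); } "
            "return new AlarmStatus(OK);")
    alarm = alarm_manager.create(check, notification_plan,
            criteria=criteria, label="high-cpu")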
q17926
CloudMonitorCheckManager.create_check
train
def create_check(self, label=None, name=None, check_type=None,
        details=None, disabled=False, metadata=None,
        monitoring_zones_poll=None, timeout=None, period=None,
        target_alias=None, target_hostname=None, target_receiver=None,
        test_only=False, include_debug=False):
    """
    Creates a check on the entity with the specified attributes. The
    'details' parameter should be a dict with the keys as the option
    name, and the value as the desired setting.

    If the 'test_only' parameter is True, then the check is not
    created; instead, the check is run and the results of the test run
    returned. If 'include_debug' is True, additional debug information
    is returned. According to the current Cloud Monitoring docs:
    "Currently debug information is only available for the remote.http
    check and includes the response body."
    """
    if details is None:
        raise exc.MissingMonitoringCheckDetails("The required 'details' "
                "parameter was not passed to the create_check() method.")
    ctype = utils.get_id(check_type)
    is_remote = ctype.startswith("remote")
    monitoring_zones_poll = utils.coerce_to_list(monitoring_zones_poll)
    monitoring_zones_poll = [utils.get_id(mzp)
            for mzp in monitoring_zones_poll]
    # Only require monitoring zones and targets for remote checks
    if is_remote:
        if not monitoring_zones_poll:
            raise exc.MonitoringZonesPollMissing("You must specify the "
                    "'monitoring_zones_poll' parameter for remote "
                    "checks.")
        if not (target_alias or target_hostname):
            raise exc.MonitoringCheckTargetNotSpecified("You must "
                    "specify either the 'target_alias' or "
                    "'target_hostname' when creating a remote check.")
    body = {"label": label or name,
            "details": details,
            "disabled": disabled,
            "type": utils.get_id(check_type),
            }
    params = ("monitoring_zones_poll", "timeout", "period",
            "target_alias", "target_hostname", "target_receiver")
    body = _params_to_dict(params, body, locals())
    if test_only:
        uri = "/%s/test-check" % self.uri_base
        if include_debug:
            uri = "%s?debug=true" % uri
    else:
        uri = "/%s" % self.uri_base
    try:
        resp = self.api.method_post(uri, body=body)[0]
    except exc.BadRequest as e:
        msg = e.message
        dtls = e.details
        match = _invalid_key_pat.match(msg)
        if match:
            missing = match.groups()[0].replace("details.", "")
            if missing in details:
                errmsg = ("The value passed for '%s' in the details "
                        "parameter is not valid." % missing)
            else:
                errmsg = ("The required value for the '%s' setting is "
                        "missing from the 'details' parameter."
                        % missing)
            utils.update_exc(e, errmsg)
            raise e
        else:
            if msg == "Validation error":
                # Info is in the 'details'
                raise exc.InvalidMonitoringCheckDetails("Validation "
                        "failed. Error: '%s'." % dtls)
            # It's something other than a validation error; probably
            # limits exceeded. Raise it instead of failing silently.
            raise e
    else:
        if resp.status_code == 201:
            check_id = resp.headers["x-object-id"]
            return self.get(check_id)
        # Don't fail silently here either; raise an error if we get an
        # unexpected response code.
        raise exc.ClientException("Unknown response code creating check;"
                " expected 201, got %s" % resp.status_code)
python
{ "resource": "" }
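A sketch of creating a remote check with this method, assuming entity is a CloudMonitorEntity; the check type, monitoring zone, and details values are illustrative only:

    chk = entity.create_check(label="web-ping",
            check_type="remote.ping",
            details={"count": 5},
            monitoring_zones_poll=["mzdfw"],
            period=60, timeout=30,
            target_hostname="example.com")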
q17927
_EntityFilteringManger.list
train
def list(self, entity=None):
    """
    Returns a dictionary of data, optionally filtered for a given
    entity.
    """
    uri = "/%s" % self.uri_base
    if entity:
        uri = "%s?entityId=%s" % (uri, utils.get_id(entity))
    resp, resp_body = self._list(uri, return_raw=True)
    return resp_body
python
{ "resource": "" }
q17928
CloudMonitorEntityManager._create_body
train
def _create_body(self, name, label=None, agent=None, ip_addresses=None,
        metadata=None):
    """
    Used to create the dict required to create various resources.
    Accepts either 'label' or 'name' as the keyword parameter for the
    label attribute for entities.
    """
    label = label or name
    if ip_addresses is not None:
        body = {"label": label}
        if ip_addresses:
            body["ip_addresses"] = ip_addresses
        if agent:
            body["agent_id"] = utils.get_id(agent)
        if metadata:
            body["metadata"] = metadata
        return body
python
{ "resource": "" }
q17929
CloudMonitorEntityManager.update_entity
train
def update_entity(self, entity, agent=None, metadata=None):
    """
    Updates the specified entity's values with the supplied parameters.
    """
    body = {}
    if agent:
        body["agent_id"] = utils.get_id(agent)
    if metadata:
        body["metadata"] = metadata
    if body:
        uri = "/%s/%s" % (self.uri_base, utils.get_id(entity))
        resp, body = self.api.method_put(uri, body=body)
python
{ "resource": "" }
q17930
CloudMonitorCheck.get
train
def get(self):
    """Reloads the check with its current values."""
    new = self.manager.get(self)
    if new:
        self._add_details(new._info)
python
{ "resource": "" }
q17931
CloudMonitorCheck.list_metrics
train
def list_metrics(self, limit=None, marker=None, return_next=False):
    """
    Returns a list of all the metrics associated with this check.
    """
    return self._metrics_manager.list(limit=limit, marker=marker,
            return_next=return_next)
python
{ "resource": "" }
q17932
CloudMonitorCheck.create_alarm
train
def create_alarm(self, notification_plan, criteria=None, disabled=False,
        label=None, name=None, metadata=None):
    """
    Creates an alarm that binds this check with a notification plan.
    """
    return self.manager.create_alarm(self.entity, self,
            notification_plan, criteria=criteria, disabled=disabled,
            label=label, name=name, metadata=metadata)
python
{ "resource": "" }
q17933
CloudMonitorAlarm.get
train
def get(self):
    """
    Fetches the current state of the alarm from the API and updates the
    object.
    """
    new_alarm = self.entity.get_alarm(self)
    if new_alarm:
        self._add_details(new_alarm._info)
python
{ "resource": "" }
q17934
CloudMonitorClient.list_metrics
train
def list_metrics(self, entity, check, limit=None, marker=None,
        return_next=False):
    """
    Returns a list of all the metrics associated with the specified
    check.
    """
    return entity.list_metrics(check, limit=limit, marker=marker,
            return_next=return_next)
python
{ "resource": "" }
q17935
CloudMonitorClient.create_notification_plan
train
def create_notification_plan(self, label=None, name=None,
        critical_state=None, ok_state=None, warning_state=None):
    """
    Creates a notification plan to be executed when a monitoring check
    triggers an alarm.
    """
    return self._notification_plan_manager.create(label=label, name=name,
            critical_state=critical_state, ok_state=ok_state,
            warning_state=warning_state)
python
{ "resource": "" }
q17936
CloudMonitorClient.update_alarm
train
def update_alarm(self, entity, alarm, criteria=None, disabled=False,
        label=None, name=None, metadata=None):
    """
    Updates an existing alarm on the given entity.
    """
    return entity.update_alarm(alarm, criteria=criteria,
            disabled=disabled, label=label, name=name, metadata=metadata)
python
{ "resource": "" }
q17937
CloudMonitorClient.list_alarms
train
def list_alarms(self, entity, limit=None, marker=None,
        return_next=False):
    """
    Returns a list of all the alarms created on the specified entity.
    """
    return entity.list_alarms(limit=limit, marker=marker,
            return_next=return_next)
python
{ "resource": "" }
q17938
assure_container
train
def assure_container(fnc):
    """
    Assures that whether a Container or the name of a container is
    passed, a Container object is available.
    """
    @wraps(fnc)
    def _wrapped(self, container, *args, **kwargs):
        if not isinstance(container, Container):
            # Must be the name
            container = self.get(container)
        return fnc(self, container, *args, **kwargs)
    return _wrapped
python
{ "resource": "" }
q17939
_massage_metakeys
train
def _massage_metakeys(dct, prfx):
    """
    Returns a copy of the supplied dictionary, prefixing any keys that
    do not begin with the specified prefix accordingly.
    """
    lowprefix = prfx.lower()
    ret = {}
    for k, v in list(dct.items()):
        if not k.lower().startswith(lowprefix):
            k = "%s%s" % (prfx, k)
        ret[k] = v
    return ret
python
{ "resource": "" }
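An illustrative call, showing that already-prefixed keys pass through unchanged (the matching is case-insensitive):

    md = _massage_metakeys({"color": "blue",
            "X-Container-Meta-Size": "big"}, "X-Container-Meta-")
    # md == {"X-Container-Meta-color": "blue",
    #        "X-Container-Meta-Size": "big"}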
q17940
get_file_size
train
def get_file_size(fileobj):
    """
    Returns the size of a file-like object.
    """
    currpos = fileobj.tell()
    fileobj.seek(0, 2)
    total_size = fileobj.tell()
    fileobj.seek(currpos)
    return total_size
python
{ "resource": "" }
q17941
Container._set_cdn_defaults
train
def _set_cdn_defaults(self):
    """Sets all the CDN-related attributes to default values."""
    if self._cdn_enabled is FAULT:
        self._cdn_enabled = False
    self._cdn_uri = None
    self._cdn_ttl = DEFAULT_CDN_TTL
    self._cdn_ssl_uri = None
    self._cdn_streaming_uri = None
    self._cdn_ios_uri = None
    self._cdn_log_retention = False
python
{ "resource": "" }
q17942
Container._fetch_cdn_data
train
def _fetch_cdn_data(self):
    """Fetches the container's CDN data from the CDN service."""
    if self._cdn_enabled is FAULT:
        headers = self.manager.fetch_cdn_data(self)
    else:
        headers = {}
    # Set defaults in case not all headers are present.
    self._set_cdn_defaults()
    if not headers:
        # Not CDN enabled; return
        return
    else:
        self._cdn_enabled = True
    for key, value in headers.items():
        low_key = key.lower()
        if low_key == "x-cdn-uri":
            self._cdn_uri = value
        elif low_key == "x-ttl":
            self._cdn_ttl = int(value)
        elif low_key == "x-cdn-ssl-uri":
            self._cdn_ssl_uri = value
        elif low_key == "x-cdn-streaming-uri":
            self._cdn_streaming_uri = value
        elif low_key == "x-cdn-ios-uri":
            self._cdn_ios_uri = value
        elif low_key == "x-log-retention":
            self._cdn_log_retention = (value == "True")
python
{ "resource": "" }
q17943
Container.get_object
train
def get_object(self, item):
    """
    Returns a StorageObject matching the specified item. If no such
    object exists, a NotFound exception is raised. If 'item' is not a
    string, that item is returned unchanged.
    """
    if isinstance(item, six.string_types):
        item = self.object_manager.get(item)
    return item
python
{ "resource": "" }
q17944
Container.store_object
train
def store_object(self, obj_name, data, content_type=None, etag=None,
        content_encoding=None, ttl=None, return_none=False,
        headers=None, extra_info=None):
    """
    Creates a new object in this container, and populates it with the
    given data. A StorageObject reference to the uploaded file will be
    returned, unless 'return_none' is set to True.

    The 'extra_info' parameter is included for backwards compatibility.
    It is no longer used at all, and will not be modified with
    swiftclient info, since swiftclient is not used any more.
    """
    return self.create(obj_name=obj_name, data=data,
            content_type=content_type, etag=etag,
            content_encoding=content_encoding, ttl=ttl,
            return_none=return_none, headers=headers)
python
{ "resource": "" }
q17945
Container.upload_file
train
def upload_file(self, file_or_path, obj_name=None, content_type=None,
        etag=None, return_none=False, content_encoding=None, ttl=None,
        content_length=None, headers=None):
    """
    Uploads the specified file to this container. If no name is
    supplied, the file's name will be used. Either a file path or an
    open file-like object may be supplied. A StorageObject reference to
    the uploaded file will be returned, unless 'return_none' is set to
    True.

    You may optionally set the `content_type` and `content_encoding`
    parameters; pyrax will create the appropriate headers when the
    object is stored. If the size of the file is known, it can be
    passed as `content_length`.

    If you wish for the object to be temporary, specify the time it
    should be stored in seconds in the `ttl` parameter. If this is
    specified, the object will be deleted after that number of seconds.

    The 'extra_info' parameter is included for backwards compatibility.
    It is no longer used at all, and will not be modified with
    swiftclient info, since swiftclient is not used any more.
    """
    return self.create(file_or_path=file_or_path, obj_name=obj_name,
            content_type=content_type, etag=etag,
            content_encoding=content_encoding, headers=headers,
            content_length=content_length, ttl=ttl,
            return_none=return_none)
python
{ "resource": "" }
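A short usage sketch; the container and file names are hypothetical:

    cont = client.get("my-container")
    obj = cont.upload_file("/tmp/report.pdf",
            content_type="application/pdf",
            ttl=3600)  # object is deleted an hour after upload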
q17946
Container.delete
train
def delete(self, del_objects=False):
    """
    Deletes this Container. If the container contains objects, the
    command will fail unless 'del_objects' is passed as True. In that
    case, each object will be deleted first, and then the container.
    """
    return self.manager.delete(self, del_objects=del_objects)
python
{ "resource": "" }
q17947
Container.delete_object_in_seconds
train
def delete_object_in_seconds(self, obj, seconds, extra_info=None):
    """
    Sets the object in this container to be deleted after the specified
    number of seconds.

    The 'extra_info' parameter is included for backwards compatibility.
    It is no longer used at all, and will not be modified with
    swiftclient info, since swiftclient is not used any more.
    """
    return self.manager.delete_object_in_seconds(self, obj, seconds)
python
{ "resource": "" }
q17948
Container.change_object_content_type
train
def change_object_content_type(self, obj, new_ctype, guess=False):
    """
    Copies object to itself, but applies a new content-type. The guess
    feature requires this container to be CDN-enabled. If not, then the
    content-type must be supplied. If using guess with a CDN-enabled
    container, new_ctype can be set to None. Failure during the put
    will result in an exception.
    """
    return self.manager.change_object_content_type(self, obj, new_ctype,
            guess=guess)
python
{ "resource": "" }
q17949
Container.get_temp_url
train
def get_temp_url(self, obj, seconds, method="GET", key=None, cached=True):
    """
    Given a storage object in this container, returns a URL that can be
    used to access that object. The URL will expire after `seconds`
    seconds.

    The only methods supported are GET and PUT. Anything else will
    raise an `InvalidTemporaryURLMethod` exception.

    If you have your Temporary URL key, you can pass it in directly and
    potentially save an API call to retrieve it. If you don't pass in
    the key, and don't wish to use any cached value, pass
    `cached=False`.
    """
    return self.manager.get_temp_url(self, obj, seconds, method=method,
            key=key, cached=cached)
python
{ "resource": "" }
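A usage sketch, assuming the account's TempURL key has already been set (see set_temp_url_key() further down); the object name is hypothetical:

    url = cont.get_temp_url("report.pdf", seconds=600, method="GET")
    # 'url' can be handed to anyone; it stops working after 10 minutes.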
q17950
ContainerManager.list
train
def list(self, limit=None, marker=None, end_marker=None, prefix=None):
    """
    Swift doesn't return listings in the same format as the rest of
    OpenStack, so this method has to be overridden.
    """
    uri = "/%s" % self.uri_base
    qs = utils.dict_to_qs({"marker": marker, "limit": limit,
            "prefix": prefix, "end_marker": end_marker})
    if qs:
        uri = "%s?%s" % (uri, qs)
    resp, resp_body = self.api.method_get(uri)
    return [Container(self, res, loaded=False)
            for res in resp_body if res]
python
{ "resource": "" }
q17951
ContainerManager.get
train
def get(self, container):
    """
    Returns a Container matching the specified container name. If no
    such container exists, a NoSuchContainer exception is raised.
    """
    name = utils.get_name(container)
    uri = "/%s" % name
    resp, resp_body = self.api.method_head(uri)
    hdrs = resp.headers
    data = {"total_bytes": int(hdrs.get("x-container-bytes-used", "0")),
            "object_count": int(hdrs.get("x-container-object-count",
                    "0")),
            "name": name}
    return Container(self, data, loaded=False)
python
{ "resource": "" }
q17952
ContainerManager.create
train
def create(self, name, metadata=None, prefix=None, *args, **kwargs):
    """
    Creates a new container, and returns a Container object that
    represents that container. If a container by the same name already
    exists, no exception is raised; instead, a reference to that
    existing container is returned.
    """
    uri = "/%s" % name
    headers = {}
    if prefix is None:
        prefix = CONTAINER_META_PREFIX
    if metadata:
        metadata = _massage_metakeys(metadata, prefix)
        headers = metadata
    resp, resp_body = self.api.method_put(uri, headers=headers)
    if resp.status_code in (201, 202):
        hresp, hresp_body = self.api.method_head(uri)
        num_obj = int(hresp.headers.get("x-container-object-count", "0"))
        num_bytes = int(hresp.headers.get("x-container-bytes-used", "0"))
        cont_info = {"name": name, "object_count": num_obj,
                "total_bytes": num_bytes}
        return Container(self, cont_info)
    elif resp.status_code == 400:
        raise exc.ClientException("Container creation failed: %s"
                % resp_body)
python
{ "resource": "" }
q17953
ContainerManager.fetch_cdn_data
train
def fetch_cdn_data(self, container):
    """
    Returns a dict containing the CDN information for the specified
    container. If the container is not CDN-enabled, returns an empty
    dict.
    """
    name = utils.get_name(container)
    uri = "/%s" % name
    try:
        resp, resp_body = self.api.cdn_request(uri, "HEAD")
    except exc.NotCDNEnabled:
        return {}
    return resp.headers
python
{ "resource": "" }
q17954
ContainerManager.get_headers
train
def get_headers(self, container):
    """
    Return the headers for the specified container.
    """
    uri = "/%s" % utils.get_name(container)
    resp, resp_body = self.api.method_head(uri)
    return resp.headers
python
{ "resource": "" }
q17955
ContainerManager.get_account_metadata
train
def get_account_metadata(self, prefix=None):
    """
    Returns a dictionary containing metadata about the account.
    """
    headers = self.get_account_headers()
    if prefix is None:
        prefix = ACCOUNT_META_PREFIX
    low_prefix = prefix.lower()
    ret = {}
    for hkey, hval in list(headers.items()):
        lowkey = hkey.lower()
        if lowkey.startswith(low_prefix):
            cleaned = hkey.replace(low_prefix, "").replace("-", "_")
            ret[cleaned] = hval
    return ret
python
{ "resource": "" }
q17956
ContainerManager.delete_account_metadata
train
def delete_account_metadata(self, prefix=None):
    """
    Removes all metadata matching the specified prefix from the
    account.

    By default, the standard account metadata prefix
    ('X-Account-Meta-') is prepended to the header name if it isn't
    present. For non-standard headers, you must include a non-None
    prefix, such as an empty string.
    """
    # Add the metadata prefix, if needed.
    if prefix is None:
        prefix = ACCOUNT_META_PREFIX
    curr_meta = self.get_account_metadata(prefix=prefix)
    for ckey in curr_meta:
        curr_meta[ckey] = ""
    new_meta = _massage_metakeys(curr_meta, prefix)
    uri = "/"
    resp, resp_body = self.api.method_post(uri, headers=new_meta)
    return 200 <= resp.status_code <= 299
python
{ "resource": "" }
q17957
ContainerManager.remove_metadata_key
train
def remove_metadata_key(self, container, key):
    """
    Removes the specified key from the container's metadata. If the key
    does not exist in the metadata, nothing is done.
    """
    meta_dict = {key: ""}
    return self.set_metadata(container, meta_dict)
python
{ "resource": "" }
q17958
ContainerManager.delete_metadata
train
def delete_metadata(self, container, prefix=None):
    """
    Removes all of the container's metadata.

    By default, all metadata beginning with the standard container
    metadata prefix ('X-Container-Meta-') is removed. If you wish to
    remove all metadata beginning with a different prefix, you must
    specify that prefix.
    """
    # Add the metadata prefix, if needed.
    if prefix is None:
        prefix = CONTAINER_META_PREFIX
    new_meta = {}
    curr_meta = self.get_metadata(container, prefix=prefix)
    for ckey in curr_meta:
        new_meta[ckey] = ""
    uri = "/%s" % utils.get_name(container)
    resp, resp_body = self.api.method_post(uri, headers=new_meta)
    return 200 <= resp.status_code <= 299
python
{ "resource": "" }
q17959
ContainerManager.get_cdn_metadata
train
def get_cdn_metadata(self, container):
    """
    Returns a dictionary containing the CDN metadata for the container.
    If the container does not exist, a NotFound exception is raised. If
    the container exists, but is not CDN-enabled, a NotCDNEnabled
    exception is raised.
    """
    uri = "%s/%s" % (self.uri_base, utils.get_name(container))
    resp, resp_body = self.api.cdn_request(uri, "HEAD")
    ret = dict(resp.headers)
    # Remove non-CDN headers
    ret.pop("content-length", None)
    ret.pop("content-type", None)
    ret.pop("date", None)
    return ret
python
{ "resource": "" }
q17960
ContainerManager.list_public_containers
train
def list_public_containers(self):
    """
    Returns a list of the names of all CDN-enabled containers.
    """
    resp, resp_body = self.api.cdn_request("", "GET")
    return [cont["name"] for cont in resp_body]
python
{ "resource": "" }
q17961
ContainerManager._set_cdn_access
train
def _set_cdn_access(self, container, public, ttl=None):
    """
    Enables or disables CDN access for the specified container, and
    optionally sets the TTL for the container when enabling access.
    """
    headers = {"X-Cdn-Enabled": "%s" % public}
    if public and ttl:
        headers["X-Ttl"] = ttl
    self.api.cdn_request("/%s" % utils.get_name(container),
            method="PUT", headers=headers)
python
{ "resource": "" }
q17962
ContainerManager.get_cdn_log_retention
train
def get_cdn_log_retention(self, container):
    """
    Returns the status of the setting for CDN log retention for the
    specified container.
    """
    resp, resp_body = self.api.cdn_request("/%s"
            % utils.get_name(container), method="HEAD")
    return resp.headers.get("x-log-retention").lower() == "true"
python
{ "resource": "" }
q17963
ContainerManager.set_cdn_log_retention
train
def set_cdn_log_retention(self, container, enabled):
    """
    Enables or disables whether CDN access logs for the specified
    container are collected and stored on Cloud Files.
    """
    headers = {"X-Log-Retention": "%s" % enabled}
    self.api.cdn_request("/%s" % utils.get_name(container),
            method="PUT", headers=headers)
python
{ "resource": "" }
q17964
ContainerManager.get_container_streaming_uri
train
def get_container_streaming_uri(self, container):
    """
    Returns the URI for streaming content, or None if CDN is not
    enabled.
    """
    resp, resp_body = self.api.cdn_request("/%s"
            % utils.get_name(container), method="HEAD")
    return resp.headers.get("x-cdn-streaming-uri")
python
{ "resource": "" }
q17965
ContainerManager.set_web_index_page
train
def set_web_index_page(self, container, page):
    """
    Sets the header indicating the index page in a container when
    creating a static website.

    Note: the container must be CDN-enabled for this to have any
    effect.
    """
    headers = {"X-Container-Meta-Web-Index": "%s" % page}
    self.api.cdn_request("/%s" % utils.get_name(container),
            method="POST", headers=headers)
python
{ "resource": "" }
q17966
ContainerManager.list_objects
train
def list_objects(self, container, limit=None, marker=None, prefix=None,
        delimiter=None, end_marker=None, full_listing=False):
    """
    Return a list of StorageObjects representing the objects in this
    container. You can use the marker, end_marker, and limit params to
    handle pagination, and the prefix and delimiter params to filter
    the objects returned. By default only the first 10,000 objects are
    returned; if you need to access more than that, set the
    'full_listing' parameter to True.
    """
    if full_listing:
        return container.list_all(prefix=prefix)
    return container.list(limit=limit, marker=marker, prefix=prefix,
            delimiter=delimiter, end_marker=end_marker)
python
{ "resource": "" }
q17967
ContainerManager.list_object_names
train
def list_object_names(self, container, marker=None, limit=None,
        prefix=None, delimiter=None, end_marker=None,
        full_listing=False):
    """
    Return a list of the names of the objects in this container. You
    can use the marker, end_marker, and limit params to handle
    pagination, and the prefix and delimiter params to filter the
    objects returned. By default only the first 10,000 objects are
    returned; if you need to access more than that, set the
    'full_listing' parameter to True.
    """
    return container.list_object_names(marker=marker, limit=limit,
            prefix=prefix, delimiter=delimiter, end_marker=end_marker,
            full_listing=full_listing)
python
{ "resource": "" }
q17968
ContainerManager.change_object_content_type
train
def change_object_content_type(self, container, obj, new_ctype,
        guess=False):
    """
    Copies object to itself, but applies a new content-type. The guess
    feature requires the container to be CDN-enabled. If not, then the
    content-type must be supplied. If using guess with a CDN-enabled
    container, new_ctype can be set to None. Failure during the put
    will result in an exception.
    """
    cname = utils.get_name(container)
    oname = utils.get_name(obj)
    if guess and container.cdn_enabled:
        # Test against the CDN url to guess the content-type.
        obj_url = "%s/%s" % (container.cdn_uri, oname)
        new_ctype = mimetypes.guess_type(obj_url)[0]
    return self.copy_object(container, obj, container,
            content_type=new_ctype)
python
{ "resource": "" }
q17969
StorageObject.copy
train
def copy(self, new_container, new_obj_name=None, extra_info=None):
    """
    Copies this object to the new container, optionally giving it a new
    name. If you copy to the same container, you must supply a
    different name.
    """
    return self.container.copy_object(self, new_container,
            new_obj_name=new_obj_name)
python
{ "resource": "" }
q17970
StorageObject.move
train
def move(self, new_container, new_obj_name=None, extra_info=None):
    """
    Works just like copy_object, except that this object is deleted
    after a successful copy. This means that this storage_object
    reference will no longer be valid.
    """
    return self.container.move_object(self, new_container,
            new_obj_name=new_obj_name)
python
{ "resource": "" }
q17971
StorageObject.get_temp_url
train
def get_temp_url(self, seconds, method="GET"):
    """
    Returns a URL that can be used to access this object. The URL will
    expire after `seconds` seconds.

    The only methods supported are GET and PUT. Anything else will
    raise an InvalidTemporaryURLMethod exception.
    """
    return self.container.get_temp_url(self, seconds=seconds,
            method=method)
python
{ "resource": "" }
q17972
StorageObjectManager.get
train
def get(self, obj):
    """
    Gets the information about the specified object.

    This overrides the base behavior, since Swift uses HEAD to get
    information, and GET to download the object.
    """
    name = utils.get_name(obj)
    uri = "/%s/%s" % (self.uri_base, name)
    resp, resp_body = self.api.method_head(uri)
    hdrs = resp.headers
    try:
        content_length = int(hdrs.get("content-length"))
    except (TypeError, ValueError):
        content_length = None
    data = {"name": name,
            "bytes": content_length,
            "content_type": hdrs.get("content-type"),
            "hash": hdrs.get("etag"),
            "last_modified": hdrs.get("last-modified"),
            "timestamp": hdrs.get("x-timestamp"),
            }
    return StorageObject(self, data, loaded=True)
python
{ "resource": "" }
q17973
StorageObjectManager._upload
train
def _upload(self, obj_name, content, content_type, content_encoding,
        content_length, etag, chunked, chunk_size, headers):
    """
    Handles the uploading of content, including working around the 5GB
    maximum file size.
    """
    if content_type is not None:
        headers["Content-Type"] = content_type
    if content_encoding is not None:
        headers["Content-Encoding"] = content_encoding
    if isinstance(content, six.string_types):
        fsize = len(content)
    else:
        if chunked:
            fsize = None
        elif content_length is None:
            fsize = get_file_size(content)
        else:
            fsize = content_length
    if fsize is None or fsize <= MAX_FILE_SIZE:
        # We can just upload it as-is.
        return self._store_object(obj_name, content=content, etag=etag,
                chunked=chunked, chunk_size=chunk_size, headers=headers)
    # Files larger than MAX_FILE_SIZE must be segmented and uploaded
    # separately.
    num_segments = int(math.ceil(float(fsize) / MAX_FILE_SIZE))
    digits = int(math.log10(num_segments)) + 1
    # NOTE: This could be greatly improved with threading or other
    # async design.
    for segment in range(num_segments):
        sequence = str(segment + 1).zfill(digits)
        seg_name = "%s.%s" % (obj_name, sequence)
        with utils.SelfDeletingTempfile() as tmpname:
            with open(tmpname, "wb") as tmp:
                tmp.write(content.read(MAX_FILE_SIZE))
            with open(tmpname, "rb") as tmp:
                # We have to calculate the etag for each segment
                etag = utils.get_checksum(tmp)
                self._store_object(seg_name, content=tmp, etag=etag,
                        chunked=False, headers=headers)
    # Upload the manifest
    headers.pop("ETag", "")
    headers["X-Object-Manifest"] = "%s/%s." % (self.name, obj_name)
    self._store_object(obj_name, content=None, headers=headers)
python
{ "resource": "" }
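For a hypothetical 12 GB upload of 'backup.tar' with a 5 GB MAX_FILE_SIZE, the method above would produce a segment layout like this:

    backup.tar.1   first 5 GB segment
    backup.tar.2   second 5 GB segment
    backup.tar.3   final 2 GB segment
    backup.tar     empty manifest object whose X-Object-Manifest header
                   points at "<container>/backup.tar." so that a GET of
                   'backup.tar' streams the segments in order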
q17974
StorageObjectManager._store_object
train
def _store_object(self, obj_name, content, etag=None, chunked=False,
        chunk_size=None, headers=None):
    """
    Handles the low-level creation of a storage object and the
    uploading of the contents of that object.
    """
    head_etag = headers.pop("ETag", "")
    if chunked:
        headers.pop("Content-Length", "")
        headers["Transfer-Encoding"] = "chunked"
    elif etag is None and content is not None:
        etag = utils.get_checksum(content)
    if etag:
        headers["ETag"] = etag
    if not headers.get("Content-Type"):
        headers["Content-Type"] = None
    uri = "/%s/%s" % (self.uri_base, obj_name)
    resp, resp_body = self.api.method_put(uri, data=content,
            headers=headers)
python
{ "resource": "" }
q17975
StorageObjectManager._fetch_chunker
train
def _fetch_chunker(self, uri, chunk_size, size, obj_size):
    """
    Returns a generator that returns an object in chunks.
    """
    pos = 0
    total_bytes = 0
    size = size or obj_size
    max_size = min(size, obj_size)
    while True:
        endpos = min(obj_size, pos + chunk_size - 1)
        headers = {"Range": "bytes=%s-%s" % (pos, endpos)}
        resp, resp_body = self.api.method_get(uri, headers=headers,
                raw_content=True)
        pos = endpos + 1
        if not resp_body:
            # End of file
            return
        yield resp_body
        total_bytes += len(resp_body)
        if total_bytes >= max_size:
            return
python
{ "resource": "" }
q17976
StorageObjectManager.remove_metadata_key
train
def remove_metadata_key(self, obj, key):
    """
    Removes the specified key from the object's metadata. If the key
    does not exist in the metadata, nothing is done.
    """
    meta_dict = {key: ""}
    return self.set_metadata(obj, meta_dict)
python
{ "resource": "" }
q17977
StorageClient._configure_cdn
train
def _configure_cdn(self):
    """
    Initialize CDN-related endpoints, if available.
    """
    ident = self.identity
    cdn_svc = ident.services.get("object_cdn")
    if cdn_svc:
        ep = cdn_svc.endpoints.get(self.region_name)
        if ep:
            self.cdn_management_url = ep.public_url
python
{ "resource": "" }
q17978
StorageClient._backwards_aliases
train
def _backwards_aliases(self):
    """
    In order to keep this backwards-compatible with previous versions,
    alias the old names to the new methods.
    """
    self.list_containers = self.list_container_names
    self.get_all_containers = self.list
    self.get_container = self.get
    self.create_container = self.create
    self.delete_container = self.delete
    self.get_container_objects = self.list_container_objects
    self.get_container_object_names = self.list_container_object_names
    self.get_info = self.get_account_info
python
{ "resource": "" }
q17979
StorageClient.get
train
def get(self, item):
    """
    Returns the container whose name is provided as 'item'. If 'item'
    is not a string, the original item is returned unchanged.
    """
    if isinstance(item, six.string_types):
        item = super(StorageClient, self).get(item)
    return item
python
{ "resource": "" }
q17980
StorageClient._configure_manager
train
def _configure_manager(self):
    """
    Creates a manager to handle interacting with Containers.
    """
    self._manager = ContainerManager(self, resource_class=Container,
            response_key="", uri_base="")
python
{ "resource": "" }
q17981
StorageClient.get_account_details
train
def get_account_details(self):
    """
    Returns a dictionary containing information about the account.
    """
    headers = self._manager.get_account_headers()
    acct_prefix = "x-account-"
    meta_prefix = ACCOUNT_META_PREFIX.lower()
    ret = {}
    for hkey, hval in list(headers.items()):
        lowkey = hkey.lower()
        if lowkey.startswith(acct_prefix):
            if not lowkey.startswith(meta_prefix):
                cleaned = hkey.replace(acct_prefix, "").replace("-", "_")
                try:
                    # Most values are ints
                    ret[cleaned] = int(hval)
                except ValueError:
                    ret[cleaned] = hval
    return ret
python
{ "resource": "" }
q17982
StorageClient.get_account_info
train
def get_account_info(self):
    """
    Returns a tuple for the number of containers and total bytes in the
    account.
    """
    headers = self._manager.get_account_headers()
    return (headers.get("x-account-container-count"),
            headers.get("x-account-bytes-used"))
python
{ "resource": "" }
q17983
StorageClient.get_temp_url_key
train
def get_temp_url_key(self, cached=True):
    """
    Returns the current TempURL key, or None if it has not been set.

    By default the value returned is cached. To force an API call to
    get the current value on the server, pass `cached=False`.
    """
    meta = self._cached_temp_url_key
    if not cached or not meta:
        key = "temp_url_key"
        meta = self.get_account_metadata().get(key)
        self._cached_temp_url_key = meta
    return meta
python
{ "resource": "" }
q17984
StorageClient.set_temp_url_key
train
def set_temp_url_key(self, key=None):
    """
    Sets the key for the Temporary URL for the account. It should be a
    key that is secret to the owner.

    If no key is provided, a UUID value will be generated and used. It
    can later be obtained by calling get_temp_url_key().
    """
    if key is None:
        key = uuid.uuid4().hex
    meta = {"Temp-Url-Key": key}
    self.set_account_metadata(meta)
    self._cached_temp_url_key = key
python
{ "resource": "" }
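A sketch of the two key methods together:

    client.set_temp_url_key()        # generates and stores a UUID key
    key = client.get_temp_url_key()  # returns the (now cached) key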
q17985
StorageClient.list
train
def list(self, limit=None, marker=None, end_marker=None, prefix=None):
    """
    List the containers in this account, using the parameters to
    control the pagination of containers, since by default only the
    first 10,000 containers are returned.
    """
    return self._manager.list(limit=limit, marker=marker,
            end_marker=end_marker, prefix=prefix)
python
{ "resource": "" }
q17986
StorageClient.list_container_object_names
train
def list_container_object_names(self, container, limit=None, marker=None,
        prefix=None, delimiter=None, full_listing=False):
    """
    Returns the names of all the objects in the specified container,
    optionally limited by the pagination parameters.
    """
    return self._manager.list_object_names(container, marker=marker,
            limit=limit, prefix=prefix, delimiter=delimiter,
            full_listing=full_listing)
python
{ "resource": "" }
q17987
StorageClient.delete_container_metadata
train
def delete_container_metadata(self, container, prefix=None):
    """
    Removes all of the container's metadata.

    By default, all metadata beginning with the standard container
    metadata prefix ('X-Container-Meta-') is removed. If you wish to
    remove all metadata beginning with a different prefix, you must
    specify that prefix.
    """
    return self._manager.delete_metadata(container, prefix=prefix)
python
{ "resource": "" }
q17988
StorageClient.list_container_objects
train
def list_container_objects(self, container, limit=None, marker=None,
        prefix=None, delimiter=None, end_marker=None,
        full_listing=False):
    """
    Return a list of StorageObjects representing the objects in the
    container. You can use the marker, end_marker, and limit params to
    handle pagination, and the prefix and delimiter params to filter
    the objects returned.

    Also, by default only the first 10,000 objects are returned; if you
    set full_listing to True, an iterator to return all the objects in
    the container is returned. In this case, only the 'prefix'
    parameter is used; if you specify any others, they are ignored.
    """
    if full_listing:
        return self._manager.object_listing_iterator(container,
                prefix=prefix)
    return self._manager.list_objects(container, limit=limit,
            marker=marker, prefix=prefix, delimiter=delimiter,
            end_marker=end_marker)
python
{ "resource": "" }
q17989
StorageClient.store_object
train
def store_object(self, container, obj_name, data, content_type=None,
        etag=None, content_encoding=None, ttl=None, return_none=False,
        chunk_size=None, headers=None, metadata=None, extra_info=None):
    """
    Creates a new object in the specified container, and populates it
    with the given data. A StorageObject reference to the uploaded file
    will be returned, unless 'return_none' is set to True.

    The 'extra_info' parameter is included for backwards compatibility.
    It is no longer used at all, and will not be modified with
    swiftclient info, since swiftclient is not used any more.
    """
    return self.create_object(container, obj_name=obj_name, data=data,
            content_type=content_type, etag=etag,
            content_encoding=content_encoding, ttl=ttl,
            return_none=return_none, chunk_size=chunk_size,
            headers=headers, metadata=metadata)
python
{ "resource": "" }
q17990
StorageClient.upload_file
train
def upload_file(self, container, file_or_path, obj_name=None,
        content_type=None, etag=None, content_encoding=None, ttl=None,
        content_length=None, return_none=False, headers=None,
        metadata=None, extra_info=None):
    """
    Uploads the specified file to the container. If no name is
    supplied, the file's name will be used. Either a file path or an
    open file-like object may be supplied. A StorageObject reference to
    the uploaded file will be returned, unless 'return_none' is set to
    True.

    You may optionally set the `content_type` and `content_encoding`
    parameters; pyrax will create the appropriate headers when the
    object is stored. If the size of the file is known, it can be
    passed as `content_length`.

    If you wish for the object to be temporary, specify the time it
    should be stored in seconds in the `ttl` parameter. If this is
    specified, the object will be deleted after that number of seconds.

    The 'extra_info' parameter is included for backwards compatibility.
    It is no longer used at all, and will not be modified with
    swiftclient info, since swiftclient is not used any more.
    """
    return self.create_object(container, file_or_path=file_or_path,
            obj_name=obj_name, content_type=content_type, etag=etag,
            content_encoding=content_encoding, ttl=ttl, headers=headers,
            metadata=metadata, return_none=return_none)
python
{ "resource": "" }
q17991
StorageClient.fetch_partial
train
def fetch_partial(self, container, obj, size):
    """
    Returns the first 'size' bytes of an object. If the object is
    smaller than the specified 'size' value, the entire object is
    returned.
    """
    return self._manager.fetch_partial(container, obj, size)
python
{ "resource": "" }
q17992
StorageClient.upload_folder
train
def upload_folder(self, folder_path, container=None, ignore=None,
        ttl=None):
    """
    Convenience method for uploading an entire folder, including any
    sub-folders, to Cloud Files.

    All files will be uploaded to objects with the same name as the
    file. In the case of nested folders, files will be named with the
    full path relative to the base folder. E.g., if the folder you
    specify contains a folder named 'docs', and 'docs' contains a file
    named 'install.html', that file will be uploaded to an object named
    'docs/install.html'.

    If 'container' is specified, the folder's contents will be uploaded
    to that container. If it is not specified, a new container with the
    same name as the specified folder will be created, and the files
    uploaded to this new container.

    You can selectively ignore files by passing either a single pattern
    or a list of patterns; these will be applied to the individual
    folder and file names, and any names that match any of the 'ignore'
    patterns will not be uploaded. The patterns should be standard
    *nix-style shell patterns; e.g., '*pyc' will ignore all files
    ending in 'pyc', such as 'program.pyc' and 'abcpyc'.

    The upload will happen asynchronously; in other words, the call to
    upload_folder() will generate a UUID and return a 2-tuple of
    (UUID, total_bytes) immediately. Uploading will happen in the
    background; your app can call get_uploaded(uuid) to get the current
    status of the upload. When the upload is complete, the value
    returned by get_uploaded(uuid) will match the total_bytes for the
    upload.

    If you start an upload and need to cancel it, call
    cancel_folder_upload(uuid), passing the uuid returned by the
    initial call. It will then be up to you to either keep or delete
    the partially-uploaded content.

    If you specify a `ttl` parameter, the uploaded files will be
    deleted after that number of seconds.
    """
    if not os.path.isdir(folder_path):
        raise exc.FolderNotFound("No such folder: '%s'" % folder_path)
    ignore = utils.coerce_to_list(ignore)
    total_bytes = utils.folder_size(folder_path, ignore)
    upload_key = str(uuid.uuid4())
    self.folder_upload_status[upload_key] = {"continue": True,
            "total_bytes": total_bytes,
            "uploaded": 0,
            }
    self._upload_folder_in_background(folder_path, container, ignore,
            upload_key, ttl)
    return (upload_key, total_bytes)
python
{ "resource": "" }
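A usage sketch that polls for completion; get_uploaded() is the status call referenced in the docstring above, and the folder path and pattern are illustrative:

    upload_key, total_bytes = client.upload_folder("/home/me/site",
            ignore="*.pyc")
    while client.get_uploaded(upload_key) < total_bytes:
        time.sleep(1)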
q17993
StorageClient._upload_folder_in_background
train
def _upload_folder_in_background(self, folder_path, container, ignore,
        upload_key, ttl=None):
    """Runs the folder upload in the background."""
    uploader = FolderUploader(folder_path, container, ignore, upload_key,
            self, ttl=ttl)
    uploader.start()
python
{ "resource": "" }
q17994
StorageClient._sync_folder_to_container
train
def _sync_folder_to_container(self, folder_path, container, prefix,
        delete, include_hidden, ignore, ignore_timestamps,
        object_prefix, verbose):
    """
    This is the internal method that is called recursively to handle
    nested folder structures.
    """
    fnames = os.listdir(folder_path)
    ignore = utils.coerce_to_list(ignore)
    log = logging.getLogger("pyrax")
    if not include_hidden:
        ignore.append(".*")
    for fname in fnames:
        if utils.match_pattern(fname, ignore):
            self._sync_summary["ignored"] += 1
            continue
        pth = os.path.join(folder_path, fname)
        if os.path.isdir(pth):
            subprefix = fname
            if prefix:
                subprefix = os.path.join(prefix, subprefix)
            self._sync_folder_to_container(pth, container,
                    prefix=subprefix, delete=delete,
                    include_hidden=include_hidden, ignore=ignore,
                    ignore_timestamps=ignore_timestamps,
                    object_prefix=object_prefix, verbose=verbose)
            continue
        self._local_files.append(os.path.join(object_prefix, prefix,
                fname))
        local_etag = utils.get_checksum(pth)
        if object_prefix:
            prefix = os.path.join(object_prefix, prefix)
            object_prefix = ""
        fullname_with_prefix = os.path.join(prefix, fname)
        try:
            obj = self._remote_files[fullname_with_prefix]
            obj_etag = obj.etag
        except KeyError:
            obj = None
            obj_etag = None
        if local_etag != obj_etag:
            if not ignore_timestamps:
                if obj:
                    obj_time_str = obj.last_modified[:19]
                else:
                    obj_time_str = EARLY_DATE_STR
                local_mod = datetime.datetime.utcfromtimestamp(
                        os.stat(pth).st_mtime)
                local_mod_str = local_mod.isoformat()
                if obj_time_str >= local_mod_str:
                    # Remote object is newer
                    self._sync_summary["older"] += 1
                    if verbose:
                        log.info("%s NOT UPLOADED because remote object "
                                "is newer", fullname_with_prefix)
                        log.info("  Local: %s  Remote: %s"
                                % (local_mod_str, obj_time_str))
                    continue
            try:
                container.upload_file(pth, obj_name=fullname_with_prefix,
                        etag=local_etag, return_none=True)
                self._sync_summary["uploaded"] += 1
                if verbose:
                    log.info("%s UPLOADED", fullname_with_prefix)
            except Exception as e:
                # Record the failure, and move on
                self._sync_summary["failed"] += 1
                self._sync_summary["failure_reasons"].append("%s" % e)
                if verbose:
                    log.error("%s UPLOAD FAILED. Exception: %s"
                            % (fullname_with_prefix, e))
        else:
            self._sync_summary["duplicate"] += 1
            if verbose:
                log.info("%s NOT UPLOADED because it already exists",
                        fullname_with_prefix)
    if delete and not prefix:
        self._delete_objects_not_in_list(container, object_prefix)
python
{ "resource": "" }
q17995
StorageClient._delete_objects_not_in_list
train
def _delete_objects_not_in_list(self, cont, object_prefix=""):
    """
    Finds all the objects in the specified container that are not
    present in the self._local_files list, and deletes them.
    """
    objnames = set(cont.get_object_names(prefix=object_prefix,
            full_listing=True))
    localnames = set(self._local_files)
    to_delete = list(objnames.difference(localnames))
    self._sync_summary["deleted"] += len(to_delete)
    # We don't need to wait around for this to complete. Store the
    # thread reference in case it is needed at some point.
    self._thread = self.bulk_delete(cont, to_delete, async_=True)
python
{ "resource": "" }
q17996
StorageClient.bulk_delete
train
def bulk_delete(self, container, object_names, async_=False):
    """
    Deletes multiple objects from a container in a single call.

    The bulk deletion call does not return until all of the specified
    objects have been processed. For large numbers of objects, this can
    take quite a while, so there is an 'async_' parameter to give you
    the option to have this call return immediately. If 'async_' is
    True, an object is returned with a 'completed' attribute that will
    be set to True as soon as the bulk deletion is complete, and a
    'results' attribute that will contain a dictionary (described
    below) with the results of the bulk deletion.

    When deletion is complete the bulk deletion object's 'results'
    attribute will be populated with the information returned from the
    API call. In synchronous mode this is the value that is returned
    when the call completes. It is a dictionary with the following
    keys:

        deleted - the number of objects deleted
        not_found - the number of objects not found
        status - the HTTP return status code. '200 OK' indicates success
        errors - a list of any errors returned by the bulk delete call

    This isn't available in swiftclient yet, so it's using code
    patterned after the client code in that library.
    """
    deleter = BulkDeleter(self, container, object_names)
    deleter.start()
    if async_:
        return deleter
    while not deleter.completed:
        time.sleep(self.bulk_delete_interval)
    return deleter.results
python
{ "resource": "" }
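A usage sketch of both modes; the object names are hypothetical:

    # Synchronous: blocks until done, returns the results dict.
    results = client.bulk_delete(cont, ["old/a.log", "old/b.log"])

    # Asynchronous: returns immediately with a deleter object whose
    # 'completed' and 'results' attributes are documented above.
    deleter = client.bulk_delete(cont, names, async_=True)
    if deleter.completed:
        print(deleter.results["deleted"], deleter.results["errors"])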
q17997
StorageClient.cdn_request
train
def cdn_request(self, uri, method, *args, **kwargs):
    """
    If the service supports CDN, use this method to access CDN-specific
    URIs.
    """
    if not self.cdn_management_url:
        raise exc.NotCDNEnabled("CDN is not enabled for this service.")
    cdn_uri = "%s%s" % (self.cdn_management_url, uri)
    mthd = self.method_dict.get(method.upper())
    try:
        resp, resp_body = mthd(cdn_uri, *args, **kwargs)
    except exc.NotFound as e:
        # This could mean either that the container does not exist, or
        # that it exists but is not CDN-enabled.
        try:
            mgt_uri = "%s%s" % (self.management_url, uri)
            resp, resp_body = self.method_head(mgt_uri)
        except exc.NotFound:
            raise
        raise exc.NotCDNEnabled("This container is not CDN-enabled.")
    return resp, resp_body
python
{ "resource": "" }
q17998
FolderUploader.upload_files_in_folder
train
def upload_files_in_folder(self, dirname, fnames):
    """Handles the iteration across files within a folder."""
    if utils.match_pattern(dirname, self.ignore):
        return False
    good_names = (nm for nm in fnames
            if not utils.match_pattern(nm, self.ignore))
    for fname in good_names:
        if self.client._should_abort_folder_upload(self.upload_key):
            return
        full_path = os.path.join(dirname, fname)
        obj_name = os.path.relpath(full_path, self.root_folder)
        obj_size = os.stat(full_path).st_size
        self.client.upload_file(self.container, full_path,
                obj_name=obj_name, return_none=True, ttl=self.ttl)
        self.client._update_progress(self.upload_key, obj_size)
python
{ "resource": "" }
q17999
FolderUploader.run
train
def run(self):
    """Starts the uploading thread."""
    root_path, folder_name = os.path.split(self.root_folder)
    self.root_folder = os.path.join(root_path, folder_name)
    for dirname, _, fnames in os.walk(self.root_folder):
        self.upload_files_in_folder(dirname, fnames)
python
{ "resource": "" }