desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Resume upload of a file already part-uploaded to Glacier. The resumption of an upload where the part-uploaded section is empty is a valid degenerate case that this function can handle. One and only one of filename or file_obj must be specified. :type upload_id: str :param upload_id: existing Glacier upload id of uploa...
def resume_archive_from_file(self, upload_id, filename=None, file_obj=None):
part_list_response = self.list_all_parts(upload_id) part_size = part_list_response['PartSizeInBytes'] part_hash_map = {} for part_desc in part_list_response['Parts']: part_index = self._range_string_to_part_index(part_desc['RangeInBytes'], part_size) part_tree_hash = codecs.decode(part_d...
'Create a new archive from a file and upload the given file. This is a convenience method around the :class:`boto.glacier.concurrent.ConcurrentUploader` class. This method will perform a multipart upload and upload the parts of the file concurrently. :type filename: str :param filename: A filename to upload :param kwa...
def concurrent_create_archive_from_file(self, filename, description, **kwargs):
    """Upload a file as a new archive using a concurrent multipart upload.

    Convenience wrapper around
    :class:`boto.glacier.concurrent.ConcurrentUploader`; parts of the
    file are uploaded in parallel.

    :type filename: str
    :param filename: A filename to upload.

    :param kwargs: Extra keyword arguments forwarded to the
        ``ConcurrentUploader`` constructor.

    :return: The archive id of the newly created archive.
    """
    return ConcurrentUploader(self.layer1, self.name, **kwargs).upload(
        filename, description)
'Initiate a archive retrieval job to download the data from an archive. You will need to wait for the notification from Amazon (via SNS) before you can actually download the data, this takes around 4 hours. :type archive_id: str :param archive_id: The id of the archive :type description: str :param description: An opti...
def retrieve_archive(self, archive_id, sns_topic=None, description=None):
job_data = {'Type': 'archive-retrieval', 'ArchiveId': archive_id} if (sns_topic is not None): job_data['SNSTopic'] = sns_topic if (description is not None): job_data['Description'] = description response = self.layer1.initiate_job(self.name, job_data) return self.get_job(response['Jo...
'Initiate a inventory retrieval job to list the items in the vault. You will need to wait for the notification from Amazon (via SNS) before you can actually download the data, this takes around 4 hours. :type description: str :param description: An optional description for the job. :type sns_topic: str :param sns_topic...
def retrieve_inventory(self, sns_topic=None, description=None, byte_range=None, start_date=None, end_date=None, limit=None):
job_data = {'Type': 'inventory-retrieval'} if (sns_topic is not None): job_data['SNSTopic'] = sns_topic if (description is not None): job_data['Description'] = description if (byte_range is not None): job_data['RetrievalByteRange'] = byte_range if ((start_date is not None) or...
'Identical to ``retrieve_inventory``, but returns a ``Job`` instance instead of just the job ID. :type description: str :param description: An optional description for the job. :type sns_topic: str :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier sends notification when the job is completed and the outpu...
def retrieve_inventory_job(self, **kwargs):
    """Identical to ``retrieve_inventory`` but returns a ``Job`` instance
    instead of just the job ID.

    All keyword arguments are forwarded to ``retrieve_inventory``.

    :rtype: :class:`boto.glacier.job.Job`
    :return: A Job object for the initiated inventory-retrieval job.
    """
    return self.get_job(self.retrieve_inventory(**kwargs))
'This operation deletes an archive from the vault. :type archive_id: str :param archive_id: The ID for the archive to be deleted.'
def delete_archive(self, archive_id):
    """Delete an archive from this vault.

    :type archive_id: str
    :param archive_id: The ID of the archive to delete.
    """
    # Delegate straight to the layer1 API for this vault's name.
    api = self.layer1
    return api.delete_archive(self.name, archive_id)
'Get an object representing a job in progress. :type job_id: str :param job_id: The ID of the job :rtype: :class:`boto.glacier.job.Job` :return: A Job object representing the job.'
def get_job(self, job_id):
    """Look up a job by ID and wrap the description in a Job object.

    :type job_id: str
    :param job_id: The ID of the job.

    :rtype: :class:`boto.glacier.job.Job`
    :return: A Job object representing the job.
    """
    return Job(self, self.layer1.describe_job(self.name, job_id))
'Return a list of Job objects related to this vault. :type completed: boolean :param completed: Specifies the state of the jobs to return. If a value of True is passed, only completed jobs will be returned. If a value of False is passed, only uncompleted jobs will be returned. If no value is passed, all jobs will be ...
def list_jobs(self, completed=None, status_code=None):
    """Return Job objects for jobs related to this vault.

    :type completed: boolean
    :param completed: If True, only completed jobs are returned; if
        False, only uncompleted jobs; if None, all jobs.

    :type status_code: string
    :param status_code: Optional status filter passed to the service.

    :rtype: list of :class:`boto.glacier.job.Job`
    """
    listing = self.layer1.list_jobs(self.name, completed, status_code)
    jobs = []
    for job_dict in listing['JobList']:
        jobs.append(Job(self, job_dict))
    return jobs
'Automatically make and combine multiple calls to list_parts. Call list_parts as necessary, combining the results in case multiple calls were required to get data on all available parts.'
def list_all_parts(self, upload_id):
result = self.layer1.list_parts(self.name, upload_id) marker = result['Marker'] while marker: additional_result = self.layer1.list_parts(self.name, upload_id, marker=marker) result['Parts'].extend(additional_result['Parts']) marker = additional_result['Marker'] result['Marker'] =...
'Reads and returns the response body, or up to the next amt bytes.'
def read(self, amt=None):
    """Read and return the response body, or up to the next ``amt`` bytes."""
    body = self.http_response
    return body.read(amt)
'This operation downloads the output of the job. Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory. You can download all the job output or download a portion of the output by specifying a byte range. In the case of an archive re...
def get_output(self, byte_range=None, validate_checksum=False):
response = self.vault.layer1.get_job_output(self.vault.name, self.id, byte_range) if (validate_checksum and ('TreeHash' in response)): data = response.read() actual_tree_hash = tree_hash_from_str(data) if (response['TreeHash'] != actual_tree_hash): raise TreeHashDoesNotMatchE...
'Download an archive to a file by name. :type filename: str :param filename: The name of the file where the archive contents will be saved. :type chunk_size: int :param chunk_size: The chunk size to use when downloading the archive. :type verify_hashes: bool :param verify_hashes: Indicates whether or not to verify the ...
def download_to_file(self, filename, chunk_size=DefaultPartSize, verify_hashes=True, retry_exceptions=(socket.error,)):
    """Download this job's archive output to a named file.

    :type filename: str
    :param filename: Path where the archive contents will be saved.

    :type chunk_size: int
    :param chunk_size: Chunk size used while downloading.

    :type verify_hashes: bool
    :param verify_hashes: Whether to verify the tree hashes of each
        downloaded chunk.

    :param retry_exceptions: Exception types that trigger a retry.
    """
    chunk_count = self._calc_num_chunks(chunk_size)
    with open(filename, 'wb') as fileobj:
        self._download_to_fileob(fileobj, chunk_count, chunk_size,
                                 verify_hashes, retry_exceptions)
'Download an archive to a file object. :type output_file: file :param output_file: The file object where the archive contents will be saved. :type chunk_size: int :param chunk_size: The chunk size to use when downloading the archive. :type verify_hashes: bool :param verify_hashes: Indicates whether or not to verify the...
def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize, verify_hashes=True, retry_exceptions=(socket.error,)):
    """Download this job's archive output to an open file object.

    :type output_file: file
    :param output_file: File object the archive contents are written to.

    :type chunk_size: int
    :param chunk_size: Chunk size used while downloading.

    :type verify_hashes: bool
    :param verify_hashes: Whether to verify the tree hashes of each
        downloaded chunk.

    :param retry_exceptions: Exception types that trigger a retry.
    """
    chunk_count = self._calc_num_chunks(chunk_size)
    self._download_to_fileob(output_file, chunk_count, chunk_size,
                             verify_hashes, retry_exceptions)
'This operation lists all vaults owned by the calling user\'s account. The list returned in the response is ASCII-sorted by vault name. By default, this operation returns up to 1,000 items. If there are more vaults to list, the response `marker` field contains the vault Amazon Resource Name (ARN) at which to continue t...
def list_vaults(self, limit=None, marker=None):
    """List vaults owned by the calling account (up to 1,000 per page).

    :type limit: int
    :param limit: Maximum number of vaults to return (optional).

    :type marker: str
    :param marker: Vault ARN at which to continue pagination (optional).
    """
    # Only truthy values are forwarded, matching the service's optional
    # query-parameter handling.
    query = {k: v for k, v in (('limit', limit), ('marker', marker)) if v}
    return self.make_request('GET', 'vaults', params=query)
'This operation returns information about a vault, including the vault\'s Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault. The number of archives and their total size are as of the last inventory generation. This means t...
def describe_vault(self, vault_name):
    """Return vault metadata: ARN, creation date, and the archive count
    and total size as of the last inventory.

    :type vault_name: str
    :param vault_name: The name of the vault.
    """
    return self.make_request('GET', 'vaults/%s' % vault_name)
'This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an AWS account. You can create up to 1,000 vaults per account. If you need to create more vaults, contact Amazon Glacier. You must use the following guidelines when naming a vault. + Names can be betwee...
def create_vault(self, vault_name):
    """Create a new vault with the given name (unique per region/account).

    :type vault_name: str
    :param vault_name: The name of the vault to create.
    """
    # 201 Created is the expected success code; the vault location is
    # returned in the Location response header.
    return self.make_request('PUT', 'vaults/%s' % vault_name,
                             ok_responses=(201,),
                             response_headers=[('Location', 'Location')])
'This operation deletes a vault. Amazon Glacier will delete a vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not removed) and Am...
def delete_vault(self, vault_name):
    """Delete a vault; only succeeds if the vault is empty as of the
    last inventory and has had no writes since.

    :type vault_name: str
    :param vault_name: The name of the vault to delete.
    """
    path = 'vaults/%s' % vault_name
    return self.make_request('DELETE', path, ok_responses=(204,))
'This operation retrieves the `notification-configuration` subresource of the specified vault. For information about setting a notification configuration on a vault, see SetVaultNotifications. If a notification configuration for a vault is not set, the operation returns a `404 Not Found` error. For more information abo...
def get_vault_notifications(self, vault_name):
    """Fetch the ``notification-configuration`` subresource of a vault.

    Returns a 404 error from the service if no configuration is set.

    :type vault_name: str
    :param vault_name: The name of the vault.
    """
    path = 'vaults/%s/notification-configuration' % vault_name
    return self.make_request('GET', path)
'This operation configures notifications that will be sent when specific events happen to a vault. By default, you don\'t get any notifications. To configure vault notifications, send a PUT request to the `notification-configuration` subresource of the vault. The request should include a JSON document that provides an ...
def set_vault_notifications(self, vault_name, notification_config):
    """Configure event notifications for a vault.

    :type vault_name: str
    :param vault_name: The name of the vault.

    :param notification_config: Dict serialized to JSON and PUT to the
        vault's ``notification-configuration`` subresource.
    """
    path = 'vaults/%s/notification-configuration' % vault_name
    return self.make_request('PUT', path,
                             data=json.dumps(notification_config),
                             ok_responses=(204,))
'This operation deletes the notification configuration set for a vault. The operation is eventually consistent;that is, it might take some time for Amazon Glacier to completely disable the notifications and you might still receive some notifications for a short time after you send the delete request. An AWS account has...
def delete_vault_notifications(self, vault_name):
    """Remove the vault's notification configuration (eventually
    consistent — some notifications may still arrive briefly).

    :type vault_name: str
    :param vault_name: The name of the vault.
    """
    path = 'vaults/%s/notification-configuration' % vault_name
    return self.make_request('DELETE', path, ok_responses=(204,))
'This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a...
def list_jobs(self, vault_name, completed=None, status_code=None, limit=None, marker=None):
params = {} if limit: params['limit'] = limit if marker: params['marker'] = marker if status_code: params['statuscode'] = status_code if (completed is not None): params['completed'] = ('true' if completed else 'false') uri = ('vaults/%s/jobs' % vault_name) ret...
'This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon Glacier completes the job. For more information about initiating a job, see InitiateJob. This operation...
def describe_job(self, vault_name, job_id):
    """Return information about a previously initiated job (status,
    initiation date, SNS topic, etc.).

    :type vault_name: str
    :param vault_name: The name of the vault.

    :type job_id: str
    :param job_id: The ID of the job.
    """
    path = 'vaults/%s/jobs/%s' % (vault_name, job_id)
    return self.make_request('GET', path, ok_responses=(200,))
'This operation initiates a job of the specified type. In this release, you can initiate a job to retrieve either an archive or a vault inventory (a list of archives in a vault). Retrieving data from Amazon Glacier is a two-step process: #. Initiate a retrieval job. #. After the job completes, download the bytes. The r...
def initiate_job(self, vault_name, job_data):
    """Initiate an archive-retrieval or inventory-retrieval job.

    :type vault_name: str
    :param vault_name: The name of the vault.

    :param job_data: Dict describing the job (``Type`` etc.), sent as
        the JSON request body.
    """
    # The service answers 202 Accepted; the new job's id and location
    # come back in response headers.
    interesting_headers = [('x-amz-job-id', u'JobId'),
                           ('Location', u'Location')]
    return self.make_request('POST', 'vaults/%s/jobs' % vault_name,
                             data=json.dumps(job_data),
                             ok_responses=(202,),
                             response_headers=interesting_headers)
'This operation downloads the output of the job you initiated using InitiateJob. Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory. A job ID will not expire for at least 24 hours after Amazon Glacier completes the job. That is, y...
def get_job_output(self, vault_name, job_id, byte_range=None):
response_headers = [('x-amz-sha256-tree-hash', u'TreeHash'), ('Content-Range', u'ContentRange'), ('Content-Type', u'ContentType')] headers = None if byte_range: headers = {'Range': ('bytes=%d-%d' % byte_range)} uri = ('vaults/%s/jobs/%s/output' % (vault_name, job_id)) response = self.make_re...
'This operation adds an archive to a vault. This is a synchronous operation, and for a successful upload, your data is durably persisted. Amazon Glacier returns the archive ID in the `x-amz-archive-id` header of the response. You must use the archive ID to access your data in Amazon Glacier. After you upload an archive...
def upload_archive(self, vault_name, archive, linear_hash, tree_hash, description=None):
response_headers = [('x-amz-archive-id', u'ArchiveId'), ('Location', u'Location'), ('x-amz-sha256-tree-hash', u'TreeHash')] uri = ('vaults/%s/archives' % vault_name) try: content_length = str(len(archive)) except (TypeError, AttributeError): content_length = str(os.fstat(archive.fileno()...
'This operation deletes an archive from a vault. Subsequent requests to initiate a retrieval of this archive will fail. Archive retrievals that are in progress for this archive ID may or may not succeed according to the following scenarios: + If the archive retrieval job is actively preparing the data for download when...
def delete_archive(self, vault_name, archive_id):
    """Delete an archive from a vault; later retrieval requests for this
    archive will fail.

    :type vault_name: str
    :param vault_name: The name of the vault.

    :type archive_id: str
    :param archive_id: The ID of the archive to delete.
    """
    path = 'vaults/%s/archives/%s' % (vault_name, archive_id)
    return self.make_request('DELETE', path, ok_responses=(204,))
'This operation initiates a multipart upload. Amazon Glacier creates a multipart upload resource and returns its ID in the response. The multipart upload ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart). When you initiate a multipart upload, you specify the part size in number o...
def initiate_multipart_upload(self, vault_name, part_size, description=None):
response_headers = [('x-amz-multipart-upload-id', u'UploadId'), ('Location', u'Location')] headers = {'x-amz-part-size': str(part_size)} if description: headers['x-amz-archive-description'] = description uri = ('vaults/%s/multipart-uploads' % vault_name) response = self.make_request('POST', ...
'You call this operation to inform Amazon Glacier that all the archive parts have been uploaded and that Amazon Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Amazon Glacier returns the URI path of the newly created archive resource. Using the URI pat...
def complete_multipart_upload(self, vault_name, upload_id, sha256_treehash, archive_size):
response_headers = [('x-amz-archive-id', u'ArchiveId'), ('Location', u'Location')] headers = {'x-amz-sha256-tree-hash': sha256_treehash, 'x-amz-archive-size': str(archive_size)} uri = ('vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)) response = self.make_request('POST', uri, headers=headers, ...
'This operation aborts a multipart upload identified by the upload ID. After the Abort Multipart Upload request succeeds, you cannot upload any more parts to the multipart upload or complete the multipart upload. Aborting a completed upload fails. However, aborting an already-aborted upload will succeed, for a short ti...
def abort_multipart_upload(self, vault_name, upload_id):
    """Abort an in-progress multipart upload; no further parts can be
    uploaded afterwards.

    :type vault_name: str
    :param vault_name: The name of the vault.

    :type upload_id: str
    :param upload_id: The multipart upload ID to abort.
    """
    path = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
    return self.make_request('DELETE', path, ok_responses=(204,))
'This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order. Th...
def list_multipart_uploads(self, vault_name, limit=None, marker=None):
    """List in-progress multipart uploads for a vault.

    :type vault_name: str
    :param vault_name: The name of the vault.

    :type limit: int
    :param limit: Maximum number of uploads to return (optional).

    :type marker: str
    :param marker: Pagination marker from a previous response (optional).
    """
    # Forward only truthy optional query parameters.
    query = {k: v for k, v in (('limit', limit), ('marker', marker)) if v}
    uri = 'vaults/%s/multipart-uploads' % vault_name
    return self.make_request('GET', uri, params=query)
'This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload. List Parts returns an error for completed uploads. The list returned in the List...
def list_parts(self, vault_name, upload_id, limit=None, marker=None):
    """List the parts uploaded so far in an in-progress multipart upload.

    :type vault_name: str
    :param vault_name: The name of the vault.

    :type upload_id: str
    :param upload_id: The multipart upload ID.

    :type limit: int
    :param limit: Maximum number of parts to return (optional).

    :type marker: str
    :param marker: Pagination marker from a previous response (optional).
    """
    query = {k: v for k, v in (('limit', limit), ('marker', marker)) if v}
    uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
    return self.make_request('GET', uri, params=query)
'This operation uploads a part of an archive. You can upload archive parts in any order. You can also upload them in parallel. You can upload up to 10,000 parts for a multipart upload. Amazon Glacier rejects your upload part request if any of the following conditions is true: + **SHA256 tree hash does not match**To ens...
def upload_part(self, vault_name, upload_id, linear_hash, tree_hash, byte_range, part_data):
headers = {'x-amz-content-sha256': linear_hash, 'x-amz-sha256-tree-hash': tree_hash, 'Content-Range': ('bytes %d-%d/*' % byte_range)} response_headers = [('x-amz-sha256-tree-hash', u'TreeHash')] uri = ('vaults/%s/multipart-uploads/%s' % (str(vault_name), upload_id)) return self.make_request('PUT', ur...
':type api: :class:`boto.glacier.layer1.Layer1` :param api: A layer1 glacier object. :type vault_name: str :param vault_name: The name of the vault. :type part_size: int :param part_size: The size, in bytes, of the chunks to use when uploading the archive parts. The part size must be a megabyte multiplied by a power o...
def __init__(self, api, vault_name, part_size=DEFAULT_PART_SIZE, num_threads=10):
    """Set up a concurrent multipart uploader for one vault.

    :type api: :class:`boto.glacier.layer1.Layer1`
    :param api: A layer1 glacier object.

    :type vault_name: str
    :param vault_name: The name of the vault.

    :type part_size: int
    :param part_size: Part size in bytes; must be a megabyte multiplied
        by a power of two.

    :type num_threads: int
    :param num_threads: Number of upload worker threads.
    """
    super(ConcurrentUploader, self).__init__(part_size, num_threads)
    self._vault_name = vault_name
    self._api = api
'Concurrently create an archive. The part_size value specified when the class was constructed will be used *unless* it is smaller than the minimum required part size needed for the size of the given file. In that case, the part size used will be the minimum part size required to properly upload the given file. :type f...
def upload(self, filename, description=None):
total_size = os.stat(filename).st_size (total_parts, part_size) = self._calculate_required_part_size(total_size) hash_chunks = ([None] * total_parts) worker_queue = Queue() result_queue = Queue() response = self._api.initiate_multipart_upload(self._vault_name, part_size, description) upload_...
':param job: A layer2 job object for archive retrieval object. :param part_size: The size, in bytes, of the chunks to use when uploading the archive parts. The part size must be a megabyte multiplied by a power of two.'
def __init__(self, job, part_size=DEFAULT_PART_SIZE, num_threads=10):
    """Set up a concurrent downloader for an archive-retrieval job.

    :param job: A layer2 job object for the archive retrieval.

    :param part_size: Chunk size in bytes for downloading the archive
        parts; must be a megabyte multiplied by a power of two.

    :param num_threads: Number of download worker threads.
    """
    super(ConcurrentDownloader, self).__init__(part_size, num_threads)
    self._job = job
'Concurrently download an archive. :param filename: The filename to download the archive to :type filename: str'
def download(self, filename):
total_size = self._job.archive_size (total_parts, part_size) = self._calculate_required_part_size(total_size) worker_queue = Queue() result_queue = Queue() self._add_work_items_to_queue(total_parts, worker_queue, part_size) self._start_download_threads(result_queue, worker_queue) try: ...
'Waits until the result_queue is filled with all the downloaded parts This indicates that all part downloads have completed Saves downloaded parts into filename :param filename: :param result_queue: :param total_parts:'
def _wait_for_download_threads(self, filename, result_queue, total_parts):
hash_chunks = ([None] * total_parts) with open(filename, 'wb') as f: for _ in range(total_parts): result = result_queue.get() if isinstance(result, Exception): log.debug('An error was found in the result queue, terminating threads: %s...
'Individual download thread that will download parts of the file from Glacier. Parts to download stored in work queue. Parts download to a temp dir with each part a separate file :param job: Glacier job object :param work_queue: A queue of tuples which include the part_number and part_size :param result_queue: A priori...
def __init__(self, job, worker_queue, result_queue, num_retries=5, time_between_retries=5, retry_exceptions=Exception):
    """Worker thread that downloads parts of an archive from Glacier.

    :param job: Glacier job object.
    :param worker_queue: Queue of (part_number, part_size) tuples to fetch.
    :param result_queue: Queue that receives results or exceptions.
    :param num_retries: Download attempts per chunk before giving up.
    :param time_between_retries: Seconds to wait between retries.
    :param retry_exceptions: Exception type(s) that trigger a retry.
    """
    super(DownloadWorkerThread, self).__init__(worker_queue, result_queue)
    self._job = job
    self._retry_exceptions = retry_exceptions
    self._num_retries = num_retries
    self._time_between_retries = time_between_retries
'Attempt to download a part of the archive from Glacier Store the result in the result_queue :param work:'
def _process_chunk(self, work):
result = None for _ in range(self._num_retries): try: result = self._download_chunk(work) break except self._retry_exceptions as e: log.error('Exception caught downloading part number %s for job %s', work[0], self._job) time...
'Downloads a chunk of archive from Glacier. Saves the data to a temp file Returns the part number and temp file location :param work:'
def _download_chunk(self, work):
(part_number, part_size) = work start_byte = (part_number * part_size) byte_range = (start_byte, ((start_byte + part_size) - 1)) log.debug('Downloading chunk %s of size %s', part_number, part_size) response = self._job.get_output(byte_range) data = response.read() actual_hash ...
'Creates a vault. :type name: str :param name: The name of the vault :rtype: :class:`boto.glacier.vault.Vault` :return: A Vault object representing the vault.'
def create_vault(self, name):
    """Create a vault and return a Vault object for it.

    :type name: str
    :param name: The name of the vault.

    :rtype: :class:`boto.glacier.vault.Vault`
    :return: A Vault object representing the new vault.
    """
    # Create first, then fetch the full description for the wrapper.
    self.layer1.create_vault(name)
    return self.get_vault(name)
'Delete a vault. This operation deletes a vault. Amazon Glacier will delete a vault only if there are no archives in the vault as per the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is no...
def delete_vault(self, name):
    """Delete a vault by name; fails server-side unless the vault is
    empty as of the last inventory with no writes since.

    :type name: str
    :param name: The name of the vault to delete.
    """
    api = self.layer1
    return api.delete_vault(name)
'Get an object representing a named vault from Glacier. This operation does not check if the vault actually exists. :type name: str :param name: The name of the vault :rtype: :class:`boto.glacier.vault.Vault` :return: A Vault object representing the vault.'
def get_vault(self, name):
    """Return a Vault object for the named vault.

    Note: this describes the vault but does not verify existence beyond
    what ``describe_vault`` reports.

    :type name: str
    :param name: The name of the vault.

    :rtype: :class:`boto.glacier.vault.Vault`
    """
    return Vault(self.layer1, self.layer1.describe_vault(name))
'Return a list of all vaults associated with the account ID. :rtype: List of :class:`boto.glacier.vault.Vault` :return: A list of Vault objects.'
def list_vaults(self):
    """Return Vault objects for every vault in the account, paging
    through the service 1,000 vaults at a time.

    :rtype: list of :class:`boto.glacier.vault.Vault`
    """
    result = []
    marker = None
    while True:
        page = self.layer1.list_vaults(marker=marker, limit=1000)
        for record in page['VaultList']:
            result.append(Vault(self.layer1, record))
        marker = page.get('Marker')
        if not marker:
            # No continuation marker means the last page was reached.
            return result
'Upload a part to Glacier. :param part_index: part number where 0 is the first part :param part_data: data to upload corresponding to this part'
def upload_part(self, part_index, part_data):
if self.closed: raise ValueError('I/O operation on closed file') part_tree_hash = tree_hash(chunk_hashes(part_data, self.chunk_size)) self._insert_tree_hash(part_index, part_tree_hash) hex_tree_hash = bytes_to_hex(part_tree_hash) linear_hash = hashlib.sha256(part_data).hexdigest(...
'Skip uploading of a part. The final close call needs to calculate the tree hash and total size of all uploaded data, so this is the mechanism for resume functionality to provide it without actually uploading the data again. :param part_index: part number where 0 is the first part :param part_tree_hash: binary tree_has...
def skip_part(self, part_index, part_tree_hash, part_length):
    """Record a part as already uploaded without re-sending its data.

    Resume support: the final close() computes the tree hash and total
    size of all uploaded data, so resumed parts register their hash and
    length here instead of uploading again.

    :param part_index: Part number, where 0 is the first part.
    :param part_tree_hash: Binary tree hash of the part's data.
    :param part_length: Length in bytes of the part.

    :raises ValueError: If the writer has already been closed.
    """
    if self.closed:
        raise ValueError('I/O operation on closed file')
    self._insert_tree_hash(part_index, part_tree_hash)
    self._uploaded_size += part_length
'Returns the current tree hash for the data that\'s been written **so far**. Only once the writing is complete is the final tree hash returned.'
@property
def current_tree_hash(self):
    """Tree hash of the data written **so far**.

    Only once writing is complete does this equal the final tree hash.
    """
    return tree_hash(self.uploader._tree_hashes)
'Returns the current uploaded size for the data that\'s been written **so far**. Only once the writing is complete is the final uploaded size returned.'
@property
def current_uploaded_size(self):
    """Number of bytes uploaded **so far**.

    Only once writing is complete does this equal the final size.
    """
    return self.uploader._uploaded_size
'Validates a pipeline and initiates processing. If the pipeline does not pass validation, activation fails. Call this action to start processing pipeline tasks of a pipeline you\'ve created using the CreatePipeline and PutPipelineDefinition actions. A pipeline cannot be modified after it has been successfully activated...
def activate_pipeline(self, pipeline_id):
    """Validate a pipeline and start processing its tasks.

    A pipeline cannot be modified after successful activation.

    :type pipeline_id: string
    :param pipeline_id: The identifier of the pipeline to activate.
    """
    body = json.dumps({'pipelineId': pipeline_id})
    return self.make_request(action='ActivatePipeline', body=body)
'Creates a new empty pipeline. When this action succeeds, you can then use the PutPipelineDefinition action to populate the pipeline. :type name: string :param name: The name of the new pipeline. You can use the same name for multiple pipelines associated with your AWS account, because AWS Data Pipeline assigns each ne...
def create_pipeline(self, name, unique_id, description=None):
    """Create a new, empty pipeline.

    :type name: string
    :param name: Name of the new pipeline (not required to be unique).

    :type unique_id: string
    :param unique_id: Caller-supplied idempotency token.

    :type description: string
    :param description: Optional pipeline description.
    """
    payload = {'name': name, 'uniqueId': unique_id}
    if description is not None:
        payload['description'] = description
    return self.make_request(action='CreatePipeline',
                             body=json.dumps(payload))
'Permanently deletes a pipeline, its pipeline definition and its run history. You cannot query or restore a deleted pipeline. AWS Data Pipeline will attempt to cancel instances associated with the pipeline that are currently being processed by task runners. Deleting a pipeline cannot be undone. To temporarily pause a p...
def delete_pipeline(self, pipeline_id):
    """Permanently delete a pipeline, its definition and run history.

    Deletion cannot be undone; in-flight instances are cancelled.

    :type pipeline_id: string
    :param pipeline_id: The identifier of the pipeline to delete.
    """
    body = json.dumps({'pipelineId': pipeline_id})
    return self.make_request(action='DeletePipeline', body=body)
'Returns the object definitions for a set of objects associated with the pipeline. Object definitions are composed of a set of fields that define the properties of the object. :type pipeline_id: string :param pipeline_id: Identifier of the pipeline that contains the object definitions. :type object_ids: list :param obj...
def describe_objects(self, object_ids, pipeline_id, marker=None, evaluate_expressions=None):
    """Return field definitions for a set of pipeline objects.

    :type object_ids: list
    :param object_ids: Identifiers of the objects to describe.

    :type pipeline_id: string
    :param pipeline_id: Pipeline containing the objects.

    :type marker: string
    :param marker: Pagination marker (optional).

    :param evaluate_expressions: Whether to evaluate expressions in the
        object fields (optional).
    """
    payload = {'pipelineId': pipeline_id, 'objectIds': object_ids}
    if evaluate_expressions is not None:
        payload['evaluateExpressions'] = evaluate_expressions
    if marker is not None:
        payload['marker'] = marker
    return self.make_request(action='DescribeObjects',
                             body=json.dumps(payload))
'Retrieve metadata about one or more pipelines. The information retrieved includes the name of the pipeline, the pipeline identifier, its current state, and the user account that owns the pipeline. Using account credentials, you can retrieve metadata about pipelines that you or your IAM users have created. If you are u...
def describe_pipelines(self, pipeline_ids):
    """Retrieve metadata (name, id, state, owner) for pipelines.

    :type pipeline_ids: list
    :param pipeline_ids: Identifiers of the pipelines to describe.
    """
    body = json.dumps({'pipelineIds': pipeline_ids})
    return self.make_request(action='DescribePipelines', body=body)
'Evaluates a string in the context of a specified object. A task runner can use this action to evaluate SQL queries stored in Amazon S3. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline. :type object_id: string :param object_id: The identifier of the object. :type expression: string :param e...
def evaluate_expression(self, pipeline_id, expression, object_id):
    """Evaluate a string expression in the context of a pipeline object
    (e.g. SQL queries stored in Amazon S3).

    :type pipeline_id: string
    :param pipeline_id: The identifier of the pipeline.

    :type object_id: string
    :param object_id: The identifier of the object.

    :type expression: string
    :param expression: The expression to evaluate.
    """
    payload = {'pipelineId': pipeline_id,
               'objectId': object_id,
               'expression': expression}
    return self.make_request(action='EvaluateExpression',
                             body=json.dumps(payload))
'Returns the definition of the specified pipeline. You can call GetPipelineDefinition to retrieve the pipeline definition you provided using PutPipelineDefinition. :type pipeline_id: string :param pipeline_id: The identifier of the pipeline. :type version: string :param version: The version of the pipeline definition t...
def get_pipeline_definition(self, pipeline_id, version=None):
    """Return the definition previously stored with PutPipelineDefinition.

    :type pipeline_id: string
    :param pipeline_id: The identifier of the pipeline.

    :type version: string
    :param version: Which definition version to fetch (optional).
    """
    payload = {'pipelineId': pipeline_id}
    if version is not None:
        payload['version'] = version
    return self.make_request(action='GetPipelineDefinition',
                             body=json.dumps(payload))
'Returns a list of pipeline identifiers for all active pipelines. Identifiers are returned only for pipelines you have permission to access. :type marker: string :param marker: The starting point for the results to be returned. The first time you call ListPipelines, this value should be empty. As long as the action ret...
def list_pipelines(self, marker=None):
    """List identifiers of all active pipelines you may access.

    :type marker: string
    :param marker: Pagination marker from a previous call (optional;
        empty on the first call).
    """
    payload = {} if marker is None else {'marker': marker}
    return self.make_request(action='ListPipelines',
                             body=json.dumps(payload))
'Task runners call this action to receive a task to perform from AWS Data Pipeline. The task runner specifies which tasks it can perform by setting a value for the workerGroup parameter of the PollForTask call. The task returned by PollForTask may come from any of the pipelines that match the workerGroup value passed i...
def poll_for_task(self, worker_group, hostname=None, instance_identity=None):
    """Ask AWS Data Pipeline for a task this runner can perform.

    :type worker_group: string
    :param worker_group: Worker group the runner polls tasks for.

    :type hostname: string
    :param hostname: Public DNS name of the task runner (optional).

    :param instance_identity: EC2 instance identity document (optional).
    """
    payload = {'workerGroup': worker_group}
    if hostname is not None:
        payload['hostname'] = hostname
    if instance_identity is not None:
        payload['instanceIdentity'] = instance_identity
    return self.make_request(action='PollForTask',
                             body=json.dumps(payload))
'Adds tasks, schedules, and preconditions that control the behavior of the pipeline. You can use PutPipelineDefinition to populate a new pipeline or to update an existing pipeline that has not yet been activated. PutPipelineDefinition also validates the configuration as it adds it to the pipeline. Changes to the pipeli...
def put_pipeline_definition(self, pipeline_objects, pipeline_id):
    """Store tasks, schedules and preconditions on a pipeline; the
    definition is validated as it is added.

    :type pipeline_objects: list
    :param pipeline_objects: Objects defining the pipeline.

    :type pipeline_id: string
    :param pipeline_id: The identifier of the pipeline.
    """
    payload = {'pipelineId': pipeline_id,
               'pipelineObjects': pipeline_objects}
    return self.make_request(action='PutPipelineDefinition',
                             body=json.dumps(payload))
'Queries a pipeline for the names of objects that match a specified set of conditions. The objects returned by QueryObjects are paginated and then filtered by the value you set for query. This means the action may return an empty result set with a value set for marker. If `HasMoreResults` is set to `True`, you should c...
def query_objects(self, pipeline_id, sphere, marker=None, query=None, limit=None):
    """Query a pipeline for object names matching a set of conditions.

    Results are paginated; pass the returned marker back to continue.

    :type pipeline_id: string
    :param pipeline_id: The identifier of the pipeline.

    :type sphere: string
    :param sphere: Which object sphere to query.

    :param query: Query conditions (optional).
    :param marker: Pagination marker (optional).
    :param limit: Maximum number of results (optional).
    """
    payload = {'pipelineId': pipeline_id, 'sphere': sphere}
    if query is not None:
        payload['query'] = query
    if marker is not None:
        payload['marker'] = marker
    if limit is not None:
        payload['limit'] = limit
    return self.make_request(action='QueryObjects',
                             body=json.dumps(payload))
'Updates the AWS Data Pipeline service on the progress of the calling task runner. When the task runner is assigned a task, it should call ReportTaskProgress to acknowledge that it has the task within 2 minutes. If the web service does not receive this acknowledgement within the 2 minute window, it will assign the task...
def report_task_progress(self, task_id):
    """Acknowledge to the service that the runner still holds the task.

    Must be called within 2 minutes of assignment or the task may be
    reassigned.

    :type task_id: string
    :param task_id: Identifier of the task being worked on.
    """
    body = json.dumps({'taskId': task_id})
    return self.make_request(action='ReportTaskProgress', body=body)
'Task runners call ReportTaskRunnerHeartbeat every 15 minutes to indicate that they are operational. In the case of AWS Data Pipeline Task Runner launched on a resource managed by AWS Data Pipeline, the web service can use this call to detect when the task runner application has failed and restart a new instance. :type...
def report_task_runner_heartbeat(self, taskrunner_id, worker_group=None, hostname=None):
    """Heartbeat (every 15 minutes) signalling the runner is operational.

    :type taskrunner_id: string
    :param taskrunner_id: Identifier of the task runner.

    :param worker_group: Worker group the runner belongs to (optional).
    :param hostname: Public DNS name of the task runner (optional).
    """
    payload = {'taskrunnerId': taskrunner_id}
    if worker_group is not None:
        payload['workerGroup'] = worker_group
    if hostname is not None:
        payload['hostname'] = hostname
    return self.make_request(action='ReportTaskRunnerHeartbeat',
                             body=json.dumps(payload))
def set_status(self, object_ids, status, pipeline_id):
    """Request a status update for a batch of pipeline objects. The update
    is eventually consistent, and the statuses that may be set depend on
    each object's type.

    :type object_ids: list
    :param object_ids: Identifiers of the objects to update.

    :type status: string
    :param status: Status to set on every listed object.

    :type pipeline_id: string
    :param pipeline_id: Identifier of the pipeline containing the objects.
    """
    payload = json.dumps({
        'pipelineId': pipeline_id,
        'objectIds': object_ids,
        'status': status,
    })
    return self.make_request(action='SetStatus', body=payload)
def set_task_status(self, task_id, task_status, error_id=None, error_message=None, error_stack_trace=None):
    """Notify AWS Data Pipeline that a task has completed and report its
    final status, optionally including error details for failed tasks.

    :type task_id: string
    :param task_id: Identifier of the completed task.

    :type task_status: string
    :param task_status: Final status of the task (e.g. FINISHED, FAILED).

    :type error_id: string
    :param error_id: Error identifier, for failed tasks.

    :type error_message: string
    :param error_message: Human-readable error description.

    :type error_stack_trace: string
    :param error_stack_trace: Stack trace associated with the failure.
    """
    params = {'taskId': task_id, 'taskStatus': task_status}
    if error_id is not None:
        params['errorId'] = error_id
    if error_message is not None:
        params['errorMessage'] = error_message
    if error_stack_trace is not None:
        params['errorStackTrace'] = error_stack_trace
    # NOTE(review): the source line was truncated mid-statement
    # ("return self..."); restored the make_request dispatch that every
    # sibling method in this client uses, per the SetTaskStatus API action.
    return self.make_request(action='SetTaskStatus', body=json.dumps(params))
def validate_pipeline_definition(self, pipeline_objects, pipeline_id):
    """Run validation checks on a pipeline definition to ensure it is well
    formed and can run without error.

    :type pipeline_objects: list
    :param pipeline_objects: Objects that make up the pipeline definition.

    :type pipeline_id: string
    :param pipeline_id: Identifier of the pipeline being validated.
    """
    payload = json.dumps({
        'pipelineId': pipeline_id,
        'pipelineObjects': pipeline_objects,
    })
    return self.make_request(action='ValidatePipelineDefinition', body=payload)
def batch_get_applications(self, application_names=None):
    """Get information about one or more applications.

    :type application_names: list
    :param application_names: Names of the applications to look up.
    """
    params = {}
    if application_names is not None:
        params['applicationNames'] = application_names
    return self.make_request(action='BatchGetApplications', body=json.dumps(params))
def batch_get_deployments(self, deployment_ids=None):
    """Get information about one or more deployments.

    :type deployment_ids: list
    :param deployment_ids: Deployment IDs to look up.
    """
    params = ({}
              if deployment_ids is None
              else {'deploymentIds': deployment_ids})
    return self.make_request(action='BatchGetDeployments', body=json.dumps(params))
def create_application(self, application_name):
    """Create a new CodeDeploy application.

    :type application_name: string
    :param application_name: Name of the application; must be unique within
        the AWS user account.
    """
    body = json.dumps({'applicationName': application_name})
    return self.make_request(action='CreateApplication', body=body)
def create_deployment(self, application_name, deployment_group_name=None, revision=None, deployment_config_name=None, description=None, ignore_application_stop_failures=None):
    """Deploy an application revision to the specified deployment group.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy application.

    :type deployment_group_name: string
    :param deployment_group_name: Name of the target deployment group.

    :type revision: dict
    :param revision: Type and location of the revision to deploy.

    :type deployment_config_name: string
    :param deployment_config_name: Deployment configuration to use.

    :type description: string
    :param description: Comment about the deployment.

    :type ignore_application_stop_failures: boolean
    :param ignore_application_stop_failures: Whether to continue the
        deployment to an instance when its ApplicationStop lifecycle
        event fails.
    """
    params = {'applicationName': application_name}
    if deployment_group_name is not None:
        params['deploymentGroupName'] = deployment_group_name
    if revision is not None:
        params['revision'] = revision
    if deployment_config_name is not None:
        params['deploymentConfigName'] = deployment_config_name
    # NOTE(review): the source line was truncated after
    # "params['deploymentConfigName'] = deployme..."; the remaining optional
    # parameters and dispatch are restored per the CreateDeployment API action
    # and the pattern every sibling method follows.
    if description is not None:
        params['description'] = description
    if ignore_application_stop_failures is not None:
        params['ignoreApplicationStopFailures'] = ignore_application_stop_failures
    return self.make_request(action='CreateDeployment', body=json.dumps(params))
def create_deployment_config(self, deployment_config_name, minimum_healthy_hosts=None):
    """Create a new deployment configuration.

    :type deployment_config_name: string
    :param deployment_config_name: Name for the new configuration.

    :type minimum_healthy_hosts: dict
    :param minimum_healthy_hosts: Minimum number of healthy instances that
        must be available at any time during a deployment.
    """
    params = {'deploymentConfigName': deployment_config_name}
    if minimum_healthy_hosts is not None:
        params['minimumHealthyHosts'] = minimum_healthy_hosts
    return self.make_request(action='CreateDeploymentConfig',
                             body=json.dumps(params))
def create_deployment_group(self, application_name, deployment_group_name, deployment_config_name=None, ec_2_tag_filters=None, auto_scaling_groups=None, service_role_arn=None):
    """Create a new deployment group for application revisions to be
    deployed to.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy application.

    :type deployment_group_name: string
    :param deployment_group_name: Name for the new deployment group.

    :type deployment_config_name: string
    :param deployment_config_name: Deployment configuration to associate.

    :type ec_2_tag_filters: list
    :param ec_2_tag_filters: EC2 tag filters selecting target instances.

    :type auto_scaling_groups: list
    :param auto_scaling_groups: Auto Scaling groups to deploy to.

    :type service_role_arn: string
    :param service_role_arn: ARN of the service role that grants CodeDeploy
        access to the account's resources.
    """
    params = {'applicationName': application_name,
              'deploymentGroupName': deployment_group_name}
    if deployment_config_name is not None:
        params['deploymentConfigName'] = deployment_config_name
    if ec_2_tag_filters is not None:
        params['ec2TagFilters'] = ec_2_tag_filters
    # NOTE(review): the source line was truncated at "if (auto_scaling_gro...";
    # the remaining optional parameters and dispatch are restored per the
    # CreateDeploymentGroup API action and the sibling-method pattern.
    if auto_scaling_groups is not None:
        params['autoScalingGroups'] = auto_scaling_groups
    if service_role_arn is not None:
        params['serviceRoleArn'] = service_role_arn
    return self.make_request(action='CreateDeploymentGroup',
                             body=json.dumps(params))
def delete_application(self, application_name):
    """Delete an application.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy application.
    """
    body = json.dumps({'applicationName': application_name})
    return self.make_request(action='DeleteApplication', body=body)
def delete_deployment_config(self, deployment_config_name):
    """Delete a deployment configuration.

    A configuration currently in use cannot be deleted, nor can the
    predefined configurations.

    :type deployment_config_name: string
    :param deployment_config_name: Name of an existing configuration.
    """
    body = json.dumps({'deploymentConfigName': deployment_config_name})
    return self.make_request(action='DeleteDeploymentConfig', body=body)
def delete_deployment_group(self, application_name, deployment_group_name):
    """Delete a deployment group.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy application.

    :type deployment_group_name: string
    :param deployment_group_name: Name of an existing deployment group of
        that application.
    """
    body = json.dumps({'applicationName': application_name,
                       'deploymentGroupName': deployment_group_name})
    return self.make_request(action='DeleteDeploymentGroup', body=body)
def get_application(self, application_name):
    """Get information about an application.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy application.
    """
    payload = json.dumps({'applicationName': application_name})
    return self.make_request(action='GetApplication', body=payload)
def get_application_revision(self, application_name, revision):
    """Get information about an application revision.

    :type application_name: string
    :param application_name: Name of the application the revision belongs to.

    :type revision: dict
    :param revision: Type and location of the revision to retrieve.
    """
    payload = json.dumps({'applicationName': application_name,
                          'revision': revision})
    return self.make_request(action='GetApplicationRevision', body=payload)
def get_deployment(self, deployment_id):
    """Get information about a deployment.

    :type deployment_id: string
    :param deployment_id: An existing deployment ID.
    """
    payload = json.dumps({'deploymentId': deployment_id})
    return self.make_request(action='GetDeployment', body=payload)
def get_deployment_config(self, deployment_config_name):
    """Get information about a deployment configuration.

    :type deployment_config_name: string
    :param deployment_config_name: Name of an existing configuration.
    """
    payload = json.dumps({'deploymentConfigName': deployment_config_name})
    return self.make_request(action='GetDeploymentConfig', body=payload)
def get_deployment_group(self, application_name, deployment_group_name):
    """Get information about a deployment group.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy application.

    :type deployment_group_name: string
    :param deployment_group_name: Name of an existing deployment group of
        that application.
    """
    payload = json.dumps({'applicationName': application_name,
                          'deploymentGroupName': deployment_group_name})
    return self.make_request(action='GetDeploymentGroup', body=payload)
def get_deployment_instance(self, deployment_id, instance_id):
    """Get information about an EC2 instance participating in a deployment.

    :type deployment_id: string
    :param deployment_id: Unique ID of the deployment.

    :type instance_id: string
    :param instance_id: Unique ID of an EC2 instance in the deployment's
        deployment group.
    """
    payload = json.dumps({'deploymentId': deployment_id,
                          'instanceId': instance_id})
    return self.make_request(action='GetDeploymentInstance', body=payload)
def list_application_revisions(self, application_name, sort_by=None, sort_order=None, s_3_bucket=None, s_3_key_prefix=None, deployed=None, next_token=None):
    """List revisions registered for an application.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy application.

    :type sort_by: string
    :param sort_by: Column to sort results by (e.g. registerTime).

    :type sort_order: string
    :param sort_order: Sort order for the results.

    :type s_3_bucket: string
    :param s_3_bucket: Restrict results to revisions in this S3 bucket.

    :type s_3_key_prefix: string
    :param s_3_key_prefix: Restrict results to S3 keys with this prefix.

    :type deployed: string
    :param deployed: Filter by whether the revision has been deployed.

    :type next_token: string
    :param next_token: Pagination token from a previous call.
    """
    params = {'applicationName': application_name}
    if sort_by is not None:
        params['sortBy'] = sort_by
    if sort_order is not None:
        params['sortOrder'] = sort_order
    if s_3_bucket is not None:
        params['s3Bucket'] = s_3_bucket
    # NOTE(review): the source line was truncated at "params['s3Ke...";
    # the remaining optional parameters and dispatch are restored per the
    # ListApplicationRevisions API action and the sibling-method pattern.
    if s_3_key_prefix is not None:
        params['s3KeyPrefix'] = s_3_key_prefix
    if deployed is not None:
        params['deployed'] = deployed
    if next_token is not None:
        params['nextToken'] = next_token
    return self.make_request(action='ListApplicationRevisions',
                             body=json.dumps(params))
def list_applications(self, next_token=None):
    """List the applications registered within the AWS user account.

    :type next_token: string
    :param next_token: Pagination token returned by a previous call.
    """
    params = {} if next_token is None else {'nextToken': next_token}
    return self.make_request(action='ListApplications', body=json.dumps(params))
def list_deployment_configs(self, next_token=None):
    """List the deployment configurations within the AWS user account.

    :type next_token: string
    :param next_token: Pagination token returned by a previous call.
    """
    params = {} if next_token is None else {'nextToken': next_token}
    return self.make_request(action='ListDeploymentConfigs',
                             body=json.dumps(params))
def list_deployment_groups(self, application_name, next_token=None):
    """List the deployment groups of an application.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy application.

    :type next_token: string
    :param next_token: Pagination token returned by a previous call.
    """
    params = {'applicationName': application_name}
    if next_token is not None:
        params['nextToken'] = next_token
    return self.make_request(action='ListDeploymentGroups',
                             body=json.dumps(params))
def list_deployment_instances(self, deployment_id, next_token=None, instance_status_filter=None):
    """List the EC2 instances taking part in a deployment.

    :type deployment_id: string
    :param deployment_id: Unique ID of the deployment.

    :type next_token: string
    :param next_token: Pagination token returned by a previous call.

    :type instance_status_filter: list
    :param instance_status_filter: Restrict results to instances with the
        given statuses.
    """
    params = {'deploymentId': deployment_id}
    optional = (('nextToken', next_token),
                ('instanceStatusFilter', instance_status_filter))
    for key, value in optional:
        if value is not None:
            params[key] = value
    return self.make_request(action='ListDeploymentInstances',
                             body=json.dumps(params))
def list_deployments(self, application_name=None, deployment_group_name=None, include_only_statuses=None, create_time_range=None, next_token=None):
    """List the deployments under a deployment group of an application.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy application.

    :type deployment_group_name: string
    :param deployment_group_name: Name of an existing deployment group.

    :type include_only_statuses: list
    :param include_only_statuses: Restrict results to deployments with the
        given statuses.

    :type create_time_range: dict
    :param create_time_range: Restrict results to deployments created
        within this time range.

    :type next_token: string
    :param next_token: Pagination token from a previous call.
    """
    params = {}
    if application_name is not None:
        params['applicationName'] = application_name
    if deployment_group_name is not None:
        params['deploymentGroupName'] = deployment_group_name
    if include_only_statuses is not None:
        params['includeOnlyStatuses'] = include_only_statuses
    # NOTE(review): the source line was truncated after the
    # includeOnlyStatuses assignment; the remaining optional parameters and
    # dispatch are restored per the ListDeployments API action and the
    # sibling-method pattern.
    if create_time_range is not None:
        params['createTimeRange'] = create_time_range
    if next_token is not None:
        params['nextToken'] = next_token
    return self.make_request(action='ListDeployments', body=json.dumps(params))
def register_application_revision(self, application_name, revision, description=None):
    """Register a revision for the specified application with CodeDeploy.

    :type application_name: string
    :param application_name: Name of an existing CodeDeploy application.

    :type revision: dict
    :param revision: Type and location of the revision to register.

    :type description: string
    :param description: Comment about the revision.
    """
    params = {'applicationName': application_name, 'revision': revision}
    if description is not None:
        params['description'] = description
    return self.make_request(action='RegisterApplicationRevision',
                             body=json.dumps(params))
def stop_deployment(self, deployment_id):
    """Attempt to stop an ongoing deployment.

    :type deployment_id: string
    :param deployment_id: Unique ID of the deployment to stop.
    """
    payload = json.dumps({'deploymentId': deployment_id})
    return self.make_request(action='StopDeployment', body=payload)
def update_application(self, application_name=None, new_application_name=None):
    """Rename an existing application.

    :type application_name: string
    :param application_name: Current name of the application.

    :type new_application_name: string
    :param new_application_name: New name for the application.
    """
    optional = {'applicationName': application_name,
                'newApplicationName': new_application_name}
    params = {k: v for k, v in optional.items() if v is not None}
    return self.make_request(action='UpdateApplication', body=json.dumps(params))
def update_deployment_group(self, application_name, current_deployment_group_name, new_deployment_group_name=None, deployment_config_name=None, ec_2_tag_filters=None, auto_scaling_groups=None, service_role_arn=None):
    """Change information about an existing deployment group.

    :type application_name: string
    :param application_name: Application that owns the deployment group.

    :type current_deployment_group_name: string
    :param current_deployment_group_name: Current name of the group.

    :type new_deployment_group_name: string
    :param new_deployment_group_name: New name for the group, if renaming.

    :type deployment_config_name: string
    :param deployment_config_name: Replacement deployment configuration.

    :type ec_2_tag_filters: list
    :param ec_2_tag_filters: Replacement EC2 tag filters.

    :type auto_scaling_groups: list
    :param auto_scaling_groups: Replacement Auto Scaling groups.

    :type service_role_arn: string
    :param service_role_arn: Replacement service role ARN.
    """
    params = {'applicationName': application_name,
              'currentDeploymentGroupName': current_deployment_group_name}
    if new_deployment_group_name is not None:
        params['newDeploymentGroupName'] = new_deployment_group_name
    if deployment_config_name is not None:
        params['deploymentConfigName'] = deployment_config_name
    # NOTE(review): the source line was truncated at
    # "params['deploymentConfigName'] = deplo..."; the remaining optional
    # parameters and dispatch are restored per the UpdateDeploymentGroup API
    # action and the sibling-method pattern.
    if ec_2_tag_filters is not None:
        params['ec2TagFilters'] = ec_2_tag_filters
    if auto_scaling_groups is not None:
        params['autoScalingGroups'] = auto_scaling_groups
    if service_role_arn is not None:
        params['serviceRoleArn'] = service_role_arn
    return self.make_request(action='UpdateDeploymentGroup',
                             body=json.dumps(params))
def add_attachments_to_set(self, attachments, attachment_set_id=None):
    """Add one or more attachments to an attachment set.

    If no ``attachment_set_id`` is given, a new set is created and its ID
    is returned in the response; otherwise the attachments are added to the
    existing set.

    :type attachments: list
    :param attachments: Attachments to add.

    :type attachment_set_id: string
    :param attachment_set_id: ID of an existing attachment set, if any.
    """
    params = {'attachments': attachments}
    if attachment_set_id is not None:
        params['attachmentSetId'] = attachment_set_id
    return self.make_request(action='AddAttachmentsToSet',
                             body=json.dumps(params))
def add_communication_to_case(self, communication_body, case_id=None, cc_email_addresses=None, attachment_set_id=None):
    """Add customer communication to an AWS Support case.

    :type communication_body: string
    :param communication_body: Text of the communication.

    :type case_id: string
    :param case_id: ID of the case to add the communication to.

    :type cc_email_addresses: list
    :param cc_email_addresses: Email addresses to copy on the communication.

    :type attachment_set_id: string
    :param attachment_set_id: ID of a previously created attachment set to
        include.
    """
    params = {'communicationBody': communication_body}
    if case_id is not None:
        params['caseId'] = case_id
    if cc_email_addresses is not None:
        params['ccEmailAddresses'] = cc_email_addresses
    if attachment_set_id is not None:
        params['attachmentSetId'] = attachment_set_id
    # NOTE(review): the source line was truncated at "retur..."; restored the
    # make_request dispatch per the AddCommunicationToCase API action.
    return self.make_request(action='AddCommunicationToCase',
                             body=json.dumps(params))
def create_case(self, subject, communication_body, service_code=None, severity_code=None, category_code=None, cc_email_addresses=None, language=None, issue_type=None, attachment_set_id=None):
    """Create a new case in the AWS Support Center.

    Models the Support Center "Open a new case" page.

    :type subject: string
    :param subject: Title of the support case.

    :type communication_body: string
    :param communication_body: Body text describing the issue.

    :type service_code: string
    :param service_code: Code of the AWS service the case concerns.

    :type severity_code: string
    :param severity_code: Severity code for the case.

    :type category_code: string
    :param category_code: Category code within the service.

    :type cc_email_addresses: list
    :param cc_email_addresses: Email addresses to copy on correspondence.

    :type language: string
    :param language: ISO 639-1 support language code.

    :type issue_type: string
    :param issue_type: "customer-service" or "technical".

    :type attachment_set_id: string
    :param attachment_set_id: ID of a previously created attachment set.
    """
    params = {'subject': subject, 'communicationBody': communication_body}
    if service_code is not None:
        params['serviceCode'] = service_code
    if severity_code is not None:
        params['severityCode'] = severity_code
    if category_code is not None:
        params['categoryCode'] = category_code
    # NOTE(review): the source line was truncated after the categoryCode
    # assignment; the remaining optional parameters and dispatch are restored
    # per the CreateCase API action and the sibling-method pattern.
    if cc_email_addresses is not None:
        params['ccEmailAddresses'] = cc_email_addresses
    if language is not None:
        params['language'] = language
    if issue_type is not None:
        params['issueType'] = issue_type
    if attachment_set_id is not None:
        params['attachmentSetId'] = attachment_set_id
    return self.make_request(action='CreateCase', body=json.dumps(params))
def describe_attachment(self, attachment_id):
    """Return the attachment with the specified ID.

    Attachment IDs are generated when an attachment is added to a case or
    case communication and appear in AttachmentDetails objects returned by
    DescribeCommunications.

    :type attachment_id: string
    :param attachment_id: ID of the attachment to retrieve.
    """
    payload = json.dumps({'attachmentId': attachment_id})
    return self.make_request(action='DescribeAttachment', body=payload)
def describe_cases(self, case_id_list=None, display_id=None, after_time=None, before_time=None, include_resolved_cases=None, next_token=None, max_results=None, language=None, include_communications=None):
    """Return the support cases matching the given IDs and filters.

    Case data is available for 12 months after creation.

    :type case_id_list: list
    :param case_id_list: Case IDs to retrieve.

    :type display_id: string
    :param display_id: Display ID of a single case to retrieve.

    :type after_time: string
    :param after_time: Only cases active after this time.

    :type before_time: string
    :param before_time: Only cases active before this time.

    :type include_resolved_cases: boolean
    :param include_resolved_cases: Whether to include resolved cases.

    :type next_token: string
    :param next_token: Pagination token from a previous call.

    :type max_results: integer
    :param max_results: Maximum number of results per page.

    :type language: string
    :param language: ISO 639-1 support language code.

    :type include_communications: boolean
    :param include_communications: Whether to include case communications.
    """
    params = {}
    if case_id_list is not None:
        params['caseIdList'] = case_id_list
    if display_id is not None:
        params['displayId'] = display_id
    if after_time is not None:
        params['afterTime'] = after_time
    if before_time is not None:
        params['beforeTime'] = before_time
    # NOTE(review): the source line was truncated after the beforeTime
    # assignment; the remaining optional parameters and dispatch are restored
    # per the DescribeCases API action and the sibling-method pattern.
    if include_resolved_cases is not None:
        params['includeResolvedCases'] = include_resolved_cases
    if next_token is not None:
        params['nextToken'] = next_token
    if max_results is not None:
        params['maxResults'] = max_results
    if language is not None:
        params['language'] = language
    if include_communications is not None:
        params['includeCommunications'] = include_communications
    return self.make_request(action='DescribeCases', body=json.dumps(params))
def describe_communications(self, case_id, before_time=None, after_time=None, next_token=None, max_results=None):
    """Return communications (and attachments) for a support case,
    optionally filtered by date and paginated.

    :type case_id: string
    :param case_id: ID of the case whose communications to retrieve.

    :type before_time: string
    :param before_time: Only communications before this time.

    :type after_time: string
    :param after_time: Only communications after this time.

    :type next_token: string
    :param next_token: Pagination token from a previous call.

    :type max_results: integer
    :param max_results: Maximum number of results per page.
    """
    params = {'caseId': case_id}
    if before_time is not None:
        params['beforeTime'] = before_time
    if after_time is not None:
        params['afterTime'] = after_time
    if next_token is not None:
        params['nextToken'] = next_token
    # NOTE(review): the source line was truncated at "params['maxResults']...";
    # the final assignment and dispatch are restored per the
    # DescribeCommunications API action and the sibling-method pattern.
    if max_results is not None:
        params['maxResults'] = max_results
    return self.make_request(action='DescribeCommunications',
                             body=json.dumps(params))
def describe_services(self, service_code_list=None, language=None):
    """Return the current list of AWS services and the service categories
    that apply to each, for use in CreateCase requests.

    :type service_code_list: list
    :param service_code_list: Restrict results to these service codes.

    :type language: string
    :param language: ISO 639-1 support language code.
    """
    optional = {'serviceCodeList': service_code_list, 'language': language}
    params = {k: v for k, v in optional.items() if v is not None}
    return self.make_request(action='DescribeServices', body=json.dumps(params))
def describe_severity_levels(self, language=None):
    """Return the severity levels assignable to an AWS Support case.

    :type language: string
    :param language: ISO 639-1 support language code.
    """
    params = {} if language is None else {'language': language}
    return self.make_request(action='DescribeSeverityLevels',
                             body=json.dumps(params))
def describe_trusted_advisor_check_refresh_statuses(self, check_ids):
    """Return the refresh status of the given Trusted Advisor checks.

    :type check_ids: list
    :param check_ids: IDs of the Trusted Advisor checks (obtainable via
        DescribeTrustedAdvisorChecks).
    """
    payload = json.dumps({'checkIds': check_ids})
    return self.make_request(action='DescribeTrustedAdvisorCheckRefreshStatuses',
                             body=payload)
def describe_trusted_advisor_check_result(self, check_id, language=None):
    """Return the result of a single Trusted Advisor check.

    :type check_id: string
    :param check_id: ID of the check (obtainable via
        DescribeTrustedAdvisorChecks).

    :type language: string
    :param language: ISO 639-1 support language code.
    """
    params = {'checkId': check_id}
    if language is not None:
        params['language'] = language
    return self.make_request(action='DescribeTrustedAdvisorCheckResult',
                             body=json.dumps(params))
def describe_trusted_advisor_check_summaries(self, check_ids):
    """Return result summaries for the given Trusted Advisor checks.

    :type check_ids: list
    :param check_ids: IDs of the Trusted Advisor checks (obtainable via
        DescribeTrustedAdvisorChecks).
    """
    payload = json.dumps({'checkIds': check_ids})
    return self.make_request(action='DescribeTrustedAdvisorCheckSummaries',
                             body=payload)