desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Read the response.
This method does not have the same behavior as
http_client.HTTPResponse.read. Instead, if this method is called with
no ``amt`` arg, then the response body will be cached. Subsequent
calls to ``read()`` with no args **will return the cached response**.'
| def read(self, amt=None):
| if (amt is None):
if (not self._cached_response):
self._cached_response = http_client.HTTPResponse.read(self)
return self._cached_response
else:
return http_client.HTTPResponse.read(self, amt)
|
':type host: str
:param host: The host to make the connection to
:keyword str aws_access_key_id: Your AWS Access Key ID (provided by
Amazon). If none is specified, the value in your
``AWS_ACCESS_KEY_ID`` environmental variable is used.
:keyword str aws_secret_access_key: Your AWS Secret Access Key
(provided by Amazon).... | def __init__(self, host, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, path='/', provider='aws', security_token=None, suppress_consec_slashes=True, validate_certs=True, profile_name=No... | self.suppress_consec_slashes = suppress_consec_slashes
self.num_retries = 6
if config.has_option('Boto', 'is_secure'):
is_secure = config.getboolean('Boto', 'is_secure')
self.is_secure = is_secure
self.https_validate_certificates = config.getbool('Boto', 'https_validate_certificates', valida... |
'mexe - Multi-execute inside a loop, retrying multiple times to handle
transient Internet errors by simply trying again.
Also handles redirects.
This code was inspired by the S3Utils classes posted to the boto-users
Google group by Larry Bates. Thanks!'
| def _mexe(self, request, sender=None, override_num_retries=None, retry_handler=None):
| boto.log.debug(('Method: %s' % request.method))
boto.log.debug(('Path: %s' % request.path))
boto.log.debug(('Data: %s' % request.body))
boto.log.debug(('Headers: %s' % request.headers))
boto.log.debug(('Host: %s' % request.host))
boto.log.debug(('Port: %s' % request.port))
... |
'Makes a request to the server, with stock multiple-retry logic.'
| def make_request(self, method, path, headers=None, data='', host=None, auth_path=None, sender=None, override_num_retries=None, params=None, retry_handler=None):
| if (params is None):
params = {}
http_request = self.build_base_http_request(method, path, auth_path, params, headers, data, host)
return self._mexe(http_request, sender, override_num_retries, retry_handler=retry_handler)
|
'(Optional) Close any open HTTP connections. This is non-destructive,
and making a new request will open a connection again.'
| def close(self):
| boto.log.debug('closing all HTTP connections')
self._connection = None
|
'Serialize a list of structures.
For example::
items = [(\'foo\', \'bar\', \'baz\'), (\'foo2\', \'bar2\', \'baz2\')]
label = \'ParamName.member\'
names = (\'One\', \'Two\', \'Three\')
self.build_complex_list_params(params, items, label, names)
would result in the params dict being updated with these params::
ParamName.... | def build_complex_list_params(self, params, items, label, names):
| for (i, item) in enumerate(items, 1):
current_prefix = ('%s.%s' % (label, i))
for (key, value) in zip(names, item):
full_key = ('%s.%s' % (current_prefix, key))
params[full_key] = value
|
'Connect to this Region\'s endpoint. Returns an connection
object pointing to the endpoint associated with this region.
You may pass any of the arguments accepted by the connection
class\'s constructor as keyword arguments and they will be
passed along to the connection object.
:rtype: Connection object
:return: The co... | def connect(self, **kw_params):
| if self.connection_cls:
return self.connection_cls(region=self, **kw_params)
|
'Returns the stack policy for this stack. If it has no policy
then, a null value is returned.'
| def get_policy(self):
| return self.connection.get_stack_policy(self.stack_id)
|
'Sets a stack policy for this stack.
:type stack_policy_body: string
:param stack_policy_body: Structure containing the stack policy body.
(For more information, go to ` Prevent Updates to Stack Resources`_
in the AWS CloudFormation User Guide.)
You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
passed, o... | def set_policy(self, stack_policy_body=None, stack_policy_url=None):
| return self.connection.set_stack_policy(self.stack_id, stack_policy_body=stack_policy_body, stack_policy_url=stack_policy_url)
|
'Helper that creates JSON parameters needed by a Stack Create or
Stack Update call.
:type stack_name: string
:param stack_name:
The name associated with the stack. The name must be unique within your
AWS account.
Must contain only alphanumeric characters (case sensitive) and start
with an alpha character. Maximum lengt... | def _build_create_or_update_params(self, stack_name, template_body, template_url, parameters, disable_rollback, timeout_in_minutes, notification_arns, capabilities, on_failure, stack_policy_body, stack_policy_url, tags, use_previous_template=None, stack_policy_during_update_body=None, stack_policy_during_update_url=Non... | params = {'ContentType': 'JSON', 'StackName': stack_name, 'DisableRollback': self.encode_bool(disable_rollback)}
if template_body:
params['TemplateBody'] = template_body
if template_url:
params['TemplateURL'] = template_url
if (use_previous_template is not None):
params['UsePrevi... |
'Do a request via ``self.make_request`` and parse the JSON response.
:type call: string
:param call: Call name, e.g. ``CreateStack``
:type params: dict
:param params: Dictionary of call parameters
:type path: string
:param path: Server path
:type method: string
:param method: HTTP method to use
:rtype: dict
:return: Pa... | def _do_request(self, call, params, path, method):
| response = self.make_request(call, params, path, method)
body = response.read().decode('utf-8')
if (response.status == 200):
body = json.loads(body)
return body
else:
boto.log.error(('%s %s' % (response.status, response.reason)))
boto.log.error(('%s' % body))
r... |
'Creates a stack as specified in the template. After the call
completes successfully, the stack creation starts. You can
check the status of the stack via the DescribeStacks API.
Currently, the limit for stacks is 20 stacks per account per
region.
:type stack_name: string
:param stack_name:
The name associated with the... | def create_stack(self, stack_name, template_body=None, template_url=None, parameters=None, notification_arns=None, disable_rollback=None, timeout_in_minutes=None, capabilities=None, tags=None, on_failure=None, stack_policy_body=None, stack_policy_url=None):
| params = self._build_create_or_update_params(stack_name, template_body, template_url, parameters, disable_rollback, timeout_in_minutes, notification_arns, capabilities, on_failure, stack_policy_body, stack_policy_url, tags)
body = self._do_request('CreateStack', params, '/', 'POST')
return body['CreateStack... |
'Updates a stack as specified in the template. After the call
completes successfully, the stack update starts. You can check
the status of the stack via the DescribeStacks action.
**Note: **You cannot update `AWS::S3::Bucket`_ resources, for
example, to add or modify tags.
To get a copy of the template for an existing ... | def update_stack(self, stack_name, template_body=None, template_url=None, parameters=None, notification_arns=None, disable_rollback=False, timeout_in_minutes=None, capabilities=None, tags=None, use_previous_template=None, stack_policy_during_update_body=None, stack_policy_during_update_url=None, stack_policy_body=None,... | params = self._build_create_or_update_params(stack_name, template_body, template_url, parameters, disable_rollback, timeout_in_minutes, notification_arns, capabilities, None, stack_policy_body, stack_policy_url, tags, use_previous_template, stack_policy_during_update_body, stack_policy_during_update_url)
body =... |
'Deletes a specified stack. Once the call completes
successfully, stack deletion starts. Deleted stacks do not
show up in the DescribeStacks API if the deletion has been
completed successfully.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.'
| def delete_stack(self, stack_name_or_id):
| params = {'ContentType': 'JSON', 'StackName': stack_name_or_id}
return self._do_request('DeleteStack', params, '/', 'GET')
|
'Returns all stack related events for a specified stack. For
more information about a stack\'s event history, go to
`Stacks`_ in the AWS CloudFormation User Guide.
Events are returned, even if the stack never existed or has
been successfully deleted.
:type stack_name_or_id: string
:param stack_name_or_id: The name or t... | def describe_stack_events(self, stack_name_or_id=None, next_token=None):
| params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeStackEvents', params, [('member', StackEvent)])
|
'Returns a description of the specified resource in the
specified stack.
For deleted stacks, DescribeStackResource returns resource
information for up to 90 days after the stack has been
deleted.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
Default... | def describe_stack_resource(self, stack_name_or_id, logical_resource_id):
| params = {'ContentType': 'JSON', 'StackName': stack_name_or_id, 'LogicalResourceId': logical_resource_id}
return self._do_request('DescribeStackResource', params, '/', 'GET')
|
'Returns AWS resource descriptions for running and deleted
stacks. If `StackName` is specified, all the associated
resources that are part of the stack are returned. If
`PhysicalResourceId` is specified, the associated resources of
the stack that the resource belongs to are returned.
Only the first 100 resources will b... | def describe_stack_resources(self, stack_name_or_id=None, logical_resource_id=None, physical_resource_id=None):
| params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
if logical_resource_id:
params['LogicalResourceId'] = logical_resource_id
if physical_resource_id:
params['PhysicalResourceId'] = physical_resource_id
return self.get_list('DescribeStackResources', params... |
'Returns the description for the specified stack; if no stack
name was specified, then it returns the description for all
the stacks created.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack.
Default: There is no default value.
:type next_token: string
... | def describe_stacks(self, stack_name_or_id=None, next_token=None):
| params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
if (next_token is not None):
params['NextToken'] = next_token
return self.get_list('DescribeStacks', params, [('member', Stack)])
|
'Returns the template body for a specified stack. You can get
the template for running or deleted stacks.
For deleted stacks, GetTemplate returns the template for up to
90 days after the stack has been deleted.
If the template does not exist, a `ValidationError` is
returned.
:type stack_name_or_id: string
:param stack_... | def get_template(self, stack_name_or_id):
| params = {'ContentType': 'JSON', 'StackName': stack_name_or_id}
return self._do_request('GetTemplate', params, '/', 'GET')
|
'Returns descriptions of all resources of the specified stack.
For deleted stacks, ListStackResources returns resource
information for up to 90 days after the stack has been
deleted.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifier associated
with the stack, which are not always... | def list_stack_resources(self, stack_name_or_id, next_token=None):
| params = {'StackName': stack_name_or_id}
if next_token:
params['NextToken'] = next_token
return self.get_list('ListStackResources', params, [('member', StackResourceSummary)])
|
'Returns the summary information for stacks whose status
matches the specified StackStatusFilter. Summary information
for stacks that have been deleted is kept for 90 days after
the stack is deleted. If no StackStatusFilter is specified,
summary information for all stacks is returned (including
existing stacks and stac... | def list_stacks(self, stack_status_filters=None, next_token=None):
| params = {}
if next_token:
params['NextToken'] = next_token
if (stack_status_filters and (len(stack_status_filters) > 0)):
self.build_list_params(params, stack_status_filters, 'StackStatusFilter.member')
return self.get_list('ListStacks', params, [('member', StackSummary)])
|
'Validates a specified template.
:type template_body: string
:param template_body: String containing the template body. (For more
information, go to `Template Anatomy`_ in the AWS CloudFormation
User Guide.)
Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
passed, only `TemplateBody` is used.
:ty... | def validate_template(self, template_body=None, template_url=None):
| params = {}
if template_body:
params['TemplateBody'] = template_body
if template_url:
params['TemplateURL'] = template_url
if (template_body and template_url):
boto.log.warning('If both TemplateBody and TemplateURL are specified, only TemplateBody will ... |
'Cancels an update on the specified stack. If the call
completes successfully, the stack will roll back the update
and revert to the previous stack configuration.
Only stacks that are in the UPDATE_IN_PROGRESS state can be
canceled.
:type stack_name_or_id: string
:param stack_name_or_id: The name or the unique identifi... | def cancel_update_stack(self, stack_name_or_id=None):
| params = {}
if stack_name_or_id:
params['StackName'] = stack_name_or_id
return self.get_status('CancelUpdateStack', params)
|
'Returns the estimated monthly cost of a template. The return
value is an AWS Simple Monthly Calculator URL with a query
string that describes the resources required to run the
template.
:type template_body: string
:param template_body: Structure containing the template body. (For more
information, go to `Template Anat... | def estimate_template_cost(self, template_body=None, template_url=None, parameters=None):
| params = {'ContentType': 'JSON'}
if (template_body is not None):
params['TemplateBody'] = template_body
if (template_url is not None):
params['TemplateURL'] = template_url
if (parameters and (len(parameters) > 0)):
for (i, (key, value)) in enumerate(parameters):
param... |
'Returns the stack policy for a specified stack. If a stack
doesn\'t have a policy, a null value is returned.
:type stack_name_or_id: string
:param stack_name_or_id: The name or stack ID that is associated with
the stack whose policy you want to get.
:rtype: string
:return: The policy JSON document'
| def get_stack_policy(self, stack_name_or_id):
| params = {'ContentType': 'JSON', 'StackName': stack_name_or_id}
response = self._do_request('GetStackPolicy', params, '/', 'POST')
return response['GetStackPolicyResponse']['GetStackPolicyResult']['StackPolicyBody']
|
'Sets a stack policy for a specified stack.
:type stack_name_or_id: string
:param stack_name_or_id: The name or stack ID that you want to
associate a policy with.
:type stack_policy_body: string
:param stack_policy_body: Structure containing the stack policy body.
(For more information, go to ` Prevent Updates to Stack... | def set_stack_policy(self, stack_name_or_id, stack_policy_body=None, stack_policy_url=None):
| params = {'ContentType': 'JSON', 'StackName': stack_name_or_id}
if (stack_policy_body is not None):
params['StackPolicyBody'] = stack_policy_body
if (stack_policy_url is not None):
params['StackPolicyURL'] = stack_policy_url
response = self._do_request('SetStackPolicy', params, '/', 'POS... |
'Allows a caller to initiate a transaction that atomically transfers
money from a sender\'s payment instrument to the recipient, while
decreasing corresponding debt balance.'
| @needs_caller_reference
@complex_amounts('SettlementAmount')
@requires(['CreditInstrumentId', 'SettlementAmount.Value', 'SenderTokenId', 'SettlementAmount.CurrencyCode'])
@api_action()
def settle_debt(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Gets the latest status of a transaction.'
| @requires(['TransactionId'])
@api_action()
def get_transaction_status(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Returns transactions for a given date range.'
| @requires(['StartDate'])
@api_action()
def get_account_activity(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Returns all details of a transaction.'
| @requires(['TransactionId'])
@api_action()
def get_transaction(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Returns the total outstanding balance for all the credit instruments
for the given creditor account.'
| @api_action()
def get_outstanding_debt_balance(self, action, response):
| return self.get_object(action, {}, response)
|
'Returns the balance available on the given prepaid instrument.'
| @requires(['PrepaidInstrumentId'])
@api_action()
def get_prepaid_balance(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Returns the total liability held by the given account corresponding to
all the prepaid instruments owned by the account.'
| @api_action()
def get_total_prepaid_liability(self, action, response):
| return self.get_object(action, {}, response)
|
'Returns the account balance for an account in real time.'
| @api_action()
def get_account_balance(self, action, response):
| return self.get_object(action, {}, response)
|
'Installs a payment instruction for caller.'
| @needs_caller_reference
@requires(['PaymentInstruction', 'TokenType'])
@api_action()
def install_payment_instruction(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Generate a signed URL for the Co-Branded service API given arguments as
payload.'
| @needs_caller_reference
@requires(['returnURL', 'pipelineName'])
def cbui_url(self, **kw):
| sandbox = ((('sandbox' in self.host) and 'payments-sandbox') or 'payments')
endpoint = 'authorize.{0}.amazon.com'.format(sandbox)
base = '/cobranded-ui/actions/start'
validpipelines = ('SingleUse', 'MultiUse', 'Recurring', 'Recipient', 'SetupPrepaid', 'SetupPostpaid', 'EditToken')
assert (kw['pipeli... |
'Reserve API is part of the Reserve and Settle API conjunction that
serve the purpose of a pay where the authorization and settlement have
a timing difference.'
| @needs_caller_reference
@complex_amounts('TransactionAmount')
@requires(['SenderTokenId', 'TransactionAmount.Value', 'TransactionAmount.CurrencyCode'])
@api_action()
def reserve(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Allows calling applications to move money from a sender to a recipient.'
| @needs_caller_reference
@complex_amounts('TransactionAmount')
@requires(['SenderTokenId', 'TransactionAmount.Value', 'TransactionAmount.CurrencyCode'])
@api_action()
def pay(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Cancels an ongoing transaction and puts it in cancelled state.'
| @requires(['TransactionId'])
@api_action()
def cancel(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'The Settle API is used in conjunction with the Reserve API and is used
to settle previously reserved transaction.'
| @complex_amounts('TransactionAmount')
@requires(['ReserveTransactionId', 'TransactionAmount.Value', 'TransactionAmount.CurrencyCode'])
@api_action()
def settle(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Refunds a previously completed transaction.'
| @complex_amounts('RefundAmount')
@requires(['TransactionId', 'RefundAmount.Value', 'CallerReference', 'RefundAmount.CurrencyCode'])
@api_action()
def refund(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Returns the recipient status.'
| @requires(['RecipientTokenId'])
@api_action()
def get_recipient_verification_status(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Returns the details of a particular token installed by this calling
application using the subway co-branded UI.'
| @requires(['CallerReference'], ['TokenId'])
@api_action()
def get_token_by_caller(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Verify the signature that FPS sent in IPN or callback urls.'
| @requires(['UrlEndPoint', 'HttpParameters'])
@api_action()
def verify_signature(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Returns a list of tokens installed on the given account.'
| @api_action()
def get_tokens(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Returns the usage of a token.'
| @requires(['TokenId'])
@api_action()
def get_token_usage(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Cancels any token installed by the calling application on its own
account.'
| @requires(['TokenId'])
@api_action()
def cancel_token(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Funds the prepaid balance on the given prepaid instrument.'
| @needs_caller_reference
@complex_amounts('FundingAmount')
@requires(['PrepaidInstrumentId', 'FundingAmount.Value', 'SenderTokenId', 'FundingAmount.CurrencyCode'])
@api_action()
def fund_prepaid(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Returns the balance corresponding to the given credit instrument.'
| @requires(['CreditInstrumentId'])
@api_action()
def get_debt_balance(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Allows a creditor to write off the debt balance accumulated partially
or fully at any time.'
| @needs_caller_reference
@complex_amounts('AdjustmentAmount')
@requires(['CreditInstrumentId', 'AdjustmentAmount.Value', 'AdjustmentAmount.CurrencyCode'])
@api_action()
def write_off_debt(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Returns the transactions for a given subscriptionID.'
| @requires(['SubscriptionId'])
@api_action()
def get_transactions_for_subscription(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Returns the details of Subscription for a given subscriptionID.'
| @requires(['SubscriptionId'])
@api_action()
def get_subscription_details(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Cancels a subscription.'
| @needs_caller_reference
@complex_amounts('RefundAmount')
@requires(['SubscriptionId'])
@api_action()
def cancel_subscription_and_refund(self, action, response, **kw):
| message = 'If you specify a RefundAmount, you must specify CallerReference.'
assert ((not ('RefundAmount.Value' in kw)) or ('CallerReference' in kw)), message
return self.get_object(action, kw, response)
|
'Gets the payment instruction of a token.'
| @requires(['TokenId'])
@api_action()
def get_payment_instruction(self, action, response, **kw):
| return self.get_object(action, kw, response)
|
'Connect to an SSH server and authenticate with it.
:type num_retries: int
:param num_retries: The maximum number of connection attempts.'
| def connect(self, num_retries=5):
| retry = 0
while (retry < num_retries):
try:
self._ssh_client.connect(self.server.hostname, username=self.uname, pkey=self._pkey, timeout=self._timeout)
return
except socket.error as xxx_todo_changeme:
(value, message) = xxx_todo_changeme.args
if (v... |
'Open an SFTP session on the SSH server.
:rtype: :class:`paramiko.sftp_client.SFTPClient`
:return: An SFTP client object.'
| def open_sftp(self):
| return self._ssh_client.open_sftp()
|
'Open an SFTP session on the remote host, and copy a file from
the remote host to the specified path on the local host.
:type src: string
:param src: The path to the target file on the remote host.
:type dst: string
:param dst: The path on your local host where you want to
store the file.'
| def get_file(self, src, dst):
| sftp_client = self.open_sftp()
sftp_client.get(src, dst)
|
'Open an SFTP session on the remote host, and copy a file from
the local host to the specified path on the remote host.
:type src: string
:param src: The path to the target file on your local host.
:type dst: string
:param dst: The path on the remote host where you want to store
the file.'
| def put_file(self, src, dst):
| sftp_client = self.open_sftp()
sftp_client.put(src, dst)
|
'Open an SFTP session to the remote host, and open a file on
that host.
:type filename: string
:param filename: The path to the file on the remote host.
:type mode: string
:param mode: The file interaction mode.
:type bufsize: integer
:param bufsize: The file buffer size.
:rtype: :class:`paramiko.sftp_file.SFTPFile`
:r... | def open(self, filename, mode='r', bufsize=(-1)):
| sftp_client = self.open_sftp()
return sftp_client.open(filename, mode, bufsize)
|
'List all of the files and subdirectories at the specified path
on the remote host.
:type path: string
:param path: The base path from which to obtain the list.
:rtype: list
:return: A list of files and subdirectories at the specified path.'
| def listdir(self, path):
| sftp_client = self.open_sftp()
return sftp_client.listdir(path)
|
'Check the specified path on the remote host to determine if
it is a directory.
:type path: string
:param path: The path to the directory that you want to check.
:rtype: integer
:return: If the path is a directory, the function returns 1.
If the path is a file or an invalid path, the function
returns 0.'
| def isdir(self, path):
| status = self.run(('[ -d %s ] || echo "FALSE"' % path))
if status[1].startswith('FALSE'):
return 0
return 1
|
'Check the remote host for the specified path, or a file
at the specified path. This function returns 1 if the
path or the file exist on the remote host, and returns 0 if
the path or the file does not exist on the remote host.
:type path: string
:param path: The path to the directory or file that you want to check.
:rt... | def exists(self, path):
| status = self.run(('[ -a %s ] || echo "FALSE"' % path))
if status[1].startswith('FALSE'):
return 0
return 1
|
'Start an interactive shell session with the remote host.'
| def shell(self):
| channel = self._ssh_client.invoke_shell()
interactive_shell(channel)
|
'Run a command on the remote host.
:type command: string
:param command: The command that you want to send to the remote host.
:rtype: tuple
:return: This function returns a tuple that contains an integer status,
the stdout from the command, and the stderr from the command.'
| def run(self, command):
| boto.log.debug(('running:%s on %s' % (command, self.server.instance_id)))
status = 0
try:
t = self._ssh_client.exec_command(command)
except paramiko.SSHException:
status = 1
std_out = t[1].read()
std_err = t[2].read()
t[0].close()
t[1].close()
t[2].close()
b... |
'Request a pseudo-terminal from a server, and execute a command on that
server.
:type command: string
:param command: The command that you want to run on the remote host.
:rtype: :class:`paramiko.channel.Channel`
:return: An open channel object.'
| def run_pty(self, command):
| boto.log.debug(('running:%s on %s' % (command, self.server.instance_id)))
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.exec_command(command)
return channel
|
'Close an SSH session and any open channels that are tied to it.'
| def close(self):
| transport = self._ssh_client.get_transport()
transport.close()
self.server.reset_cmdshell()
|
'Copy a file from one directory to another.'
| def get_file(self, src, dst):
| shutil.copyfile(src, dst)
|
'Copy a file from one directory to another.'
| def put_file(self, src, dst):
| shutil.copyfile(src, dst)
|
'List all of the files and subdirectories at the specified path.
:rtype: list
:return: Return a list containing the names of the entries
in the directory given by path.'
| def listdir(self, path):
| return os.listdir(path)
|
'Check the specified path to determine if it is a directory.
:rtype: boolean
:return: Returns True if the path is an existing directory.'
| def isdir(self, path):
| return os.path.isdir(path)
|
'Check for the specified path, or check a file at the specified path.
:rtype: boolean
:return: If the path or the file exist, the function returns True.'
| def exists(self, path):
| return os.path.exists(path)
|
'Open a subprocess and run a command on the local host.
:rtype: tuple
:return: This function returns a tuple that contains an integer status
and a string with the combined stdout and stderr output.'
| def run(self):
| boto.log.info(('running:%s' % self.command))
log_fp = StringIO()
process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while (process.poll() is None):
time.sleep(1)
t = process.communicate()
log_fp.write(t[0])
... |
'Create a new instance based on the specified configuration file or the specified
configuration and the passed in parameters.
If the config_file argument is not None, the configuration is read from there.
Otherwise, the cfg argument is used.
The config file may include other config files with a #import reference. The i... | @classmethod
def create(cls, config_file=None, logical_volume=None, cfg=None, **params):
| if config_file:
cfg = Config(path=config_file)
if cfg.has_section('EC2'):
for option in cfg.options('EC2'):
if (option not in params):
params[option] = cfg.get('EC2', option)
getter = CommandLineGetter()
getter.get(cls, params)
region = params.get('region'... |
'Returns a list of all completed snapshots for this volume ID.'
| def get_snapshots(self):
| ec2 = self.get_ec2_connection()
rs = ec2.get_all_snapshots()
all_vols = ([self.volume_id] + self.past_volume_ids)
snaps = []
for snapshot in rs:
if (snapshot.volume_id in all_vols):
if (snapshot.progress == '100%'):
snapshot.date = boto.utils.parse_ts(snapshot.sta... |
'Trim the number of snapshots for this volume. This method always
keeps the oldest snapshot. It then uses the parameters passed in
to determine how many others should be kept.
The algorithm is to keep all snapshots from the current day. Then
it will keep the first snapshot of the day for the previous seven days.
The... | def trim_snapshots(self, delete=False):
| snaps = self.get_snapshots()
if (len(snaps) <= 2):
return snaps
snaps = snaps[1:(-1)]
now = datetime.datetime.now(snaps[0].date.tzinfo)
midnight = datetime.datetime(year=now.year, month=now.month, day=now.day, tzinfo=now.tzinfo)
one_week = datetime.timedelta(days=7, seconds=(60 * 60))
... |
'Determine how long until the next scheduled time for a Task.
Returns the number of seconds until the next scheduled time or zero
if the task needs to be run immediately.
If it\'s an hourly task and it\'s never been run, run it now.
If it\'s a daily task and it\'s never been run and the hour is right, run it now.'
| def check(self):
| boto.log.info(('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed)))
if (self.hourly and (not self.last_executed)):
return 0
if (self.daily and (not self.last_executed)):
if (int(self.hour) == self.now.hour):
return 0
else:
re... |
'Adds or updates tags for the specified Amazon Kinesis stream.
Each stream can have up to 10 tags.
If tags have already been assigned to the stream,
`AddTagsToStream` overwrites any existing tags that correspond
to the specified tag keys.
:type stream_name: string
:param stream_name: The name of the stream.
:type tags:... | def add_tags_to_stream(self, stream_name, tags):
| params = {'StreamName': stream_name, 'Tags': tags}
return self.make_request(action='AddTagsToStream', body=json.dumps(params))
|
'Creates a Amazon Kinesis stream. A stream captures and
transports data records that are continuously emitted from
different data sources or producers . Scale-out within an
Amazon Kinesis stream is explicitly supported by means of
shards, which are uniquely identified groups of data records
in an Amazon Kinesis stream.... | def create_stream(self, stream_name, shard_count):
| params = {'StreamName': stream_name, 'ShardCount': shard_count}
return self.make_request(action='CreateStream', body=json.dumps(params))
|
'Deletes a stream and all its shards and data. You must shut
down any applications that are operating on the stream before
you delete the stream. If an application attempts to operate
on a deleted stream, it will receive the exception
`ResourceNotFoundException`.
If the stream is in the `ACTIVE` state, you can delete i... | def delete_stream(self, stream_name):
| params = {'StreamName': stream_name}
return self.make_request(action='DeleteStream', body=json.dumps(params))
|
'Describes the specified stream.
The information about the stream includes its current status,
its Amazon Resource Name (ARN), and an array of shard objects.
For each shard object, there is information about the hash key
and sequence number ranges that the shard spans, and the IDs
of any earlier shards that played in a... | def describe_stream(self, stream_name, limit=None, exclusive_start_shard_id=None):
| params = {'StreamName': stream_name}
if (limit is not None):
params['Limit'] = limit
if (exclusive_start_shard_id is not None):
params['ExclusiveStartShardId'] = exclusive_start_shard_id
return self.make_request(action='DescribeStream', body=json.dumps(params))
|
'Gets data records from a shard.
Specify a shard iterator using the `ShardIterator` parameter.
The shard iterator specifies the position in the shard from
which you want to start reading data records sequentially. If
there are no records available in the portion of the shard
that the iterator points to, `GetRecords` re... | def get_records(self, shard_iterator, limit=None, b64_decode=True):
| params = {'ShardIterator': shard_iterator}
if (limit is not None):
params['Limit'] = limit
response = self.make_request(action='GetRecords', body=json.dumps(params))
if b64_decode:
for record in response.get('Records', []):
record['Data'] = base64.b64decode(record['Data'].enc... |
'Gets a shard iterator. A shard iterator expires five minutes
after it is returned to the requester.
A shard iterator specifies the position in the shard from
which to start reading data records sequentially. A shard
iterator specifies this position using the sequence number of
a data record in a shard. A sequence numb... | def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type, starting_sequence_number=None):
| params = {'StreamName': stream_name, 'ShardId': shard_id, 'ShardIteratorType': shard_iterator_type}
if (starting_sequence_number is not None):
params['StartingSequenceNumber'] = starting_sequence_number
return self.make_request(action='GetShardIterator', body=json.dumps(params))
|
'Lists your streams.
The number of streams may be too large to return from a single
call to `ListStreams`. You can limit the number of returned
streams using the `Limit` parameter. If you do not specify a
value for the `Limit` parameter, Amazon Kinesis uses the
default limit, which is currently 10.
You can detect if th... | def list_streams(self, limit=None, exclusive_start_stream_name=None):
| params = {}
if (limit is not None):
params['Limit'] = limit
if (exclusive_start_stream_name is not None):
params['ExclusiveStartStreamName'] = exclusive_start_stream_name
return self.make_request(action='ListStreams', body=json.dumps(params))
|
'Lists the tags for the specified Amazon Kinesis stream.
:type stream_name: string
:param stream_name: The name of the stream.
:type exclusive_start_tag_key: string
:param exclusive_start_tag_key: The key to use as the starting point
for the list of tags. If this parameter is set, `ListTagsForStream`
gets all tags that... | def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None, limit=None):
| params = {'StreamName': stream_name}
if (exclusive_start_tag_key is not None):
params['ExclusiveStartTagKey'] = exclusive_start_tag_key
if (limit is not None):
params['Limit'] = limit
return self.make_request(action='ListTagsForStream', body=json.dumps(params))
|
'Merges two adjacent shards in a stream and combines them into
a single shard to reduce the stream\'s capacity to ingest and
transport data. Two shards are considered adjacent if the
union of the hash key ranges for the two shards form a
contiguous set with no gaps. For example, if you have two
shards, one with a hash ... | def merge_shards(self, stream_name, shard_to_merge, adjacent_shard_to_merge):
| params = {'StreamName': stream_name, 'ShardToMerge': shard_to_merge, 'AdjacentShardToMerge': adjacent_shard_to_merge}
return self.make_request(action='MergeShards', body=json.dumps(params))
|
'This operation puts a data record into an Amazon Kinesis
stream from a producer. This operation must be called to send
data from the producer into the Amazon Kinesis stream for
real-time ingestion and subsequent processing. The `PutRecord`
operation requires the name of the stream that captures,
stores, and transports... | def put_record(self, stream_name, data, partition_key, explicit_hash_key=None, sequence_number_for_ordering=None, exclusive_minimum_sequence_number=None, b64_encode=True):
| params = {'StreamName': stream_name, 'Data': data, 'PartitionKey': partition_key}
if (explicit_hash_key is not None):
params['ExplicitHashKey'] = explicit_hash_key
if (sequence_number_for_ordering is not None):
params['SequenceNumberForOrdering'] = sequence_number_for_ordering
if b64_enc... |
'Puts (writes) multiple data records from a producer into an
Amazon Kinesis stream in a single call (also referred to as a
`PutRecords` request). Use this operation to send data from a
data producer into the Amazon Kinesis stream for real-time
ingestion and processing. Each shard can support up to 1000
records written ... | def put_records(self, records, stream_name, b64_encode=True):
| params = {'Records': records, 'StreamName': stream_name}
if b64_encode:
for i in range(len(params['Records'])):
data = params['Records'][i]['Data']
if (not isinstance(data, six.binary_type)):
data = data.encode('utf-8')
params['Records'][i]['Data'] = b... |
'Deletes tags from the specified Amazon Kinesis stream.
If you specify a tag that does not exist, it is ignored.
:type stream_name: string
:param stream_name: The name of the stream.
:type tag_keys: list
:param tag_keys: A list of tag keys. Each corresponding tag is removed
from the stream.'
def remove_tags_from_stream(self, stream_name, tag_keys):
    """Delete tags from the named stream; unknown keys are ignored by the service.

    :type stream_name: string
    :param stream_name: The name of the stream.

    :type tag_keys: list
    :param tag_keys: Keys of the tags to remove.
    """
    body = json.dumps({'StreamName': stream_name, 'TagKeys': tag_keys})
    return self.make_request(action='RemoveTagsFromStream', body=body)
|
'Splits a shard into two new shards in the stream, to increase
the stream\'s capacity to ingest and transport data.
`SplitShard` is called when there is a need to increase the
overall capacity of stream because of an expected increase in
the volume of data records being ingested.
You can also use `SplitShard` when a sh... | def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
| params = {'StreamName': stream_name, 'ShardToSplit': shard_to_split, 'NewStartingHashKey': new_starting_hash_key}
return self.make_request(action='SplitShard', body=json.dumps(params))
|
'Responsible for walking through Params defined for the request and:
* Matching them with keyword parameters passed to the request
constructor or via the command line.
* Checking to see if all required parameters have been specified
and raising an exception, if not.
* Encoding each value into the set of request paramet... | def process_args(self, **args):
| self.args.update(args)
self.connection_args = copy.copy(self.args)
if (('debug' in self.args) and (self.args['debug'] >= 2)):
boto.set_stream_logger(self.name())
required = [p.name for p in (self.Params + self.Args) if (not p.optional)]
for param in (self.Params + self.Args):
if para... |
'This method is responsible for formatting the output for the
command line interface. The default behavior is to call the
generic CLI formatter which attempts to print something
reasonable. If you want specific formatting, you should
override this method and do your own thing.
:type data: dict
:param data: The data r... | def cli_formatter(self, data):
| if data:
self._generic_cli_formatter(self.Response, data)
|
'For command line arguments, just the presence
of the option means True so just return True'
@classmethod
def convert_boolean(cls, param, value):
    """The mere presence of a boolean option on the command line
    means it is set, so the converted value is always True."""
    return True
|
'Convert a string value as received in the command line
tools and convert to the appropriate type of value.
Raise a ValidationError if the value can\'t be converted.
:type value: str
:param value: The value to convert. This should always
be a string.'
def convert(self, value):
    """Convert a string received from the command line to this
    Param's declared type, delegating to the base class converter.

    :type value: str
    :param value: The value to convert; always a string.

    :raises ValidationError: if the value cannot be converted.
    """
    # BUG FIX: the previous code called super(Param, self).convert(self, value).
    # The super() proxy already binds `self`, so passing it again shifted the
    # arguments (value landed in the wrong position / raised TypeError).
    return super(Param, self).convert(value)
|
'Checks for the existence of an AWS credential file.
If the environment variable AWS_CREDENTIAL_FILE is
set and points to a file, that file will be read and
searched for credentials.
Note that if credentials have been explicitly passed
into the class constructor, those values always take
precedence.'
| def check_for_credential_file(self):
| if ('AWS_CREDENTIAL_FILE' in os.environ):
path = os.environ['AWS_CREDENTIAL_FILE']
path = os.path.expanduser(path)
path = os.path.expandvars(path)
if os.path.isfile(path):
fp = open(path)
lines = fp.readlines()
fp.close()
for line in li... |
'First checks to see if a url argument was explicitly passed
in. If so, that will be used. If not, it checks for the
existence of the environment variable specified in ENV_URL.
If this is set, it should contain a fully qualified URL to the
service you want to use.
Note that any values passed explicitly to the class c... | def check_for_env_url(self):
| url = self.args.get('url', None)
if url:
del self.args['url']
if ((not url) and (self.EnvURL in os.environ)):
url = os.environ[self.EnvURL]
if url:
rslt = urlparse.urlparse(url)
if ('is_secure' not in self.args):
if (rslt.scheme == 'https'):
se... |
'Deletes this vault. WARNING!'
def delete(self):
    """Delete this vault through the underlying layer1 connection.
    WARNING! This is irreversible."""
    self.layer1.delete_vault(self.name)
|
'Adds an archive to a vault. For archives greater than 100MB the
multipart upload will be used.
:type file: str
:param file: A filename to upload
:type description: str
:param description: An optional description for the archive.
:rtype: str
:return: The archive id of the newly created archive'
def upload_archive(self, filename, description=None):
    """Add an archive to this vault.

    Files larger than ``SingleOperationThreshold`` bytes go through the
    multipart upload path; smaller ones are uploaded in one operation.

    :type filename: str
    :param filename: A filename to upload.

    :type description: str
    :param description: An optional description for the archive.

    :rtype: str
    :return: The archive id of the newly created archive.
    """
    size = os.path.getsize(filename)
    if size > self.SingleOperationThreshold:
        return self.create_archive_from_file(filename, description=description)
    return self._upload_archive_single_operation(filename, description)
|
'Adds an archive to a vault in a single operation. It\'s recommended for
archives less than 100MB
:type file: str
:param file: A filename to upload
:type description: str
:param description: A description for the archive.
:rtype: str
:return: The archive id of the newly created archive'
def _upload_archive_single_operation(self, filename, description):
    """Upload an archive to the vault in a single operation
    (recommended for archives under the single-operation threshold).

    :type filename: str
    :param filename: A filename to upload.

    :type description: str
    :param description: A description for the archive.

    :rtype: str
    :return: The archive id of the newly created archive.
    """
    with open(filename, 'rb') as archive:
        # Hash the whole file, then rewind so layer1 re-reads it from the start.
        linear_hash, tree_hash = compute_hashes_from_fileobj(archive)
        archive.seek(0)
        response = self.layer1.upload_archive(self.name, archive,
                                              linear_hash, tree_hash,
                                              description)
    return response['ArchiveId']
|
'Create a new archive and begin a multi-part upload to it.
Returns a file-like object to which the data for the archive
can be written. Once all the data is written the file-like
object should be closed, you can then call the get_archive_id
method on it to get the ID of the created archive.
:type part_size: int
:param ... | def create_archive_writer(self, part_size=DefaultPartSize, description=None):
| response = self.layer1.initiate_multipart_upload(self.name, part_size, description)
return Writer(self, response['UploadId'], part_size=part_size)
|
'Create a new archive and upload the data from the given file
or file-like object.
:type filename: str
:param filename: A filename to upload
:type file_obj: file
:param file_obj: A file-like object to upload
:type description: str
:param description: An optional description for the archive.
:type upload_id_callback: fu... | def create_archive_from_file(self, filename=None, file_obj=None, description=None, upload_id_callback=None):
| part_size = self.DefaultPartSize
if (not file_obj):
file_size = os.path.getsize(filename)
try:
part_size = minimum_part_size(file_size, part_size)
except ValueError:
raise UploadArchiveError('File size of %s bytes exceeds 40,000 GB archive ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.