_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q22500
|
_check_gle_response
|
train
|
def _check_gle_response(result):
    """Return getlasterror response as a dict, or raise OperationFailure.

    Raises WTimeoutError on write-concern timeouts, NotMasterError when
    the write hit a non-primary, and DuplicateKeyError for duplicate-key
    error codes; any other error becomes a plain OperationFailure.
    """
    # Did getlasterror itself fail?
    _check_command_response(result)

    if result.get("wtimeout", False):
        # MongoDB versions before 1.8.0 return the error message in an
        # "errmsg" field. If "errmsg" exists "err" will also exist set to
        # None, so we have to check for "errmsg" first.
        message = result.get("errmsg", result.get("err"))
        raise WTimeoutError(message, result.get("code"), result)

    error_msg = result.get("err", "")
    if error_msg is None:
        # A null "err" means the write succeeded.
        return result
    if error_msg.startswith("not master"):
        raise NotMasterError(error_msg, result)

    # mongos returns the error code in an error object for some errors;
    # prefer the error object whose message matches the top-level one.
    details = result
    if "errObjects" in result:
        for candidate in result["errObjects"]:
            if candidate.get("err") == error_msg:
                details = candidate
                break

    code = details.get("code")
    if code in (11000, 11001, 12582):
        raise DuplicateKeyError(details["err"], code, result)
    raise OperationFailure(details["err"], code, result)
|
python
|
{
"resource": ""
}
|
q22501
|
_raise_last_error
|
train
|
def _raise_last_error(bulk_write_result):
    """Backward compatibility helper for insert error handling.

    Raises for the last error in the bulk write result, preferring
    write errors over write concern errors.
    """
    errors = bulk_write_result.get("writeErrors")
    if errors:
        # A write error takes precedence; this call always raises.
        _raise_last_write_error(errors)
    concern_errors = bulk_write_result["writeConcernErrors"]
    _raise_write_concern_error(concern_errors[-1])
|
python
|
{
"resource": ""
}
|
q22502
|
Topology.open
|
train
|
def open(self):
    """Start monitoring, or restart after a fork.

    No effect if called multiple times.

    .. warning:: Topology is shared among multiple threads and is protected
      by mutual exclusion. Using Topology from a process other than the one
      that initialized it will emit a warning and may result in deadlock. To
      prevent this from happening, MongoClient must be created after any
      forking.
    """
    current_pid = os.getpid()
    if self._pid is None:
        # First call: remember which process opened this topology.
        self._pid = current_pid
    elif current_pid != self._pid:
        # Opened in one process, used in another: warn about the
        # fork-safety hazard but carry on.
        warnings.warn(
            "MongoClient opened before fork. Create MongoClient only "
            "after forking. See PyMongo's documentation for details: "
            "http://api.mongodb.org/python/current/faq.html#"
            "is-pymongo-fork-safe")
    with self._lock:
        self._ensure_opened()
|
python
|
{
"resource": ""
}
|
q22503
|
Topology._process_change
|
train
|
def _process_change(self, server_description):
    """Process a new ServerDescription on an opened topology.
    Hold the lock when calling this.

    Updates the topology description, refreshes the server map, folds in
    the server's cluster time, publishes SDAM events when listeners are
    registered, and wakes threads blocked in select_servers().
    """
    td_old = self._description
    if self._publish_server:
        # Fetch the previous description for this address so the
        # server-changed event can report both old and new states.
        old_server_description = td_old._server_descriptions[
            server_description.address]
        self._events.put((
            self._listeners.publish_server_description_changed,
            (old_server_description, server_description,
             server_description.address, self._topology_id)))
    # Derive the new topology description from the incoming server
    # description and install it before updating the server map.
    self._description = updated_topology_description(
        self._description, server_description)
    self._update_servers()
    # The caller already holds the lock, so use the no-lock variant.
    self._receive_cluster_time_no_lock(server_description.cluster_time)
    if self._publish_tp:
        self._events.put((
            self._listeners.publish_topology_description_changed,
            (td_old, self._description, self._topology_id)))
    # Wake waiters in select_servers().
    self._condition.notify_all()
|
python
|
{
"resource": ""
}
|
q22504
|
Topology.get_server_session
|
train
|
def get_server_session(self):
    """Start or resume a server session, or raise ConfigurationError.

    When the logical session timeout is unknown (usually because no
    server has been scanned yet), runs a server selection loop first;
    that loop can raise ServerSelectionTimeoutError.
    """
    with self._lock:
        session_timeout = self._description.logical_session_timeout_minutes
        if session_timeout is None:
            # Maybe we need an initial scan? Can raise ServerSelectionError.
            if self._description.topology_type == TOPOLOGY_TYPE.Single:
                # Single topology: any known server will do.
                if not self._description.has_known_servers:
                    self._select_servers_loop(
                        any_server_selector,
                        self._settings.server_selection_timeout,
                        None)
            elif not self._description.readable_servers:
                # Otherwise wait until at least one readable server exists.
                self._select_servers_loop(
                    readable_server_selector,
                    self._settings.server_selection_timeout,
                    None)
            # Re-read the timeout now that a scan may have completed.
            session_timeout = self._description.logical_session_timeout_minutes
            if session_timeout is None:
                # Still unknown: the deployment cannot support sessions.
                raise ConfigurationError(
                    "Sessions are not supported by this MongoDB deployment")
        return self._session_pool.get_server_session(session_timeout)
|
python
|
{
"resource": ""
}
|
q22505
|
Topology._ensure_opened
|
train
|
def _ensure_opened(self):
    """Start monitors, or restart after a fork.

    Hold the lock when calling this. Safe to call repeatedly; the
    one-time setup only runs on the first invocation.
    """
    first_open = not self._opened
    if first_open:
        self._opened = True
        self._update_servers()
        # The events publishing thread is only needed when listeners are
        # registered for topology or server events.
        if self._publish_tp or self._publish_server:
            self.__events_executor.open()
    # Ensure that the monitors are open (restarts them after a fork).
    for server in itervalues(self._servers):
        server.open()
|
python
|
{
"resource": ""
}
|
q22506
|
Monitor._check_with_retry
|
train
|
def _check_with_retry(self):
    """Call ismaster once or twice. Reset server's pool on error.
    Returns a ServerDescription.
    """
    # According to the spec, if an ismaster call fails we reset the
    # server's pool. If a server was once connected, change its type
    # to Unknown only after retrying once.
    address = self._server_description.address
    # Retry only if the server was previously known; an Unknown server
    # gets a single attempt.
    retry = True
    if self._server_description.server_type == SERVER_TYPE.Unknown:
        retry = False
    start = _time()
    try:
        return self._check_once()
    except ReferenceError:
        # The Topology was garbage-collected; propagate immediately.
        raise
    except Exception as error:
        error_time = _time() - start
        if self._publish:
            self._listeners.publish_server_heartbeat_failed(
                address, error_time, error)
        self._topology.reset_pool(address)
        default = ServerDescription(address, error=error)
        if not retry:
            self._avg_round_trip_time.reset()
            # Server type defaults to Unknown.
            return default
    # Try a second and final time. If it fails return original error.
    # Always send metadata: this is a new connection.
    start = _time()
    try:
        return self._check_once()
    except ReferenceError:
        raise
    except Exception as error:
        error_time = _time() - start
        if self._publish:
            self._listeners.publish_server_heartbeat_failed(
                address, error_time, error)
        self._avg_round_trip_time.reset()
        # Return the description built from the FIRST failure ('default').
        return default
|
python
|
{
"resource": ""
}
|
q22507
|
Monitor._check_once
|
train
|
def _check_once(self):
    """A single attempt to call ismaster.

    Returns a ServerDescription, or raises an exception.
    """
    address = self._server_description.address
    if self._publish:
        self._listeners.publish_server_heartbeat_started(address)
    # Check out a socket, run ismaster, and fold the round trip time
    # into the moving average used for latency-aware selection.
    with self._pool.get_socket({}) as sock_info:
        response, round_trip_time = self._check_with_socket(sock_info)
        self._avg_round_trip_time.add_sample(round_trip_time)
        description = ServerDescription(
            address=address,
            ismaster=response,
            round_trip_time=self._avg_round_trip_time.get())
        if self._publish:
            self._listeners.publish_server_heartbeat_succeeded(
                address, round_trip_time, response)
        return description
|
python
|
{
"resource": ""
}
|
q22508
|
GridOut.readline
|
train
|
def readline(self, size=-1):
    """Read one line or up to `size` bytes from the file.
    :Parameters:
     - `size` (optional): the maximum number of bytes to read
    """
    # Clamp 'size' to the bytes remaining in the file; a negative size
    # means "read to end of file".
    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder
    if size == 0:
        return EMPTY
    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        # Search the chunk for a newline within the read window.
        # NOTE(review): the window end is 'size', not 'size - received';
        # verify this is intended for lines spanning multiple chunks.
        pos = chunk_data.find(NEWLN, 0, size)
        if pos != -1:
            # Truncate the read at the byte just past the newline.
            size = received + pos + 1
        received += len(chunk_data)
        data.write(chunk_data)
        if pos != -1:
            break
    # readchunk() advanced the position past 'size'; rewind the logical
    # position to the end of what will actually be returned.
    self.__position -= received - size
    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
|
python
|
{
"resource": ""
}
|
q22509
|
_GridOutChunkIterator._next_with_retry
|
train
|
def _next_with_retry(self):
    """Return the next chunk and retry once on CursorNotFound.

    We retry on CursorNotFound to maintain backwards compatibility in
    cases where two calls to read occur more than 10 minutes apart (the
    server's default cursor timeout).
    """
    if self._cursor is None:
        self._create_cursor()
    try:
        return self._cursor.next()
    except CursorNotFound:
        # The server timed out our cursor: discard it, rebuild at the
        # current position, and retry exactly once.
        self._cursor.close()
        self._create_cursor()
        return self._cursor.next()
|
python
|
{
"resource": ""
}
|
q22510
|
Collection.__create
|
train
|
def __create(self, options, collation, session):
    """Sends a create command with the given options.

    :Parameters:
      - `options`: extra command options; a "size" entry is coerced to
        a float because the server expects a double.
      - `collation`: optional collation for the new collection.
      - `session`: optional ClientSession.
    """
    command = SON([("create", self.__name)])
    if options:
        # Capped collection size must be sent as a double.
        if "size" in options:
            options["size"] = float(options["size"])
        command.update(options)
    with self._socket_for_writes(session) as sock_info:
        self._command(
            sock_info, command, read_preference=ReadPreference.PRIMARY,
            write_concern=self._write_concern_for(session),
            collation=collation, session=session)
|
python
|
{
"resource": ""
}
|
q22511
|
Collection._legacy_write
|
train
|
def _legacy_write(self, sock_info, name, cmd, op_id,
                  bypass_doc_val, func, *args):
    """Internal legacy unacknowledged write helper.

    Encodes a legacy wire-protocol message with `func`, sends it
    unacknowledged over `sock_info`, and publishes command monitoring
    events when listeners are enabled. Returns whatever the socket's
    legacy_write returns (may be None for unacknowledged writes).
    """
    # Cannot have both unacknowledged write and bypass document validation.
    if bypass_doc_val and sock_info.max_wire_version >= 4:
        raise OperationFailure("Cannot set bypass_document_validation with"
                               " unacknowledged write concern")
    listeners = self.database.client._event_listeners
    publish = listeners.enabled_for_commands
    if publish:
        start = datetime.datetime.now()
    args = args + (sock_info.compression_context,)
    rqst_id, msg, max_size = func(*args)
    if publish:
        # Encoding time counts toward the duration reported to listeners.
        duration = datetime.datetime.now() - start
        listeners.publish_command_start(
            cmd, self.__database.name, rqst_id, sock_info.address, op_id)
        start = datetime.datetime.now()
    try:
        result = sock_info.legacy_write(rqst_id, msg, max_size, False)
    except Exception as exc:
        if publish:
            dur = (datetime.datetime.now() - start) + duration
            if isinstance(exc, OperationFailure):
                details = exc.details
                # Succeed if GLE was successful and this is a write error.
                if details.get("ok") and "n" in details:
                    reply = message._convert_write_result(
                        name, cmd, details)
                    listeners.publish_command_success(
                        dur, reply, name, rqst_id, sock_info.address, op_id)
                    # Re-raise: a write error is still an error for callers.
                    raise
            else:
                # Non-OperationFailure: synthesize failure details.
                details = message._convert_exception(exc)
            listeners.publish_command_failure(
                dur, details, name, rqst_id, sock_info.address, op_id)
        raise
    if publish:
        if result is not None:
            reply = message._convert_write_result(name, cmd, result)
        else:
            # Comply with APM spec.
            reply = {'ok': 1}
        duration = (datetime.datetime.now() - start) + duration
        listeners.publish_command_success(
            duration, reply, name, rqst_id, sock_info.address, op_id)
    return result
|
python
|
{
"resource": ""
}
|
q22512
|
Collection._insert_one
|
train
|
def _insert_one(
        self, doc, ordered,
        check_keys, manipulate, write_concern, op_id, bypass_doc_val,
        session):
    """Internal helper for inserting a single document.

    Returns the document's _id (unless `doc` is a RawBSONDocument, in
    which case None is returned).
    """
    if manipulate:
        doc = self.__database._apply_incoming_manipulators(doc, self)
        # Generate a client-side _id so it can be returned to the caller.
        if not isinstance(doc, RawBSONDocument) and '_id' not in doc:
            doc['_id'] = ObjectId()
        doc = self.__database._apply_incoming_copying_manipulators(doc,
                                                                   self)
    write_concern = write_concern or self.write_concern
    acknowledged = write_concern.acknowledged
    command = SON([('insert', self.name),
                   ('ordered', ordered),
                   ('documents', [doc])])
    if not write_concern.is_server_default:
        command['writeConcern'] = write_concern.document

    def _insert_command(session, sock_info, retryable_write):
        # Callback run (possibly more than once) by _retryable_write.
        if not sock_info.op_msg_enabled and not acknowledged:
            # Legacy OP_INSERT.
            return self._legacy_write(
                sock_info, 'insert', command, op_id,
                bypass_doc_val, message.insert, self.__full_name,
                [doc], check_keys, False, write_concern.document, False,
                self.__write_response_codec_options)
        # bypassDocumentValidation requires wire version >= 4 (MongoDB 3.2+).
        if bypass_doc_val and sock_info.max_wire_version >= 4:
            command['bypassDocumentValidation'] = True
        result = sock_info.command(
            self.__database.name,
            command,
            write_concern=write_concern,
            codec_options=self.__write_response_codec_options,
            check_keys=check_keys,
            session=session,
            client=self.__database.client,
            retryable_write=retryable_write)
        _check_write_command_response(result)

    self.__database.client._retryable_write(
        acknowledged, _insert_command, session)

    if not isinstance(doc, RawBSONDocument):
        return doc.get('_id')
|
python
|
{
"resource": ""
}
|
q22513
|
Collection._aggregate_one_result
|
train
|
def _aggregate_one_result(
        self, sock_info, slave_ok, cmd, collation=None, session=None):
    """Internal helper to run an aggregate that returns a single result.

    Returns the first document of the first batch, or None when the
    aggregation produced no documents.
    """
    reply = self._command(
        sock_info,
        cmd,
        slave_ok,
        codec_options=self.__write_response_codec_options,
        read_concern=self.read_concern,
        collation=collation,
        session=session)
    first_batch = reply['cursor']['firstBatch']
    if not first_batch:
        return None
    return first_batch[0]
|
python
|
{
"resource": ""
}
|
q22514
|
Collection.estimated_document_count
|
train
|
def estimated_document_count(self, **kwargs):
    """Get an estimate of the number of documents in this collection using
    collection metadata.

    This method is **not** supported in a transaction, and does not
    accept a session.

    All optional parameters should be passed as keyword arguments.
    Valid options include:

      - `maxTimeMS` (int): The maximum amount of time to allow this
        operation to run, in milliseconds.

    :Parameters:
      - `**kwargs` (optional): See list of options above.

    .. versionadded:: 3.7
    """
    if 'session' in kwargs:
        raise ConfigurationError(
            'estimated_document_count does not support sessions')
    command = SON([('count', self.__name)])
    command.update(kwargs)
    return self._count(command)
|
python
|
{
"resource": ""
}
|
q22515
|
Collection.count_documents
|
train
|
def count_documents(self, filter, session=None, **kwargs):
    """Count the number of documents in this collection.
    The :meth:`count_documents` method is supported in a transaction.
    All optional parameters should be passed as keyword arguments
    to this method. Valid options include:
      - `skip` (int): The number of matching documents to skip before
        returning results.
      - `limit` (int): The maximum number of documents to count. Must be
        a positive integer. If not provided, no limit is imposed.
      - `maxTimeMS` (int): The maximum amount of time to allow this
        operation to run, in milliseconds.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. This option is only supported
        on MongoDB 3.4 and above.
      - `hint` (string or list of tuples): The index to use. Specify either
        the index name as a string or the index specification as a list of
        tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
        This option is only supported on MongoDB 3.6 and above.
    The :meth:`count_documents` method obeys the :attr:`read_preference` of
    this :class:`Collection`.
    .. note:: When migrating from :meth:`count` to :meth:`count_documents`
       the following query operators must be replaced:
       +-------------+-------------------------------------+
       | Operator    | Replacement                         |
       +=============+=====================================+
       | $where      | `$expr`_                            |
       +-------------+-------------------------------------+
       | $near       | `$geoWithin`_ with `$center`_       |
       +-------------+-------------------------------------+
       | $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
       +-------------+-------------------------------------+
       $expr requires MongoDB 3.6+
    :Parameters:
      - `filter` (required): A query document that selects which documents
        to count in the collection. Can be an empty document to count all
        documents.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): See list of options above.
    .. versionadded:: 3.7
    .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
    .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
    .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
    .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
    """
    # Implemented as an aggregation: $match the filter, optionally
    # $skip/$limit, then $group to sum the matching documents.
    pipeline = [{'$match': filter}]
    if 'skip' in kwargs:
        pipeline.append({'$skip': kwargs.pop('skip')})
    if 'limit' in kwargs:
        pipeline.append({'$limit': kwargs.pop('limit')})
    pipeline.append({'$group': {'_id': 1, 'n': {'$sum': 1}}})
    cmd = SON([('aggregate', self.__name),
               ('pipeline', pipeline),
               ('cursor', {})])
    # A hint given as a list of (key, direction) pairs must be converted
    # to an index document before being sent to the server.
    if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
        kwargs["hint"] = helpers._index_document(kwargs["hint"])
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    # Remaining kwargs (maxTimeMS, hint, ...) go directly on the command.
    cmd.update(kwargs)

    def _cmd(session, server, sock_info, slave_ok):
        # No result document means nothing matched: the count is zero.
        result = self._aggregate_one_result(
            sock_info, slave_ok, cmd, collation, session)
        if not result:
            return 0
        return result['n']

    return self.__database.client._retryable_read(
        _cmd, self._read_preference_for(session), session)
|
python
|
{
"resource": ""
}
|
q22516
|
Collection.index_information
|
train
|
def index_information(self, session=None):
    """Get information on this collection's indexes.

    Returns a dictionary mapping index names (as returned by
    create_index()) to dictionaries describing each index. Every value
    contains at least a ``"key"`` entry holding the (key, direction)
    pairs that define the index, plus any other index metadata; the
    ``"name"`` key is removed from the value since it becomes the
    dictionary key. Example output:

    >>> db.test.create_index("x", unique=True)
    u'x_1'
    >>> db.test.index_information()
    {u'_id_': {u'key': [(u'_id', 1)]},
     u'x_1': {u'unique': True, u'key': [(u'x', 1)]}}

    :Parameters:
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    info = {}
    for index in self.list_indexes(session=session):
        # Expose the key spec as (key, direction) pairs.
        index["key"] = index["key"].items()
        as_dict = dict(index)
        info[as_dict.pop("name")] = as_dict
    return info
|
python
|
{
"resource": ""
}
|
q22517
|
Collection._map_reduce
|
train
|
def _map_reduce(self, map, reduce, out, session, read_pref, **kwargs):
    """Internal mapReduce helper.

    Builds and runs the mapReduce command. Read concern is applied only
    for inline output on servers that support it; write concern is
    applied only when the output goes to a collection.
    """
    cmd = SON([("mapReduce", self.__name),
               ("map", map),
               ("reduce", reduce),
               ("out", out)])
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    cmd.update(kwargs)
    inline = 'inline' in out
    if inline:
        # Inline output returns its documents under "results".
        user_fields = {'results': 1}
    else:
        user_fields = None
    # A transaction's read preference overrides the caller's.
    read_pref = ((session and session._txn_read_preference())
                 or read_pref)
    with self.__database.client._socket_for_reads(read_pref, session) as (
            sock_info, slave_ok):
        # Read concern only applies to inline output, on wire version
        # >= 4 (MongoDB 3.2+), and only if the caller didn't set one.
        if (sock_info.max_wire_version >= 4 and
                ('readConcern' not in cmd) and
                inline):
            read_concern = self.read_concern
        else:
            read_concern = None
        # Writing output to a collection: apply this collection's
        # write concern unless the caller supplied one explicitly.
        if 'writeConcern' not in cmd and not inline:
            write_concern = self._write_concern_for(session)
        else:
            write_concern = None
        return self._command(
            sock_info, cmd, slave_ok, read_pref,
            read_concern=read_concern,
            write_concern=write_concern,
            collation=collation, session=session,
            user_fields=user_fields)
|
python
|
{
"resource": ""
}
|
q22518
|
Collection.find_one_and_delete
|
train
|
def find_one_and_delete(self, filter,
                        projection=None, sort=None, session=None, **kwargs):
    """Finds a single document and deletes it, returning the document.

    >>> db.test.count_documents({'x': 1})
    2
    >>> db.test.find_one_and_delete({'x': 1})
    {u'x': 1, u'_id': ObjectId('54f4e12bfba5220aa4d6dee8')}
    >>> db.test.count_documents({'x': 1})
    1

    When several documents match *filter*, *sort* chooses which one is
    deleted (the first in sort order), and *projection* limits the
    fields returned.

    :Parameters:
      - `filter`: A query that matches the document to delete.
      - `projection` (optional): a list of field names to return, or a
        mapping specifying fields to include or exclude. With a list,
        "_id" is always returned; use a mapping to exclude it
        (e.g. projection={'_id': False}).
      - `sort` (optional): a list of (key, direction) pairs specifying
        the sort order; the first matching document is deleted.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): additional command arguments (for example
        maxTimeMS with recent server versions).

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    .. versionchanged:: 3.4
       Added the `collation` option.
    .. versionchanged:: 3.2
       Respects write concern.
    .. warning:: Starting in PyMongo 3.2, this command uses the
       :class:`~pymongo.write_concern.WriteConcern` of this
       :class:`~pymongo.collection.Collection` when connected to MongoDB >=
       3.2. Note that using an elevated write concern with this command may
       be slower compared to using the default write concern.
    .. versionadded:: 3.0
    """
    # findAndModify with remove=True deletes and returns the document.
    kwargs['remove'] = True
    return self.__find_and_modify(
        filter, projection, sort, session=session, **kwargs)
|
python
|
{
"resource": ""
}
|
q22519
|
ChangeStream._full_pipeline
|
train
|
def _full_pipeline(self):
    """Return the full aggregation pipeline for this ChangeStream.

    The result is the $changeStream stage followed by any user-supplied
    stages.
    """
    stages = [{'$changeStream': self._pipeline_options()}]
    stages.extend(self._pipeline)
    return stages
|
python
|
{
"resource": ""
}
|
q22520
|
ChangeStream._run_aggregation_cmd
|
train
|
def _run_aggregation_cmd(self, session, explicit_session):
    """Run the full aggregation pipeline for this ChangeStream and return
    the corresponding CommandCursor.
    """
    read_preference = self._target._read_preference_for(session)
    client = self._database.client

    def _cmd(session, server, sock_info, slave_ok):
        # Retryable-read callback: build and run the aggregate command.
        pipeline = self._full_pipeline()
        cmd = SON([("aggregate", self._aggregation_target),
                   ("pipeline", pipeline),
                   ("cursor", {})])
        result = sock_info.command(
            self._database.name,
            cmd,
            slave_ok,
            read_preference,
            self._target.codec_options,
            parse_write_concern_error=True,
            read_concern=self._target.read_concern,
            collation=self._collation,
            session=session,
            client=self._database.client)
        cursor = result["cursor"]
        # Remember the operation time so the stream can resume from its
        # creation point when no resume token has been observed yet
        # (wire version >= 7, i.e. MongoDB 4.0+).
        if (self._start_at_operation_time is None and
                self._resume_token is None and
                cursor.get("_id") is None and
                sock_info.max_wire_version >= 7):
            self._start_at_operation_time = result["operationTime"]
        # Subsequent getMores must target the namespace the server used.
        ns = cursor["ns"]
        _, collname = ns.split(".", 1)
        aggregation_collection = self._database.get_collection(
            collname, codec_options=self._target.codec_options,
            read_preference=read_preference,
            write_concern=self._target.write_concern,
            read_concern=self._target.read_concern
        )
        return CommandCursor(
            aggregation_collection, cursor, sock_info.address,
            batch_size=self._batch_size or 0,
            max_await_time_ms=self._max_await_time_ms,
            session=session, explicit_session=explicit_session)

    return client._retryable_read(_cmd, read_preference, session)
|
python
|
{
"resource": ""
}
|
q22521
|
ChangeStream._resume
|
train
|
def _resume(self):
    """Reestablish this change stream after a resumable error."""
    try:
        self._cursor.close()
    except PyMongoError:
        # Best effort: the old cursor may already be dead server-side.
        pass
    self._cursor = self._create_cursor()
|
python
|
{
"resource": ""
}
|
q22522
|
ChangeStream.try_next
|
train
|
def try_next(self):
    """Advance the cursor without blocking indefinitely.

    Returns the next change document without waiting indefinitely for
    one to arrive. For example::

        with db.collection.watch() as stream:
            while stream.alive:
                change = stream.try_next()
                if change is not None:
                    print(change)
                elif stream.alive:
                    # No recent changes. Sleep for a while to avoid
                    # flooding the server with getMore requests.
                    time.sleep(10)

    If no change document is cached locally this runs a single getMore
    command; when that getMore returns no documents (because there have
    been no changes) ``None`` is returned.

    :Returns:
      The next change document or ``None`` when no document is available
      after running a single getMore or when the cursor is closed.

    .. versionadded:: 3.8
    """
    # Attempt to get the next change with at most one getMore and at most
    # one resume attempt.
    try:
        change = self._cursor._try_next(True)
    except ConnectionFailure:
        self._resume()
        change = self._cursor._try_next(False)
    except OperationFailure as exc:
        if exc.code in _NON_RESUMABLE_GETMORE_ERRORS:
            raise
        self._resume()
        change = self._cursor._try_next(False)

    if change is None:
        # No changes are available right now.
        return None

    try:
        resume_token = change['_id']
    except KeyError:
        self.close()
        raise InvalidOperation(
            "Cannot provide resume functionality when the resume "
            "token is missing.")
    # Stash the token so a later resume can pick up after this change.
    self._resume_token = copy.copy(resume_token)
    self._start_at_operation_time = None
    if self._decode_custom:
        return _bson_to_dict(change.raw, self._orig_codec_options)
    return change
|
python
|
{
"resource": ""
}
|
q22523
|
validate_non_negative_int_or_basestring
|
train
|
def validate_non_negative_int_or_basestring(option, value):
    """Validates that 'value' is a non-negative integer or a string.

    Integers, and strings that parse as integers, are validated as
    non-negative; any other string is returned unchanged.
    """
    if isinstance(value, integer_types):
        # Previously integers bypassed the non-negative check that
        # numeric strings received; validate both paths consistently.
        return validate_non_negative_integer(option, value)
    elif isinstance(value, string_type):
        try:
            val = int(value)
        except ValueError:
            # Not numeric: accept the string as-is.
            return value
        return validate_non_negative_integer(option, val)
    raise TypeError("Wrong type for %s, value must be a "
                    "non negative integer or a string" % (option,))
|
python
|
{
"resource": ""
}
|
q22524
|
validate_type_registry
|
train
|
def validate_type_registry(option, value):
    """Validate the type_registry option."""
    # None is allowed; anything else must be a TypeRegistry instance.
    if value is None or isinstance(value, TypeRegistry):
        return value
    raise TypeError("%s must be an instance of %s" % (
        option, TypeRegistry))
|
python
|
{
"resource": ""
}
|
q22525
|
validate_driver_or_none
|
train
|
def validate_driver_or_none(option, value):
    """Validate the driver keyword arg."""
    # None is allowed; anything else must be a DriverInfo instance.
    if value is not None and not isinstance(value, DriverInfo):
        raise TypeError("%s must be an instance of DriverInfo" % (option,))
    return value
|
python
|
{
"resource": ""
}
|
q22526
|
validate_is_callable_or_none
|
train
|
def validate_is_callable_or_none(option, value):
    """Validates that 'value' is a callable or None."""
    if value is not None and not callable(value):
        raise ValueError("%s must be a callable" % (option,))
    return value
|
python
|
{
"resource": ""
}
|
q22527
|
BaseObject._read_preference_for
|
train
|
def _read_preference_for(self, session):
"""Read only access to the read preference of this instance or session.
"""
# Override this operation's read preference with the transaction's.
if session:
return session._txn_read_preference() or self.__read_preference
return self.__read_preference
|
python
|
{
"resource": ""
}
|
q22528
|
get_data_files
|
train
|
def get_data_files(top):
    """Get data files.

    Walks *top* and returns (directory-relative-to-repo, [file paths])
    tuples suitable for a setuptools ``data_files`` argument.
    """
    # Strip the repo prefix (plus separator) from each directory.
    prefix_len = len(here + os.path.sep)
    files = []
    for directory, _, filenames in os.walk(top):
        paths = [os.path.join(directory, name) for name in filenames]
        files.append((directory[prefix_len:], paths))
    return files
|
python
|
{
"resource": ""
}
|
q22529
|
run
|
train
|
def run(cmd, *args, **kwargs):
    """Echo a command before running it. Defaults to repo as cwd.

    `cmd` may be an argument list or a whitespace-separated string; a
    string is split before logging and execution. Remaining args/kwargs
    are forwarded to ``check_call``.
    """
    # Normalize to an argument list BEFORE logging so the echoed line
    # matches what is executed (list2cmdline on a plain string would
    # quote every character as a separate argument).
    if not isinstance(cmd, list):
        cmd = cmd.split()
    log.info('> ' + list2cmdline(cmd))
    kwargs.setdefault('cwd', here)
    # Windows needs a shell to resolve .bat/.cmd entry points.
    kwargs.setdefault('shell', sys.platform == 'win32')
    return check_call(cmd, *args, **kwargs)
|
python
|
{
"resource": ""
}
|
q22530
|
combine_commands
|
train
|
def combine_commands(*commands):
    """Return a Command that combines several commands."""
    class CombinedCommand(Command):
        def initialize_options(self):
            # Instantiate every sub-command against the same distribution.
            self.commands = [cls(self.distribution) for cls in commands]
            for cmd in self.commands:
                cmd.initialize_options()

        def finalize_options(self):
            for cmd in self.commands:
                cmd.finalize_options()

        def run(self):
            # Run the sub-commands in the order they were given.
            for cmd in self.commands:
                cmd.run()

    return CombinedCommand
|
python
|
{
"resource": ""
}
|
q22531
|
run_gradle
|
train
|
def run_gradle(path=kernel_path, cmd='build', skip_tests=False):
    """Return a Command for running gradle scripts.

    Parameters
    ----------
    path: str, optional
        The base path of the gradle project. Defaults to `kernel_path`.
    cmd: str, optional
        The command to run with gradlew. Defaults to 'build'.
    skip_tests: bool, optional
        Whether to pass ``-Dskip.tests=True`` to the gradle invocation.
    """
    class Gradle(BaseCommand):
        description = 'Run gradle script'
        def skip_test_option(self, skip):
            # Gradle system property consumed by the build scripts.
            if skip:
                return '-Dskip.tests=True'
            else:
                return '-Dskip.tests=False'
        def run(self):
            # './gradlew' on POSIX; bare 'gradlew' resolves on Windows.
            run([('' if sys.platform == 'win32' else './') + 'gradlew', '--no-daemon', cmd,
                 self.skip_test_option(skip_tests)], cwd=path)
    return Gradle
|
python
|
{
"resource": ""
}
|
q22532
|
Init.read_init_status
|
train
|
def read_init_status(self):
    """Read the initialization status of Vault.

    Supported methods:
        GET: /sys/init. Produces: 200 application/json

    :return: The JSON response of the request.
    :rtype: dict
    """
    response = self._adapter.get(url='/v1/sys/init')
    return response.json()
|
python
|
{
"resource": ""
}
|
q22533
|
Init.initialize
|
train
|
def initialize(self, secret_shares=5, secret_threshold=3, pgp_keys=None, root_token_pgp_key=None,
               stored_shares=None, recovery_shares=None, recovery_threshold=None, recovery_pgp_keys=None):
    """Initialize a new Vault.

    The Vault must not have been previously initialized. The recovery options, as well as the stored shares option,
    are only available when using Vault HSM.

    Supported methods:
        PUT: /sys/init. Produces: 200 application/json

    :param secret_shares: The number of shares to split the master key into.
    :type secret_shares: int
    :param secret_threshold: Specifies the number of shares required to reconstruct the master key. This must be
        less than or equal secret_shares. If using Vault HSM with auto-unsealing, this value must be the same as
        secret_shares.
    :type secret_threshold: int
    :param pgp_keys: List of PGP public keys used to encrypt the output unseal keys.
        Ordering is preserved. The keys must be base64-encoded from their original binary representation.
        The size of this array must be the same as secret_shares.
    :type pgp_keys: list
    :param root_token_pgp_key: Specifies a PGP public key used to encrypt the initial root token. The
        key must be base64-encoded from its original binary representation.
    :type root_token_pgp_key: str | unicode
    :param stored_shares: <enterprise only> Specifies the number of shares that should be encrypted by the HSM and
        stored for auto-unsealing. Currently must be the same as secret_shares.
    :type stored_shares: int
    :param recovery_shares: <enterprise only> Specifies the number of shares to split the recovery key into.
    :type recovery_shares: int
    :param recovery_threshold: <enterprise only> Specifies the number of shares required to reconstruct the recovery
        key. This must be less than or equal to recovery_shares.
    :type recovery_threshold: int
    :param recovery_pgp_keys: <enterprise only> Specifies an array of PGP public keys used to encrypt the output
        recovery keys. Ordering is preserved. The keys must be base64-encoded from their original binary
        representation. The size of this array must be the same as recovery_shares.
    :type recovery_pgp_keys: list
    :return: The JSON response of the request.
    :rtype: dict
    :raises ParamValidationError: If dependent arguments are inconsistent
        (e.g. recovery_threshold without / exceeding recovery_shares).
    """
    params = {
        'secret_shares': secret_shares,
        'secret_threshold': secret_threshold,
        'root_token_pgp_key': root_token_pgp_key,
    }
    if pgp_keys is not None:
        if len(pgp_keys) != secret_shares:
            raise ParamValidationError('length of pgp_keys list argument must equal secret_shares value')
        params['pgp_keys'] = pgp_keys
    if stored_shares is not None:
        if stored_shares != secret_shares:
            raise ParamValidationError('value for stored_shares argument must equal secret_shares argument')
        params['stored_shares'] = stored_shares
    if recovery_shares is not None:
        params['recovery_shares'] = recovery_shares
    if recovery_threshold is not None:
        # Guard the comparison: without recovery_shares, 'int > None'
        # raises TypeError on Python 3 instead of a validation error.
        if recovery_shares is None or recovery_threshold > recovery_shares:
            error_msg = 'value for recovery_threshold argument must be less than or equal to recovery_shares argument'
            raise ParamValidationError(error_msg)
        params['recovery_threshold'] = recovery_threshold
    if recovery_pgp_keys is not None:
        if len(recovery_pgp_keys) != recovery_shares:
            raise ParamValidationError('length of recovery_pgp_keys list argument must equal recovery_shares value')
        params['recovery_pgp_keys'] = recovery_pgp_keys
    api_path = '/v1/sys/init'
    response = self._adapter.put(
        url=api_path,
        json=params,
    )
    return response.json()
|
python
|
{
"resource": ""
}
|
q22534
|
KvV1.list_secrets
|
train
|
def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT):
    """List the key names located under the given path.

    Folder entries are suffixed with ``/``. The target must be a folder; listing a
    file returns no value. No policy-based filtering is performed on the key names,
    so avoid encoding sensitive information in them. The secret values themselves
    are not returned by this call.

    Supported methods:
        LIST: /{mount_point}/{path}. Produces: 200 application/json

    :param path: Path of the secrets to list, appended to the request URL.
    :type path: str | unicode
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the list_secrets request.
    :rtype: dict
    """
    url = '/v1/{mount_point}/{path}'.format(mount_point=mount_point, path=path)
    return self._adapter.list(url=url).json()
|
python
|
{
"resource": ""
}
|
q22535
|
KvV1.create_or_update_secret
|
train
|
def create_or_update_secret(self, path, secret, method=None, mount_point=DEFAULT_MOUNT_POINT):
    """Store a secret at the specified location.

    If the value does not yet exist, the calling token must have an ACL policy granting the create capability.
    If the value already exists, the calling token must have an ACL policy granting the update capability.

    Supported methods:
        POST: /{mount_point}/{path}. Produces: 204 (empty body)
        PUT: /{mount_point}/{path}. Produces: 204 (empty body)

    :param path: Specifies the path of the secrets to create/update. This is specified as part of the URL.
    :type path: str | unicode
    :param secret: Specifies keys, paired with associated values, to be held at the given location. Multiple
        key/value pairs can be specified, and all will be returned on a read operation. A key called ttl will
        trigger some special behavior. See the Vault KV secrets engine documentation for details.
    :type secret: dict
    :param method: Optional parameter to explicitly request a POST (create) or PUT (update) request to the selected
        kv secret engine. If no argument is provided for this parameter, hvac attempts to intelligently determine
        which method is appropriate.
    :type method: str | unicode
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :raises hvac.exceptions.ParamValidationError: If ``method`` is neither POST, PUT, nor None.
    :return: The response of the create_or_update_secret request.
    :rtype: requests.Response
    """
    if method is None:
        # If no method was selected by the caller, use the result of a `read_secret()` call to determine if we need
        # to perform an update (PUT) or creation (POST) request.
        try:
            self.read_secret(
                path=path,
                mount_point=mount_point,
            )
            method = 'PUT'
        except exceptions.InvalidPath:
            method = 'POST'
    if method not in ('POST', 'PUT'):
        error_message = '"method" parameter provided invalid value; POST or PUT allowed, "{method}" provided'.format(method=method)
        raise exceptions.ParamValidationError(error_message)
    # Both the POST and PUT cases issue the same HTTP POST via the adapter (as the
    # original duplicated branches did); the branches are merged to avoid repetition.
    api_path = '/v1/{mount_point}/{path}'.format(mount_point=mount_point, path=path)
    return self._adapter.post(
        url=api_path,
        json=secret,
    )
|
python
|
{
"resource": ""
}
|
q22536
|
KvV1.delete_secret
|
train
|
def delete_secret(self, path, mount_point=DEFAULT_MOUNT_POINT):
    """Remove the secret stored at the given location.

    Supported methods:
        DELETE: /{mount_point}/{path}. Produces: 204 (empty body)

    :param path: Path of the secret to delete, appended to the request URL.
    :type path: str | unicode
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The response of the delete_secret request.
    :rtype: requests.Response
    """
    url = '/v1/{mount_point}/{path}'.format(mount_point=mount_point, path=path)
    return self._adapter.delete(url=url)
|
python
|
{
"resource": ""
}
|
q22537
|
Radius.configure
|
train
|
def configure(self, host, secret, port=1812, unregistered_user_policies=None, dial_timeout=10, nas_port=10,
              mount_point=DEFAULT_MOUNT_POINT):
    """Configure the RADIUS auth method.

    Supported methods:
        POST: /auth/{mount_point}/config. Produces: 204 (empty body)

    :param host: RADIUS server to connect to (e.g. radius.myorg.com, 127.0.0.1).
    :type host: str | unicode
    :param secret: The RADIUS shared secret.
    :type secret: str | unicode
    :param port: UDP port the RADIUS server listens on. Defaults to 1812.
    :type port: int
    :param unregistered_user_policies: Policies to grant to unregistered users; sent as a comma-separated list.
    :type unregistered_user_policies: list
    :param dial_timeout: Seconds to wait for a backend connection before timing out. Defaults to 10.
    :type dial_timeout: int
    :param nas_port: The NAS-Port attribute of the RADIUS request. Defaults to 10.
    :type nas_port: int
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :raises hvac.exceptions.ParamValidationError: If ``unregistered_user_policies`` is neither a list nor None.
    :return: The response of the configure request.
    :rtype: requests.Response
    """
    # Validate the optional list argument up front so no request is issued on bad input.
    if unregistered_user_policies is not None and not isinstance(unregistered_user_policies, list):
        error_msg = (
            '"unregistered_user_policies" argument must be an instance of list or None, '
            '"{unregistered_user_policies}" provided.'
        ).format(unregistered_user_policies=type(unregistered_user_policies))
        raise exceptions.ParamValidationError(error_msg)
    params = {
        'host': host,
        'secret': secret,
        'port': port,
        'dial_timeout': dial_timeout,
        'nas_port': nas_port,
    }
    if unregistered_user_policies is not None:
        params['unregistered_user_policies'] = ','.join(unregistered_user_policies)
    url = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point)
    return self._adapter.post(url=url, json=params)
|
python
|
{
"resource": ""
}
|
q22538
|
Aws.configure_root_iam_credentials
|
train
|
def configure_root_iam_credentials(self, access_key, secret_key, region=None, iam_endpoint=None, sts_endpoint=None,
                                   max_retries=-1, mount_point=DEFAULT_MOUNT_POINT):
    """Configure the root IAM credentials used to communicate with AWS.

    Root IAM credentials can reach the Vault server several ways, listed below in
    decreasing precedence. Existing credentials are overwritten by this call. The
    official AWS SDK sources credentials from env vars, shared files, or IAM/ECS
    instances.

    * Static credentials provided to the API as a payload
    * The AWS_ACCESS_KEY, AWS_SECRET_KEY, and AWS_REGION environment variables on the server
    * Shared credentials files
    * Assigned IAM role or ECS task role credentials

    At present this endpoint does not confirm that the provided AWS credentials are
    valid or carry the proper permissions.

    Supported methods:
        POST: /{mount_point}/config/root. Produces: 204 (empty body)

    :param access_key: The AWS access key ID.
    :type access_key: str | unicode
    :param secret_key: The AWS secret access key.
    :type secret_key: str | unicode
    :param region: The AWS region. If unset, falls back to the AWS_REGION env var,
        then AWS_DEFAULT_REGION, then us-east-1.
    :type region: str | unicode
    :param iam_endpoint: Custom HTTP IAM endpoint to use.
    :type iam_endpoint: str | unicode
    :param sts_endpoint: Custom HTTP STS endpoint to use.
    :type sts_endpoint: str | unicode
    :param max_retries: Max retries for recoverable errors; the default (-1) defers
        to the AWS SDK's default behavior.
    :type max_retries: int
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    payload = dict(
        access_key=access_key,
        secret_key=secret_key,
        region=region,
        iam_endpoint=iam_endpoint,
        sts_endpoint=sts_endpoint,
        max_retries=max_retries,
    )
    url = '/v1/{mount_point}/config/root'.format(mount_point=mount_point)
    return self._adapter.post(url=url, json=payload)
|
python
|
{
"resource": ""
}
|
q22539
|
Aws.rotate_root_iam_credentials
|
train
|
def rotate_root_iam_credentials(self, mount_point=DEFAULT_MOUNT_POINT):
    """Rotate the static root IAM credentials.

    When Vault is configured with static credentials, this endpoint makes Vault
    rotate the access key it uses. Due to AWS eventual consistency, subsequent
    calls from Vault to AWS may fail for a few seconds after rotation until AWS
    becomes consistent again. Vault's AWS access key MUST be the only access key
    on the IAM user, otherwise generating a new key fails; afterwards Vault is
    the only entity that knows the AWS secret key in use.

    Supported methods:
        POST: /{mount_point}/config/rotate-root. Produces: 200 application/json

    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    url = '/v1/{mount_point}/config/rotate-root'.format(mount_point=mount_point)
    return self._adapter.post(url=url).json()
|
python
|
{
"resource": ""
}
|
q22540
|
Aws.configure_lease
|
train
|
def configure_lease(self, lease, lease_max, mount_point=DEFAULT_MOUNT_POINT):
    """Set the lease settings for the AWS secrets engine.

    Calling this is optional; defaults exist for both lease and lease_max.

    Supported methods:
        POST: /{mount_point}/config/lease. Produces: 204 (empty body)

    :param lease: Lease value as a string duration with time suffix; "h" (hour)
        is the largest suffix.
    :type lease: str | unicode
    :param lease_max: Maximum lease value as a string duration with time suffix;
        "h" (hour) is the largest suffix.
    :type lease_max: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    url = '/v1/{mount_point}/config/lease'.format(mount_point=mount_point)
    payload = {
        'lease': lease,
        'lease_max': lease_max,
    }
    return self._adapter.post(url=url, json=payload)
|
python
|
{
"resource": ""
}
|
q22541
|
Aws.read_lease_config
|
train
|
def read_lease_config(self, mount_point=DEFAULT_MOUNT_POINT):
    """Fetch the current lease settings for the AWS secrets engine.

    Supported methods:
        GET: /{mount_point}/config/lease. Produces: 200 application/json

    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    url = '/v1/{mount_point}/config/lease'.format(mount_point=mount_point)
    return self._adapter.get(url=url).json()
|
python
|
{
"resource": ""
}
|
q22542
|
Aws.create_or_update_role
|
train
|
def create_or_update_role(self, name, credential_type, policy_document=None, default_sts_ttl=None, max_sts_ttl=None,
                          role_arns=None, policy_arns=None, legacy_params=False, mount_point=DEFAULT_MOUNT_POINT):
    """Create or update the role with the given name.

    If a role with the name does not exist, it will be created. If the role exists, it will be updated with the new
    attributes.

    Supported methods:
        POST: /{mount_point}/roles/{name}. Produces: 204 (empty body)

    :param name: Specifies the name of the role to create. This is part of the request URL.
    :type name: str | unicode
    :param credential_type: Specifies the type of credential to be used when retrieving credentials from the role.
        Must be one of iam_user, assumed_role, or federation_token.
    :type credential_type: str | unicode
    :param policy_document: The IAM policy document for the role. The behavior depends on the credential type. With
        iam_user, the policy document will be attached to the IAM user generated and augment the permissions the IAM
        user has. With assumed_role and federation_token, the policy document will act as a filter on what the
        credentials can do.
    :type policy_document: dict | str | unicode
    :param default_sts_ttl: The default TTL for STS credentials. When a TTL is not specified when STS credentials
        are requested, and a default TTL is specified on the role, then this default TTL will be used. Valid only
        when credential_type is one of assumed_role or federation_token.
    :type default_sts_ttl: str | unicode
    :param max_sts_ttl: The max allowed TTL for STS credentials (credentials TTL are capped to max_sts_ttl). Valid
        only when credential_type is one of assumed_role or federation_token.
    :type max_sts_ttl: str | unicode
    :param role_arns: Specifies the ARNs of the AWS roles this Vault role is allowed to assume. Required when
        credential_type is assumed_role and prohibited otherwise. This is a comma-separated string or JSON array.
        String types supported for Vault legacy parameters.
    :type role_arns: list | str | unicode
    :param policy_arns: Specifies the ARNs of the AWS managed policies to be attached to IAM users when they are
        requested. Valid only when credential_type is iam_user. When credential_type is iam_user, at least one of
        policy_arns or policy_document must be specified. This is a comma-separated string or JSON array.
    :type policy_arns: list
    :param legacy_params: Flag to send legacy (Vault versions < 0.11.0) parameters in the request. When this is set
        to True, policy_document and policy_arns are the only parameters used from this method.
    :type legacy_params: bool
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :raises hvac.exceptions.ParamValidationError: If ``credential_type`` is not a supported type.
    :return: The response of the request.
    :rtype: requests.Response
    """
    if credential_type not in ALLOWED_CREDS_TYPES:
        error_msg = 'invalid credential_type argument provided "{arg}", supported types: "{allowed_types}"'
        raise exceptions.ParamValidationError(error_msg.format(
            arg=credential_type,
            allowed_types=', '.join(ALLOWED_CREDS_TYPES),
        ))
    if isinstance(policy_document, dict):
        # Vault expects the IAM policy as a JSON string, not a nested object.
        policy_document = json.dumps(policy_document, indent=4, sort_keys=True)
    if legacy_params:
        # Support for Vault <0.11.0, which only accepts a single policy / single ARN.
        if isinstance(policy_arns, list):
            # Guard against an empty list to avoid an IndexError; send no ARN instead.
            arn = policy_arns[0] if policy_arns else None
        else:
            arn = policy_arns
        params = {
            'policy': policy_document,
            'arn': arn,
        }
    else:
        params = {
            'credential_type': credential_type,
            'policy_document': policy_document,
            'default_sts_ttl': default_sts_ttl,
            'max_sts_ttl': max_sts_ttl,
            'role_arns': role_arns,
            'policy_arns': policy_arns,
        }
    api_path = '/v1/{mount_point}/roles/{name}'.format(
        mount_point=mount_point,
        name=name,
    )
    return self._adapter.post(
        url=api_path,
        json=params,
    )
|
python
|
{
"resource": ""
}
|
q22543
|
Aws.delete_role
|
train
|
def delete_role(self, name, mount_point=DEFAULT_MOUNT_POINT):
    """Delete an existing role by name.

    If the role does not exist, a 404 is returned.

    Supported methods:
        DELETE: /{mount_point}/roles/{name}. Produces: 204 (empty body)

    :param name: The name of the role to delete. This is part of the request URL.
    :type name: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    url = '/v1/{mount_point}/roles/{name}'.format(mount_point=mount_point, name=name)
    return self._adapter.delete(url=url)
|
python
|
{
"resource": ""
}
|
q22544
|
Aws.generate_credentials
|
train
|
def generate_credentials(self, name, role_arn=None, ttl="3600s", endpoint='creds', mount_point=DEFAULT_MOUNT_POINT):
    """Generate credentials based on the named role.

    The role must exist before it can be queried. The /aws/creds and /aws/sts
    endpoints are almost identical; the exception is retrieving credentials for a
    role specified with the legacy arn or policy parameter. In that case,
    credentials retrieved through /aws/sts must be of the assumed_role or
    federation_token types, and credentials retrieved through /aws/creds must be
    of the iam_user type.

    :param name: Name of the role to generate credentials against; part of the request URL.
    :type name: str | unicode
    :param role_arn: ARN of the role to assume when the Vault role's credential_type is
        assumed_role. Must match one of the allowed role ARNs in the Vault role.
        Optional if the Vault role allows only a single AWS role ARN; required otherwise.
    :type role_arn: str | unicode
    :param ttl: TTL for the use of the STS token, given as a string with a duration
        suffix. Valid only when credential_type is assumed_role or federation_token.
        When unspecified, the role's default sts_ttl applies, falling back to 3600s.
        AWS caps the maximum TTL; see the AWS DurationSeconds documentation for
        AssumeRole and GetFederationToken.
    :type ttl: str | unicode
    :param endpoint: Supported endpoints:
        GET: /{mount_point}/creds/{name}. Produces: 200 application/json
        GET: /{mount_point}/sts/{name}. Produces: 200 application/json
    :type endpoint: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :raises hvac.exceptions.ParamValidationError: If ``endpoint`` is not a supported endpoint.
    :return: The JSON response of the request.
    :rtype: dict
    """
    if endpoint not in ALLOWED_CREDS_ENDPOINTS:
        error_msg = 'invalid endpoint argument provided "{arg}", supported types: "{allowed_endpoints}"'
        raise exceptions.ParamValidationError(error_msg.format(
            arg=endpoint,
            allowed_endpoints=', '.join(ALLOWED_CREDS_ENDPOINTS),
        ))
    url = '/v1/{mount_point}/{endpoint}/{name}'.format(
        mount_point=mount_point,
        endpoint=endpoint,
        name=name,
    )
    payload = {
        'name': name,
        'role_arn': role_arn,
        'ttl': ttl,
    }
    return self._adapter.post(url=url, json=payload).json()
|
python
|
{
"resource": ""
}
|
q22545
|
Azure.configure
|
train
|
def configure(self, subscription_id, tenant_id, client_id="", client_secret="", environment='AzurePublicCloud',
              mount_point=DEFAULT_MOUNT_POINT):
    """Configure the credentials required for the plugin to perform API calls to Azure.

    These credentials will be used to query roles and create/delete service principals. Environment variables will
    override any parameters set in the config.

    Supported methods:
        POST: /{mount_point}/config. Produces: 204 (empty body)

    :param subscription_id: The subscription id for the Azure Active Directory
    :type subscription_id: str | unicode
    :param tenant_id: The tenant id for the Azure Active Directory.
    :type tenant_id: str | unicode
    :param client_id: The OAuth2 client id to connect to Azure.
    :type client_id: str | unicode
    :param client_secret: The OAuth2 client secret to connect to Azure.
    :type client_secret: str | unicode
    :param environment: The Azure environment. If not specified, Vault will use Azure Public Cloud.
    :type environment: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :raises hvac.exceptions.ParamValidationError: If ``environment`` is not a supported Azure environment.
    :return: The response of the request.
    :rtype: requests.Response
    """
    if environment not in VALID_ENVIRONMENTS:
        error_msg = 'invalid environment argument provided "{arg}", supported environments: "{environments}"'
        raise exceptions.ParamValidationError(error_msg.format(
            arg=environment,
            environments=','.join(VALID_ENVIRONMENTS),
        ))
    params = {
        'subscription_id': subscription_id,
        'tenant_id': tenant_id,
        'client_id': client_id,
        'client_secret': client_secret,
        'environment': environment,
    }
    api_path = '/v1/{mount_point}/config'.format(mount_point=mount_point)
    return self._adapter.post(
        url=api_path,
        json=params,
    )
|
python
|
{
"resource": ""
}
|
q22546
|
Azure.delete_config
|
train
|
def delete_config(self, mount_point=DEFAULT_MOUNT_POINT):
    """Delete the stored Azure configuration and credentials.

    Supported methods:
        DELETE: /{mount_point}/config. Produces: 204 (empty body)

    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/{mount_point}/config'.format(mount_point=mount_point)
    return self._adapter.delete(
        url=api_path,
    )
|
python
|
{
"resource": ""
}
|
q22547
|
Azure.create_or_update_role
|
train
|
def create_or_update_role(self, name, azure_roles, ttl="", max_ttl="", mount_point=DEFAULT_MOUNT_POINT):
    """Create or update a Vault role.

    The provided Azure roles must already exist for this call to succeed. See
    the Azure secrets roles docs for more information about roles.

    Supported methods:
        POST: /{mount_point}/roles/{name}. Produces: 204 (empty body)

    :param name: Name of the role.
    :type name: str | unicode
    :param azure_roles: List of Azure roles to be assigned to the generated service principal.
    :type azure_roles: list(dict)
    :param ttl: Default TTL for service principals generated with this role. Accepts
        time-suffixed strings ("1h") or an integer number of seconds. Defaults to
        the system/engine default TTL time.
    :type ttl: str | unicode
    :param max_ttl: Maximum TTL for service principals generated with this role.
        Accepts time-suffixed strings ("1h") or an integer number of seconds.
        Defaults to the system/engine max TTL time.
    :type max_ttl: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    url = '/v1/{mount_point}/roles/{name}'.format(mount_point=mount_point, name=name)
    payload = {
        # Vault expects the role list serialized as a JSON string.
        'azure_roles': json.dumps(azure_roles),
        'ttl': ttl,
        'max_ttl': max_ttl,
    }
    return self._adapter.post(url=url, json=payload)
|
python
|
{
"resource": ""
}
|
q22548
|
Ldap.configure
|
train
|
def configure(self, user_dn, group_dn, url='ldap://127.0.0.1', case_sensitive_names=False, starttls=False,
              tls_min_version='tls12', tls_max_version='tls12', insecure_tls=False, certificate=None, bind_dn=None,
              bind_pass=None, user_attr='cn', discover_dn=False, deny_null_bind=True, upn_domain=None,
              group_filter=DEFAULT_GROUP_FILTER, group_attr='cn', mount_point=DEFAULT_MOUNT_POINT):
    """
    Configure the LDAP auth method.

    Supported methods:
        POST: /auth/{mount_point}/config. Produces: 204 (empty body)

    :param user_dn: Base DN under which to perform user search. Example: ou=Users,dc=example,dc=com
    :type user_dn: str | unicode
    :param group_dn: LDAP search base to use for group membership search. This can be the root containing either
        groups or users. Example: ou=Groups,dc=example,dc=com
    :type group_dn: str | unicode
    :param url: The LDAP server to connect to. Examples: ldap://ldap.myorg.com, ldaps://ldap.myorg.com:636.
        Multiple URLs can be specified with commas, e.g. ldap://ldap.myorg.com,ldap://ldap2.myorg.com; these will be
        tried in-order.
    :type url: str | unicode
    :param case_sensitive_names: If set, user and group names assigned to policies within the backend will be case
        sensitive. Otherwise, names will be normalized to lower case. Case will still be preserved when sending the
        username to the LDAP server at login time; this is only for matching local user/group definitions.
    :type case_sensitive_names: bool
    :param starttls: If true, issues a StartTLS command after establishing an unencrypted connection.
    :type starttls: bool
    :param tls_min_version: Minimum TLS version to use. Accepted values are tls10, tls11 or tls12.
    :type tls_min_version: str | unicode
    :param tls_max_version: Maximum TLS version to use. Accepted values are tls10, tls11 or tls12.
    :type tls_max_version: str | unicode
    :param insecure_tls: If true, skips LDAP server SSL certificate verification - insecure, use with caution!
    :type insecure_tls: bool
    :param certificate: CA certificate to use when verifying LDAP server certificate, must be x509 PEM encoded.
    :type certificate: str | unicode
    :param bind_dn: Distinguished name of object to bind when performing user search. Example:
        cn=vault,ou=Users,dc=example,dc=com
    :type bind_dn: str | unicode
    :param bind_pass: Password to use along with binddn when performing user search.
    :type bind_pass: str | unicode
    :param user_attr: Attribute on user attribute object matching the username passed when authenticating. Examples:
        sAMAccountName, cn, uid
    :type user_attr: str | unicode
    :param discover_dn: Use anonymous bind to discover the bind DN of a user.
    :type discover_dn: bool
    :param deny_null_bind: This option prevents users from bypassing authentication when providing an empty password.
    :type deny_null_bind: bool
    :param upn_domain: The userPrincipalDomain used to construct the UPN string for the authenticating user. The
        constructed UPN will appear as [username]@UPNDomain. Example: example.com, which will cause vault to bind as
        username@example.com.
    :type upn_domain: str | unicode
    :param group_filter: Go template used when constructing the group membership query. The template can access the
        following context variables: [UserDN, Username]. The default is
        `(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`, which is compatible with several
        common directory schemas. To support nested group resolution for Active Directory, instead use the following
        query: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}})).
    :type group_filter: str | unicode
    :param group_attr: LDAP attribute to follow on objects returned by groupfilter in order to enumerate user group
        membership. Examples: for groupfilter queries returning group objects, use: cn. For queries returning user
        objects, use: memberOf. The default is cn.
    :type group_attr: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the configure request.
    :rtype: requests.Response
    """
    params = {
        'userdn': user_dn,
        'groupdn': group_dn,
        'url': url,
        'case_sensitive_names': case_sensitive_names,
        'starttls': starttls,
        'tls_min_version': tls_min_version,
        'tls_max_version': tls_max_version,
        'insecure_tls': insecure_tls,
        # 'certificate' is always sent (possibly None); the former redundant
        # re-assignment under an `is not None` check below has been removed.
        'certificate': certificate,
        'userattr': user_attr,
        'discoverdn': discover_dn,
        'deny_null_bind': deny_null_bind,
        'groupfilter': group_filter,
        'groupattr': group_attr,
    }
    # Fill out params dictionary with any optional parameters provided
    if upn_domain is not None:
        params['upndomain'] = upn_domain
    if bind_dn is not None:
        params['binddn'] = bind_dn
    if bind_pass is not None:
        params['bindpass'] = bind_pass
    api_path = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point)
    return self._adapter.post(
        url=api_path,
        json=params,
    )
|
python
|
{
"resource": ""
}
|
q22549
|
Ldap.create_or_update_group
|
train
|
def create_or_update_group(self, name, policies=None, mount_point=DEFAULT_MOUNT_POINT):
    """
    Create or update LDAP group policies.

    Supported methods:
        POST: /auth/{mount_point}/groups/{name}. Produces: 204 (empty body)

    :param name: The name of the LDAP group
    :type name: str | unicode
    :param policies: Policies associated with the group; joined into a
        comma-delimited string before being sent to Vault.
    :type policies: list
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :raises hvac.exceptions.ParamValidationError: If ``policies`` is neither a list nor None.
    :return: The response of the create_or_update_group request.
    :rtype: requests.Response
    """
    group_policies = [] if policies is None else policies
    if not isinstance(group_policies, list):
        error_msg = '"policies" argument must be an instance of list or None, "{policies_type}" provided.'.format(
            policies_type=type(group_policies),
        )
        raise exceptions.ParamValidationError(error_msg)
    api_path = '/v1/auth/{mount_point}/groups/{name}'.format(
        mount_point=mount_point,
        name=name,
    )
    return self._adapter.post(
        url=api_path,
        json={'policies': ','.join(group_policies)},
    )
|
python
|
{
"resource": ""
}
|
q22550
|
Ldap.create_or_update_user
|
train
|
def create_or_update_user(self, username, policies=None, groups=None, mount_point=DEFAULT_MOUNT_POINT):
    """
    Create or update LDAP user policies and group associations.

    Supported methods:
        POST: /auth/{mount_point}/users/{username}. Produces: 204 (empty body)

    :param username: The username of the LDAP user
    :type username: str | unicode
    :param policies: Policies associated with the user; joined into a
        comma-delimited string before being sent to Vault.
    :type policies: str | unicode
    :param groups: Groups associated with the user; joined into a
        comma-delimited string before being sent to Vault.
    :type groups: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :raises hvac.exceptions.ParamValidationError: If ``policies`` or ``groups`` is neither a list nor None.
    :return: The response of the create_or_update_user request.
    :rtype: requests.Response
    """
    user_policies = [] if policies is None else policies
    user_groups = [] if groups is None else groups
    # Both arguments must be lists so they can be comma-joined for Vault.
    for param_name, param_arg in (('policies', user_policies), ('groups', user_groups)):
        if not isinstance(param_arg, list):
            error_msg = '"{param_name}" argument must be an instance of list or None, "{param_type}" provided.'.format(
                param_name=param_name,
                param_type=type(param_arg),
            )
            raise exceptions.ParamValidationError(error_msg)
    api_path = '/v1/auth/{mount_point}/users/{username}'.format(
        mount_point=mount_point,
        username=username,
    )
    payload = {
        'policies': ','.join(user_policies),
        'groups': ','.join(user_groups),
    }
    return self._adapter.post(url=api_path, json=payload)
|
python
|
{
"resource": ""
}
|
q22551
|
Wrapping.unwrap
|
train
|
def unwrap(self, token=None):
    """Return the original response wrapped inside the given wrapping token.

    Unlike simply reading cubbyhole/response (which is deprecated), this endpoint
    performs extra validation checks on the token, returns the original value on
    the wire rather than a JSON string representation of it, and ensures the
    response is properly audit-logged.

    Supported methods:
        POST: /sys/wrapping/unwrap. Produces: 200 application/json

    :param token: The wrapping token ID. Required if the client token is not the
        wrapping token; do not supply the wrapping token in both locations.
    :type token: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    payload = {} if token is None else {'token': token}
    return self._adapter.post(
        url='/v1/sys/wrapping/unwrap',
        json=payload,
    ).json()
|
python
|
{
"resource": ""
}
|
q22552
|
KvV2.configure
|
train
|
def configure(self, max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT):
    """Set backend-level settings applied to every key in the key-value store.

    Supported methods:
        POST: /{mount_point}/config. Produces: 204 (empty body)

    :param max_versions: Number of versions to keep per key. Applies to all keys,
        though a key's metadata setting can override it. Once a key exceeds the
        configured number of versions, the oldest version is permanently deleted.
        Defaults to 10.
    :type max_versions: int
    :param cas_required: If true, all keys will require the cas parameter to be
        set on all write requests.
    :type cas_required: bool
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    payload = {'max_versions': max_versions}
    if cas_required is not None:
        payload['cas_required'] = cas_required
    url = '/v1/{mount_point}/config'.format(mount_point=mount_point)
    return self._adapter.post(url=url, json=payload)
|
python
|
{
"resource": ""
}
|
q22553
|
KvV2.read_secret_version
|
train
|
def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT):
    """Fetch the secret stored at the given path.

    Supported methods:
        GET: /{mount_point}/data/{path}. Produces: 200 application/json

    :param path: Path of the secret to read, appended to the request URL.
    :type path: str | unicode
    :param version: Version of the secret to return; the latest version is returned when omitted.
    :type version: int
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    api_path = '/v1/{mount_point}/data/{path}'.format(mount_point=mount_point, path=path)
    query = {} if version is None else {'version': version}
    return self._adapter.get(url=api_path, params=query).json()
|
python
|
{
"resource": ""
}
|
q22554
|
KvV2.create_or_update_secret
|
train
|
def create_or_update_secret(self, path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT):
    """Write a new version of a secret at the given path.

    When the value does not yet exist the calling token needs the create capability; when it already
    exists the token needs the update capability.

    Supported methods:
        POST: /{mount_point}/data/{path}. Produces: 200 application/json

    :param path: Path
    :type path: str | unicode
    :param cas: Check-And-Set value. When omitted the write is always allowed. A value of 0 only permits
        the write when the key does not exist yet; a non-zero value only permits the write when it
        matches the key's current version.
    :type cas: int
    :param secret: Dict whose contents are stored and returned on read.
    :type secret: dict
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    options = {}
    if cas is not None:
        options['cas'] = cas
    payload = {
        'options': options,
        'data': secret,
    }
    api_path = '/v1/{mount_point}/data/{path}'.format(mount_point=mount_point, path=path)
    return self._adapter.post(url=api_path, json=payload).json()
|
python
|
{
"resource": ""
}
|
q22555
|
KvV2.patch
|
train
|
def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT):
    """Merge new data into an existing secret without clobbering unrelated keys.

    Reads the current version, overlays the provided dict, and writes the result back using the read
    version as the cas value so concurrent writers cannot be silently overwritten.

    :param path: Path
    :type path: str | unicode
    :param secret: Dict whose contents are merged into (and returned with) the stored secret.
    :type secret: dict
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the create_or_update_secret request.
    :rtype: dict
    """
    try:
        read_response = self.read_secret_version(
            path=path,
            mount_point=mount_point,
        )
    except exceptions.InvalidPath:
        # Patching only makes sense against data that already exists.
        raise exceptions.InvalidPath('No value found at "{path}"; patch only works on existing data.'.format(path=path))
    # Overlay the caller-supplied keys onto the current contents.
    merged = read_response['data']['data']
    merged.update(secret)
    current_version = read_response['data']['metadata']['version']
    return self.create_or_update_secret(
        path=path,
        cas=current_version,
        secret=merged,
        mount_point=mount_point,
    )
|
python
|
{
"resource": ""
}
|
q22556
|
KvV2.delete_secret_versions
|
train
|
def delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT):
    """Soft-delete the specified versions of a secret.

    The versions are marked deleted and stop being returned from reads, but the underlying data is kept
    and can be restored via the undelete path.

    Supported methods:
        POST: /{mount_point}/delete/{path}. Produces: 204 (empty body)

    :param path: Path of the secret to delete, appended to the request URL.
    :type path: str | unicode
    :param versions: Non-empty list of integer version numbers to mark deleted. The versioned data is
        not destroyed, merely hidden from normal get requests.
    :type versions: list of int
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    # Reject anything that is not a non-empty list before issuing the request.
    if not (isinstance(versions, list) and versions):
        error_msg = 'argument to "versions" must be a list containing one or more integers, "{versions}" provided.'.format(
            versions=versions
        )
        raise exceptions.ParamValidationError(error_msg)
    api_path = '/v1/{mount_point}/delete/{path}'.format(mount_point=mount_point, path=path)
    return self._adapter.post(
        url=api_path,
        json={'versions': versions},
    )
|
python
|
{
"resource": ""
}
|
q22557
|
KvV2.read_secret_metadata
|
train
|
def read_secret_metadata(self, path, mount_point=DEFAULT_MOUNT_POINT):
    """Fetch the metadata and version listing for the secret at the given path.

    Supported methods:
        GET: /{mount_point}/metadata/{path}. Produces: 200 application/json

    :param path: Path of the secret to inspect, appended to the request URL.
    :type path: str | unicode
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    api_path = '/v1/{mount_point}/metadata/{path}'.format(mount_point=mount_point, path=path)
    return self._adapter.get(url=api_path).json()
|
python
|
{
"resource": ""
}
|
q22558
|
KvV2.update_metadata
|
train
|
def update_metadata(self, path, max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT):
    """Update the max_versions or cas_required settings on an existing path.

    Supported methods:
        POST: /{mount_point}/metadata/{path}. Produces: 204 (empty body)

    :param path: Path
    :type path: str | unicode
    :param max_versions: Number of versions retained per key. When omitted the backend's configured
        maximum applies. Versions beyond the limit are permanently deleted, oldest first.
    :type max_versions: int
    :param cas_required: When True, writes to this key must supply the cas parameter. When omitted the
        backend's configuration applies.
    :type cas_required: bool
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    # Validate cas_required up front; a truthy non-bool (e.g. a string) would silently misconfigure.
    if cas_required is not None and not isinstance(cas_required, bool):
        error_msg = 'bool expected for cas_required param, {type} received'.format(type=type(cas_required))
        raise exceptions.ParamValidationError(error_msg)
    payload = {}
    if max_versions is not None:
        payload['max_versions'] = max_versions
    if cas_required is not None:
        payload['cas_required'] = cas_required
    api_path = '/v1/{mount_point}/metadata/{path}'.format(mount_point=mount_point, path=path)
    return self._adapter.post(
        url=api_path,
        json=payload,
    )
|
python
|
{
"resource": ""
}
|
q22559
|
Gcp.configure
|
train
|
def configure(self, credentials="", google_certs_endpoint=GCP_CERTS_ENDPOINT, mount_point=DEFAULT_MOUNT_POINT):
    """Set up the Google Cloud credentials the GCP auth method uses for API calls.

    Vault uses these credentials to query the status of IAM entities and to fetch service-account or
    other Google public certificates when verifying signed JWTs during login.

    Supported methods:
        POST: /auth/{mount_point}/config. Produces: 204 (empty body)

    :param credentials: JSON string holding the contents of a GCP credentials file. The credentials need
        the `iam.serviceAccounts.get` and `iam.serviceAccountKeys.get` permissions. When empty, Vault
        falls back to Application Default Credentials on the Vault server host. The project must have
        the iam.googleapis.com API enabled.
    :type credentials: str | unicode
    :param google_certs_endpoint: Google OAuth2 endpoint for public certificates. Intended for testing;
        end users should normally leave the default.
    :type google_certs_endpoint: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point)
    payload = {
        'credentials': credentials,
        'google_certs_endpoint': google_certs_endpoint,
    }
    return self._adapter.post(url=api_path, json=payload)
|
python
|
{
"resource": ""
}
|
q22560
|
Gcp.create_role
|
train
|
def create_role(self, name, role_type, project_id, ttl="", max_ttl="", period="", policies=None,
                bound_service_accounts=None, max_jwt_exp='15m', allow_gce_inference=True, bound_zones=None,
                bound_regions=None, bound_instance_groups=None, bound_labels=None, mount_point=DEFAULT_MOUNT_POINT):
    """Register a role in the GCP auth method.

    Role types have specific entities that can perform login operations against this endpoint. Constraints specific
    to the role type must be set on the role. These are applied to the authenticated entities attempting to
    login.

    Supported methods:
        POST: /auth/{mount_point}/role/{name}. Produces: 204 (empty body)

    :param name: The name of the role.
    :type name: str | unicode
    :param role_type: The type of this role ("iam" or "gce"). Certain parameters correspond to specific
        role types; arguments supplied for the other role type are ignored with a logged warning.
    :type role_type: str | unicode
    :param project_id: The GCP project ID. Only entities belonging to this project can authenticate with this role.
    :type project_id: str | unicode
    :param ttl: The TTL period of tokens issued using this role. Integer number of seconds or a duration
        value like "5m".
    :type ttl: str | unicode
    :param max_ttl: The maximum allowed lifetime of tokens issued using this role. Integer number of
        seconds or a duration value like "5m".
    :type max_ttl: str | unicode
    :param period: If set, tokens generated using this role never expire but must be renewed within this
        duration; each renewal resets the token TTL to this value.
    :type period: str | unicode
    :param policies: The list of policies to be set on tokens issued using this role.
    :type policies: list
    :param bound_service_accounts: <required for iam> Service account emails or IDs that login is
        restricted to. `*` allows all service accounts (still bound by project). Inferred from the
        service account used to issue metadata tokens for GCE instances.
    :type bound_service_accounts: list
    :param max_jwt_exp: <iam only> Seconds past authentication time within which the login JWT must
        expire. GCE metadata tokens do not allow the exp claim to be customized.
    :type max_jwt_exp: str | unicode
    :param allow_gce_inference: <iam only> Whether GCE instances may authenticate by inferring service
        accounts from the GCE identity metadata token.
    :type allow_gce_inference: bool
    :param bound_zones: <gce only> Zones a GCE instance must belong to. If bound_instance_groups is
        provided it is assumed to be a zonal group in one of these zones.
    :type bound_zones: list
    :param bound_regions: <gce only> Regions a GCE instance must belong to. Ignored when bound_zones is
        provided.
    :type bound_regions: list
    :param bound_instance_groups: <gce only> Instance groups an authorized instance must belong to.
        Requires bound_zones or bound_regions to be set as well.
    :type bound_instance_groups: list
    :param bound_labels: <gce only> "key:value" label strings that must be set on authorized GCE
        instances. GCP labels are not ACL'd, so combine this with other restrictions.
    :type bound_labels: list
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The data key from the JSON response of the request.
    :rtype: requests.Response
    """
    # Default argument values per role type, used to detect (and warn about) arguments that were
    # explicitly supplied but do not apply to the selected role type.
    type_specific_params = {
        'iam': {
            'max_jwt_exp': '15m',
            'allow_gce_inference': True,
        },
        'gce': {
            'bound_zones': None,
            'bound_regions': None,
            'bound_instance_groups': None,
            'bound_labels': None,
        },
    }
    # Explicit map of parameter name -> actual argument. Replaces the previous locals().get(...) trick,
    # which was fragile (depends on no shadowing / interpreter details) and harder to audit.
    provided_type_specific_args = {
        'max_jwt_exp': max_jwt_exp,
        'allow_gce_inference': allow_gce_inference,
        'bound_zones': bound_zones,
        'bound_regions': bound_regions,
        'bound_instance_groups': bound_instance_groups,
        'bound_labels': bound_labels,
    }
    list_of_strings_params = {
        'policies': policies,
        'bound_service_accounts': bound_service_accounts,
        'bound_zones': bound_zones,
        'bound_regions': bound_regions,
        'bound_instance_groups': bound_instance_groups,
        'bound_labels': bound_labels,
    }
    for param_name, param_argument in list_of_strings_params.items():
        validate_list_of_strings_param(
            param_name=param_name,
            param_argument=param_argument,
        )
    if role_type not in ALLOWED_ROLE_TYPES:
        error_msg = 'unsupported role_type argument provided "{arg}", supported types: "{role_types}"'
        # BUG FIX: previously formatted with the builtin `type` instead of the role_type argument,
        # so the error message printed "<class 'type'>" rather than the offending value.
        raise exceptions.ParamValidationError(error_msg.format(
            arg=role_type,
            role_types=','.join(ALLOWED_ROLE_TYPES),
        ))
    params = {
        'type': role_type,
        'project_id': project_id,
        'ttl': ttl,
        'max_ttl': max_ttl,
        'period': period,
        'policies': list_to_comma_delimited(policies),
        'bound_service_accounts': list_to_comma_delimited(bound_service_accounts),
    }
    if role_type == 'iam':
        params['max_jwt_exp'] = max_jwt_exp
        params['allow_gce_inference'] = allow_gce_inference
        # Warn about gce-only arguments that were supplied but will be ignored.
        for param, default_arg in type_specific_params['gce'].items():
            if provided_type_specific_args[param] != default_arg:
                warning_msg = 'Argument for parameter "{param}" ignored for role type iam'.format(
                    param=param
                )
                logger.warning(warning_msg)
    elif role_type == 'gce':
        params['bound_zones'] = list_to_comma_delimited(bound_zones)
        params['bound_regions'] = list_to_comma_delimited(bound_regions)
        params['bound_instance_groups'] = list_to_comma_delimited(bound_instance_groups)
        params['bound_labels'] = list_to_comma_delimited(bound_labels)
        # Warn about iam-only arguments that were supplied but will be ignored.
        for param, default_arg in type_specific_params['iam'].items():
            if provided_type_specific_args[param] != default_arg:
                warning_msg = 'Argument for parameter "{param}" ignored for role type gce'.format(
                    param=param
                )
                logger.warning(warning_msg)
    api_path = '/v1/auth/{mount_point}/role/{name}'.format(
        mount_point=mount_point,
        name=name,
    )
    return self._adapter.post(
        url=api_path,
        json=params,
    )
|
python
|
{
"resource": ""
}
|
q22561
|
Gcp.edit_labels_on_gce_role
|
train
|
def edit_labels_on_gce_role(self, name, add=None, remove=None, mount_point=DEFAULT_MOUNT_POINT):
    """Add or remove bound labels on an existing GCE role.

    Keys, values, or both can be added to or removed from the role's bound label list.

    Supported methods:
        POST: /auth/{mount_point}/role/{name}/labels. Produces: 204 (empty body)

    :param name: Name of an existing gce role; an error is returned for non-gce roles.
    :type name: str | unicode
    :param add: key:value label strings to add to the role's bound labels.
    :type add: list
    :param remove: Label keys to remove from the role's bound labels. Missing keys are ignored
        (idempotent).
    :type remove: list
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the edit_labels_on_gce_role request.
    :rtype: requests.Response
    """
    api_path = '/v1/auth/{mount_point}/role/{name}/labels'.format(
        mount_point=mount_point,
        name=name,
    )
    payload = {
        'add': add,
        'remove': remove,
    }
    return self._adapter.post(url=api_path, json=payload)
|
python
|
{
"resource": ""
}
|
q22562
|
Gcp.delete_role
|
train
|
def delete_role(self, role, mount_point=DEFAULT_MOUNT_POINT):
    """Remove a previously registered role.

    Supported methods:
        DELETE: /auth/{mount_point}/role/{role}. Produces: 204 (empty body)

    :param role: Name of the role to delete.
    :type role: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/auth/{mount_point}/role/{role}'.format(
        mount_point=mount_point,
        role=role,
    )
    # The role name is sent in the body as well as the URL, matching the original request shape.
    return self._adapter.delete(
        url=api_path,
        json={'role': role},
    )
|
python
|
{
"resource": ""
}
|
q22563
|
Gcp.login
|
train
|
def login(self, role, jwt, use_token=True, mount_point=DEFAULT_MOUNT_POINT):
    """Authenticate against the GCP auth method and retrieve a Vault token.

    Takes a signed JSON Web Token (JWT) and a role name; Vault verifies the JWT signature with Google
    Cloud to authenticate the entity, then authorizes it for the given role.

    Supported methods:
        POST: /auth/{mount_point}/login. Produces: 200 application/json

    :param role: Name of the role against which the login is attempted.
    :type role: str | unicode
    :param jwt: A signed JSON web token
    :type jwt: str | unicode
    :param use_token: if True, uses the token in the response received from the auth request to set the
        "token" attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client
        attribute.
    :type use_token: bool
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    api_path = '/v1/auth/{mount_point}/login'.format(mount_point=mount_point)
    return self._adapter.login(
        url=api_path,
        use_token=use_token,
        json={'role': role, 'jwt': jwt},
    )
|
python
|
{
"resource": ""
}
|
q22564
|
Okta.configure
|
train
|
def configure(self, org_name, api_token=None, base_url='okta.com', ttl=None, max_ttl=None, bypass_okta_mfa=False,
              mount_point=DEFAULT_MOUNT_POINT):
    """Set the connection parameters for the Okta auth method.

    This path honors the distinction between the create and update capabilities inside ACL policies.

    Supported methods:
        POST: /auth/{mount_point}/config. Produces: 204 (empty body)

    :param org_name: Organization name used in the Okta API.
    :type org_name: str | unicode
    :param api_token: Okta API token, required to query Okta for user group membership. Without it,
        only locally configured groups are enabled.
    :type api_token: str | unicode
    :param base_url: Base domain for API requests, e.g. okta.com, oktapreview.com, or okta-emea.com.
    :type base_url: str | unicode
    :param ttl: Duration after which authentication expires.
    :type ttl: str | unicode
    :param max_ttl: Maximum duration after which authentication expires.
    :type max_ttl: str | unicode
    :param bypass_okta_mfa: Whether to bypass an Okta MFA request. Useful with Vault's built-in MFA, but
        also causes certain other statuses (such as PASSWORD_EXPIRED) to be ignored.
    :type bypass_okta_mfa: bool
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point)
    payload = dict(
        org_name=org_name,
        api_token=api_token,
        base_url=base_url,
        ttl=ttl,
        max_ttl=max_ttl,
        bypass_okta_mfa=bypass_okta_mfa,
    )
    return self._adapter.post(url=api_path, json=payload)
|
python
|
{
"resource": ""
}
|
q22565
|
Okta.register_user
|
train
|
def register_user(self, username, groups=None, policies=None, mount_point=DEFAULT_MOUNT_POINT):
    """Create a new Okta user entry and map a set of policies to it.

    Supported methods:
        POST: /auth/{mount_point}/users/{username}. Produces: 204 (empty body)

    :param username: Name of the user.
    :type username: str | unicode
    :param groups: List or comma-separated string of groups associated with the user.
    :type groups: list
    :param policies: List or comma-separated string of policies associated with the user.
    :type policies: list
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/auth/{mount_point}/users/{username}'.format(
        mount_point=mount_point,
        username=username,
    )
    payload = {
        'username': username,
        'groups': groups,
        'policies': policies,
    }
    return self._adapter.post(url=api_path, json=payload)
|
python
|
{
"resource": ""
}
|
q22566
|
Okta.read_user
|
train
|
def read_user(self, username, mount_point=DEFAULT_MOUNT_POINT):
    """Fetch the properties of an existing Okta user.

    Supported methods:
        GET: /auth/{mount_point}/users/{username}. Produces: 200 application/json

    :param username: Username for this user.
    :type username: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    api_path = '/v1/auth/{mount_point}/users/{username}'.format(
        mount_point=mount_point,
        username=username,
    )
    # The username is also sent as a JSON body on this GET, matching the original request shape.
    response = self._adapter.get(
        url=api_path,
        json={'username': username},
    )
    return response.json()
|
python
|
{
"resource": ""
}
|
q22567
|
Okta.delete_user
|
train
|
def delete_user(self, username, mount_point=DEFAULT_MOUNT_POINT):
    """Remove an existing username from the Okta auth method.

    Supported methods:
        DELETE: /auth/{mount_point}/users/{username}. Produces: 204 (empty body)

    :param username: Username for this user.
    :type username: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/auth/{mount_point}/users/{username}'.format(
        mount_point=mount_point,
        username=username,
    )
    return self._adapter.delete(
        url=api_path,
        json={'username': username},
    )
|
python
|
{
"resource": ""
}
|
q22568
|
Okta.register_group
|
train
|
def register_group(self, name, policies=None, mount_point=DEFAULT_MOUNT_POINT):
    """Create a new Okta group entry and map a set of policies to it.

    Supported methods:
        POST: /auth/{mount_point}/groups/{name}. Produces: 204 (empty body)

    :param name: The name of the group.
    :type name: str | unicode
    :param policies: The list or comma-separated string of policies associated with the group.
    :type policies: list
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/auth/{mount_point}/groups/{name}'.format(
        mount_point=mount_point,
        name=name,
    )
    return self._adapter.post(
        url=api_path,
        json={'policies': policies},
    )
|
python
|
{
"resource": ""
}
|
q22569
|
VaultApiCategory.adapter
|
train
|
def adapter(self, adapter):
    """Replace the adapter instance stored under "_adapter" on this class.

    The same adapter is also propagated to every implemented class registered under this category.

    :param adapter: New adapter instance to set for this class and all implemented classes under this
        category.
    :type adapter: hvac.adapters.Adapter
    """
    self._adapter = adapter
    for implemented_class in self.implemented_classes:
        # Resolve the private attribute holding each implemented class instance and update it too.
        attr_name = self.get_private_attr_name(implemented_class.__name__.lower())
        getattr(self, attr_name).adapter = adapter
|
python
|
{
"resource": ""
}
|
q22570
|
Github.configure
|
train
|
def configure(self, organization, base_url='', ttl='', max_ttl='', mount_point=DEFAULT_MOUNT_POINT):
    """Set the connection parameters for the GitHub auth method.

    This path honors the distinction between the create and update capabilities inside ACL policies.

    Supported methods:
        POST: /auth/{mount_point}/config. Produces: 204 (empty body)

    :param organization: The organization users must be part of.
    :type organization: str | unicode
    :param base_url: API endpoint to use; useful for GitHub Enterprise or an API-compatible
        authentication server.
    :type base_url: str | unicode
    :param ttl: Duration after which authentication expires.
    :type ttl: str | unicode
    :param max_ttl: Maximum duration after which authentication expires.
    :type max_ttl: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the configure_method request.
    :rtype: requests.Response
    """
    api_path = '/v1/auth/{mount_point}/config'.format(mount_point=mount_point)
    payload = {
        'organization': organization,
        'base_url': base_url,
        'ttl': ttl,
        'max_ttl': max_ttl,
    }
    return self._adapter.post(url=api_path, json=payload)
|
python
|
{
"resource": ""
}
|
q22571
|
Github.map_team
|
train
|
def map_team(self, team_name, policies=None, mount_point=DEFAULT_MOUNT_POINT):
    """Associate a list of policies with a team in the configured GitHub organization.

    Supported methods:
        POST: /auth/{mount_point}/map/teams/{team_name}. Produces: 204 (empty body)

    :param team_name: GitHub team name in "slugified" format
    :type team_name: str | unicode
    :param policies: Comma separated list of policies to assign
    :type policies: List[str]
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the map_github_teams request.
    :rtype: requests.Response
    """
    # Validate arguments: policies must be a list of strings (None is treated as the empty list).
    policies = [] if policies is None else policies
    if not (isinstance(policies, list) and all(isinstance(p, str) for p in policies)):
        error_msg = 'unsupported policies argument provided "{arg}" ({arg_type}), required type: List[str]"'
        raise exceptions.ParamValidationError(error_msg.format(
            arg=policies,
            arg_type=type(policies),
        ))
    api_path = '/v1/auth/{mount_point}/map/teams/{team_name}'.format(
        mount_point=mount_point,
        team_name=team_name,
    )
    return self._adapter.post(
        url=api_path,
        json={'value': ','.join(policies)},
    )
|
python
|
{
"resource": ""
}
|
q22572
|
Github.read_team_mapping
|
train
|
def read_team_mapping(self, team_name, mount_point=DEFAULT_MOUNT_POINT):
    """Fetch the policy mapping for a GitHub team.

    Supported methods:
        GET: /auth/{mount_point}/map/teams/{team_name}. Produces: 200 application/json

    :param team_name: GitHub team name
    :type team_name: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the read_team_mapping request.
    :rtype: dict
    """
    api_path = '/v1/auth/{mount_point}/map/teams/{team_name}'.format(
        mount_point=mount_point,
        team_name=team_name,
    )
    return self._adapter.get(url=api_path).json()
|
python
|
{
"resource": ""
}
|
q22573
|
Github.read_user_mapping
|
train
|
def read_user_mapping(self, user_name, mount_point=DEFAULT_MOUNT_POINT):
    """Fetch the policy mapping for a GitHub user.

    Supported methods:
        GET: /auth/{mount_point}/map/users/{user_name}. Produces: 200 application/json

    :param user_name: GitHub user name
    :type user_name: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the read_user_mapping request.
    :rtype: dict
    """
    api_path = '/v1/auth/{mount_point}/map/users/{user_name}'.format(
        mount_point=mount_point,
        user_name=user_name,
    )
    return self._adapter.get(url=api_path).json()
|
python
|
{
"resource": ""
}
|
q22574
|
Github.login
|
train
|
def login(self, token, use_token=True, mount_point=DEFAULT_MOUNT_POINT):
    """Authenticate with a GitHub personal access token.

    Supported methods:
        POST: /auth/{mount_point}/login. Produces: 200 application/json

    :param token: GitHub personal API token.
    :type token: str | unicode
    :param use_token: if True, uses the token in the response received from the auth request to set the
        "token" attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client
        attribute.
    :type use_token: bool
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the login request.
    :rtype: dict
    """
    api_path = '/v1/auth/{mount_point}/login'.format(mount_point=mount_point)
    return self._adapter.login(
        url=api_path,
        use_token=use_token,
        json={'token': token},
    )
|
python
|
{
"resource": ""
}
|
q22575
|
generate_sigv4_auth_request
|
train
|
def generate_sigv4_auth_request(header_value=None):
    """Build the canonical AWS STS GetCallerIdentity request used for "AWS Signature Version 4" signing.

    :param header_value: Vault allows you to require an additional header, X-Vault-AWS-IAM-Server-ID, to
        be present to mitigate against different types of replay attacks. Depending on the configuration
        of the AWS auth backend, providing an argument to this optional parameter may be required.
    :type header_value: str
    :return: A PreparedRequest instance pointed at AWS's simple token service with action
        "GetCallerIdentity", optionally carrying the provided value under an
        'X-Vault-AWS-IAM-Server-ID' header.
    :rtype: requests.PreparedRequest
    """
    sts_headers = {
        'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
        'Host': 'sts.amazonaws.com',
    }
    if header_value:
        sts_headers['X-Vault-AWS-IAM-Server-ID'] = header_value
    request = requests.Request(
        method='POST',
        url='https://sts.amazonaws.com/',
        headers=sts_headers,
        data='Action=GetCallerIdentity&Version=2011-06-15',
    )
    return request.prepare()
|
python
|
{
"resource": ""
}
|
q22576
|
Lease.read_lease
|
train
|
def read_lease(self, lease_id):
    """Look up metadata for a lease.

    Supported methods:
        PUT: /sys/leases/lookup. Produces: 200 application/json

    :param lease_id: the ID of the lease to lookup.
    :type lease_id: str | unicode
    :return: Parsed JSON response from the leases PUT request
    :rtype: dict.
    """
    api_path = '/v1/sys/leases/lookup'
    response = self._adapter.put(url=api_path, json={'lease_id': lease_id})
    return response.json()
|
python
|
{
"resource": ""
}
|
q22577
|
Lease.list_leases
|
train
|
def list_leases(self, prefix):
    """Fetch the lease ids under a given prefix.

    Supported methods:
        LIST: /sys/leases/lookup/{prefix}. Produces: 200 application/json

    :param prefix: Lease prefix to filter list by.
    :type prefix: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    api_path = '/v1/sys/leases/lookup/{prefix}'.format(prefix=prefix)
    return self._adapter.list(url=api_path).json()
|
python
|
{
"resource": ""
}
|
q22578
|
Lease.renew_lease
|
train
|
def renew_lease(self, lease_id, increment=None):
    """Request an extension of an existing lease.

    Supported methods:
        PUT: /sys/leases/renew. Produces: 200 application/json

    :param lease_id: The ID of the lease to extend.
    :type lease_id: str | unicode
    :param increment: The requested amount of time (in seconds) to extend the lease.
    :type increment: int
    :return: The JSON response of the request
    :rtype: dict
    """
    api_path = '/v1/sys/leases/renew'
    payload = {
        'lease_id': lease_id,
        'increment': increment,
    }
    return self._adapter.put(url=api_path, json=payload).json()
|
python
|
{
"resource": ""
}
|
q22579
|
Lease.revoke_lease
|
train
|
def revoke_lease(self, lease_id):
    """Revoke a lease immediately.

    Supported methods:
        PUT: /sys/leases/revoke. Produces: 204 (empty body)

    :param lease_id: Specifies the ID of the lease to revoke.
    :type lease_id: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/sys/leases/revoke'
    return self._adapter.put(url=api_path, json={'lease_id': lease_id})
|
python
|
{
"resource": ""
}
|
q22580
|
Identity.create_or_update_entity
|
train
|
def create_or_update_entity(self, name, entity_id=None, metadata=None, policies=None, disabled=False,
                            mount_point=DEFAULT_MOUNT_POINT):
    """Create or update an Entity.

    Supported methods:
        POST: /{mount_point}/entity. Produces: 200 application/json

    :param name: Name of the entity.
    :type name: str | unicode
    :param entity_id: ID of the entity. If set, updates the corresponding existing entity.
    :type entity_id: str | unicode
    :param metadata: Metadata to be associated with the entity.
    :type metadata: dict
    :param policies: Policies to be tied to the entity.
    :type policies: str | unicode
    :param disabled: Whether the entity is disabled. Disabled entities' associated tokens cannot be
        used, but are not revoked.
    :type disabled: bool
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response for creates, the generic response object for updates, of the request.
    :rtype: dict | requests.Response
    """
    metadata = {} if metadata is None else metadata
    if not isinstance(metadata, dict):
        # Fail fast with a clear client-side error instead of sending a bad payload to Vault.
        raise exceptions.ParamValidationError(
            'unsupported metadata argument provided "{arg}" ({arg_type}), required type: dict"'.format(
                arg=metadata,
                arg_type=type(metadata),
            )
        )
    params = {
        'name': name,
        'metadata': metadata,
        'policies': policies,
        'disabled': disabled,
    }
    if entity_id is not None:
        params['id'] = entity_id
    resp = self._adapter.post(
        url='/v1/{mount_point}/entity'.format(mount_point=mount_point),
        json=params,
    )
    # A 204 has no body to decode; return the raw response in that case.
    return resp if resp.status_code == 204 else resp.json()
|
python
|
{
"resource": ""
}
|
q22581
|
Identity.read_entity
|
train
|
def read_entity(self, entity_id, mount_point=DEFAULT_MOUNT_POINT):
    """Query an entity by its identifier.

    Supported methods:
        GET: /{mount_point}/entity/id/{id}. Produces: 200 application/json

    :param entity_id: Identifier of the entity.
    :type entity_id: str
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    api_path = '/v1/{mount_point}/entity/id/{id}'.format(
        mount_point=mount_point,
        id=entity_id,
    )
    response = self._adapter.get(url=api_path)
    return response.json()
|
python
|
{
"resource": ""
}
|
q22582
|
Identity.delete_entity
|
train
|
def delete_entity(self, entity_id, mount_point=DEFAULT_MOUNT_POINT):
    """Delete an entity and all its associated aliases.

    Supported methods:
        DELETE: /{mount_point}/entity/id/:id. Produces: 204 (empty body)

    :param entity_id: Identifier of the entity.
    :type entity_id: str
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    url = '/v1/{mount_point}/entity/id/{id}'.format(mount_point=mount_point, id=entity_id)
    return self._adapter.delete(url=url)
|
python
|
{
"resource": ""
}
|
q22583
|
Identity.merge_entities
|
train
|
def merge_entities(self, from_entity_ids, to_entity_id, force=False, mount_point=DEFAULT_MOUNT_POINT):
    """Merge many entities into one entity.

    Supported methods:
        POST: /{mount_point}/entity/merge. Produces: 204 (empty body)

    :param from_entity_ids: Entity IDs which need to get merged.
    :type from_entity_ids: array
    :param to_entity_id: Entity ID into which all the other entities need to get merged.
    :type to_entity_id: str | unicode
    :param force: Follow the 'mine' strategy for merging MFA secrets: when secrets of the same type
        exist both on merged-from entities and on the destination entity, the destination's secrets
        are kept unaltered. If not set, conflicting secrets cause an error listing all conflicts.
    :type force: bool
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    return self._adapter.post(
        url='/v1/{mount_point}/entity/merge'.format(mount_point=mount_point),
        json={
            'from_entity_ids': from_entity_ids,
            'to_entity_id': to_entity_id,
            'force': force,
        },
    )
|
python
|
{
"resource": ""
}
|
q22584
|
Identity.read_entity_alias
|
train
|
def read_entity_alias(self, alias_id, mount_point=DEFAULT_MOUNT_POINT):
    """Query the entity alias by its identifier.

    Supported methods:
        GET: /{mount_point}/entity-alias/id/{id}. Produces: 200 application/json

    :param alias_id: Identifier of entity alias.
    :type alias_id: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    url = '/v1/{mount_point}/entity-alias/id/{id}'.format(mount_point=mount_point, id=alias_id)
    return self._adapter.get(url=url).json()
|
python
|
{
"resource": ""
}
|
q22585
|
Identity.update_entity_alias
|
train
|
def update_entity_alias(self, alias_id, name, canonical_id, mount_accessor, mount_point=DEFAULT_MOUNT_POINT):
    """Update an existing entity alias.

    Supported methods:
        POST: /{mount_point}/entity-alias/id/{id}. Produces: 200 application/json

    :param alias_id: Identifier of the entity alias.
    :type alias_id: str | unicode
    :param name: Name of the alias; the identifier of the client in the authentication source (e.g.
        a valid userpass username, or a GitHub username for a GitHub-backed alias).
    :type name: str | unicode
    :param canonical_id: Entity ID to which this alias belongs to.
    :type canonical_id: str | unicode
    :param mount_accessor: Accessor of the mount to which the alias should belong to.
    :type mount_accessor: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response where available, otherwise the generic response object, of the request.
    :rtype: dict | requests.Response
    """
    resp = self._adapter.post(
        url='/v1/{mount_point}/entity-alias/id/{id}'.format(mount_point=mount_point, id=alias_id),
        json={
            'name': name,
            'canonical_id': canonical_id,
            'mount_accessor': mount_accessor,
        },
    )
    # A 204 has no JSON body to decode.
    return resp if resp.status_code == 204 else resp.json()
|
python
|
{
"resource": ""
}
|
q22586
|
Identity.list_entity_aliases
|
train
|
def list_entity_aliases(self, method='LIST', mount_point=DEFAULT_MOUNT_POINT):
    """List available entity aliases by their identifiers.

    :param method: Supported methods:
        LIST: /{mount_point}/entity-alias/id. Produces: 200 application/json
        GET: /{mount_point}/entity-alias/id?list=true. Produces: 200 application/json
    :type method: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    # Reject unknown verbs up front; only the two listing styles are supported.
    if method not in ('LIST', 'GET'):
        raise exceptions.ParamValidationError(
            '"method" parameter provided invalid value; LIST or GET allowed, "{method}" provided'.format(method=method)
        )
    if method == 'LIST':
        response = self._adapter.list(
            url='/v1/{mount_point}/entity-alias/id'.format(mount_point=mount_point),
        )
    else:
        response = self._adapter.get(
            url='/v1/{mount_point}/entity-alias/id?list=true'.format(mount_point=mount_point),
        )
    return response.json()
|
python
|
{
"resource": ""
}
|
q22587
|
Identity.delete_entity_alias
|
train
|
def delete_entity_alias(self, alias_id, mount_point=DEFAULT_MOUNT_POINT):
    """Delete a entity alias.

    Supported methods:
        DELETE: /{mount_point}/entity-alias/id/{alias_id}. Produces: 204 (empty body)

    :param alias_id: Identifier of the entity.
    :type alias_id: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    url = '/v1/{mount_point}/entity-alias/id/{id}'.format(mount_point=mount_point, id=alias_id)
    return self._adapter.delete(url=url)
|
python
|
{
"resource": ""
}
|
q22588
|
Identity.read_group
|
train
|
def read_group(self, group_id, mount_point=DEFAULT_MOUNT_POINT):
    """Query the group by its identifier.

    Supported methods:
        GET: /{mount_point}/group/id/{id}. Produces: 200 application/json

    :param group_id: Identifier of the group.
    :type group_id: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    url = '/v1/{mount_point}/group/id/{id}'.format(mount_point=mount_point, id=group_id)
    return self._adapter.get(url=url).json()
|
python
|
{
"resource": ""
}
|
q22589
|
Identity.create_or_update_group_by_name
|
train
|
def create_or_update_group_by_name(self, name, group_type="internal", metadata=None, policies=None, member_group_ids=None,
                                   member_entity_ids=None, mount_point=DEFAULT_MOUNT_POINT):
    """Create or update a group by its name.

    Supported methods:
        POST: /{mount_point}/group/name/{name}. Produces: 200 application/json

    :param name: Name of the group.
    :type name: str | unicode
    :param group_type: Type of the group, internal or external. Defaults to internal.
    :type group_type: str | unicode
    :param metadata: Metadata to be associated with the group.
    :type metadata: dict
    :param policies: Policies to be tied to the group.
    :type policies: str | unicode
    :param member_group_ids: Group IDs to be assigned as group members.
    :type member_group_ids: str | unicode
    :param member_entity_ids: Entity IDs to be assigned as group members.
    :type member_entity_ids: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    metadata = {} if metadata is None else metadata
    if not isinstance(metadata, dict):
        # Client-side validation: Vault expects a JSON object for metadata.
        raise exceptions.ParamValidationError(
            'unsupported metadata argument provided "{arg}" ({arg_type}), required type: dict"'.format(
                arg=metadata,
                arg_type=type(metadata),
            )
        )
    if group_type not in ALLOWED_GROUP_TYPES:
        raise exceptions.ParamValidationError(
            'unsupported group_type argument provided "{arg}", allowed values: ({allowed_values})'.format(
                arg=group_type,
                allowed_values=ALLOWED_GROUP_TYPES,
            )
        )
    return self._adapter.post(
        url='/v1/{mount_point}/group/name/{name}'.format(mount_point=mount_point, name=name),
        json={
            'type': group_type,
            'metadata': metadata,
            'policies': policies,
            'member_group_ids': member_group_ids,
            'member_entity_ids': member_entity_ids,
        },
    )
|
python
|
{
"resource": ""
}
|
q22590
|
Identity.create_or_update_group_alias
|
train
|
def create_or_update_group_alias(self, name, alias_id=None, mount_accessor=None, canonical_id=None, mount_point=DEFAULT_MOUNT_POINT):
    """Creates or update a group alias.

    Supported methods:
        POST: /{mount_point}/group-alias. Produces: 200 application/json

    :param name: Name of the group alias.
    :type name: str | unicode
    :param alias_id: ID of the group alias. If set, updates the corresponding existing group alias.
    :type alias_id: str | unicode
    :param mount_accessor: Mount accessor to which this alias belongs to.
    :type mount_accessor: str | unicode
    :param canonical_id: ID of the group to which this is an alias.
    :type canonical_id: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    params = {
        'name': name,
        'mount_accessor': mount_accessor,
        'canonical_id': canonical_id,
    }
    if alias_id is not None:
        # Including an id turns the create into an update of that alias.
        params['id'] = alias_id
    return self._adapter.post(
        url='/v1/{mount_point}/group-alias'.format(mount_point=mount_point),
        json=params,
    ).json()
|
python
|
{
"resource": ""
}
|
q22591
|
Identity.update_group_alias
|
train
|
def update_group_alias(self, entity_id, name, mount_accessor="", canonical_id="", mount_point=DEFAULT_MOUNT_POINT):
    """Update an existing group alias.

    Supported methods:
        POST: /{mount_point}/group-alias/id/{id}. Produces: 200 application/json

    :param entity_id: ID of the group alias.
    :type entity_id: str | unicode
    :param name: Name of the group alias.
    :type name: str | unicode
    :param mount_accessor: Mount accessor to which this alias belongs to.
    :type mount_accessor: str | unicode
    :param canonical_id: ID of the group to which this is an alias.
    :type canonical_id: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    url = '/v1/{mount_point}/group-alias/id/{id}'.format(mount_point=mount_point, id=entity_id)
    return self._adapter.post(
        url=url,
        json={
            'name': name,
            'mount_accessor': mount_accessor,
            'canonical_id': canonical_id,
        },
    )
|
python
|
{
"resource": ""
}
|
q22592
|
Identity.lookup_entity
|
train
|
def lookup_entity(self, name=None, entity_id=None, alias_id=None, alias_name=None, alias_mount_accessor=None, mount_point=DEFAULT_MOUNT_POINT):
    """Query an entity based on the given criteria.

    The criteria can be name, id, alias_id, or a combination of alias_name and alias_mount_accessor.

    Supported methods:
        POST: /{mount_point}/lookup/entity. Produces: 200 application/json

    :param name: Name of the entity.
    :type name: str | unicode
    :param entity_id: ID of the entity.
    :type entity_id: str | unicode
    :param alias_id: ID of the alias.
    :type alias_id: str | unicode
    :param alias_name: Name of the alias. This should be supplied in conjunction with alias_mount_accessor.
    :type alias_name: str | unicode
    :param alias_mount_accessor: Accessor of the mount to which the alias belongs to. This should be
        supplied in conjunction with alias_name.
    :type alias_mount_accessor: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request if a entity / entity alias is found in the lookup, None otherwise.
    :rtype: dict | None
    """
    # Exactly one lookup criterion is sent; precedence order is
    # name > entity_id > alias_id > (alias_name + alias_mount_accessor).
    params = {}
    if name is not None:
        params['name'] = name
    elif entity_id is not None:
        params['id'] = entity_id
    elif alias_id is not None:
        params['alias_id'] = alias_id
    elif alias_name is not None and alias_mount_accessor is not None:
        params['alias_name'] = alias_name
        params['alias_mount_accessor'] = alias_mount_accessor
    resp = self._adapter.post(
        url='/v1/{mount_point}/lookup/entity'.format(mount_point=mount_point),
        json=params,
    )
    if resp.status_code == 204:
        # Vault signals "nothing matched" with an empty 204 rather than an error.
        logger.debug('Identity.lookup_entity: no entities found with params: {params}'.format(params=params))
        return None
    return resp.json()
|
python
|
{
"resource": ""
}
|
q22593
|
Transit.create_key
|
train
|
def create_key(self, name, convergent_encryption=False, derived=False, exportable=False, allow_plaintext_backup=False,
               key_type="aes256-gcm96", mount_point=DEFAULT_MOUNT_POINT):
    """Create a new named encryption key of the specified type.

    The values set here cannot be changed after key creation.

    Supported methods:
        POST: /{mount_point}/keys/{name}. Produces: 204 (empty body)

    :param name: Name of the encryption key to create.
    :type name: str | unicode
    :param convergent_encryption: If enabled, the key will support convergent encryption, where the
        same plaintext creates the same ciphertext. Requires derived to be True; each operation then
        derives its nonce rather than generating it randomly.
    :type convergent_encryption: bool
    :param derived: Whether key derivation is to be used. If enabled, all encrypt/decrypt requests to
        this named key must provide a context used for key derivation.
    :type derived: bool
    :param exportable: Enables keys to be exportable. Once set, this cannot be disabled.
    :type exportable: bool
    :param allow_plaintext_backup: If set, enables taking backup of named key in the plaintext format.
        Once set, this cannot be disabled.
    :type allow_plaintext_backup: bool
    :param key_type: Type of key to create: aes256-gcm96, chacha20-poly1305, ed25519, ecdsa-p256,
        rsa-2048, or rsa-4096.
    :type key_type: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    # Convergent encryption only makes sense for derived keys; reject the combination early.
    if convergent_encryption and not derived:
        raise exceptions.ParamValidationError('derived must be set to True when convergent_encryption is True')
    if key_type not in transit_constants.ALLOWED_KEY_TYPES:
        raise exceptions.ParamValidationError(
            'invalid key_type argument provided "{arg}", supported types: "{allowed_types}"'.format(
                arg=key_type,
                allowed_types=', '.join(transit_constants.ALLOWED_KEY_TYPES),
            )
        )
    return self._adapter.post(
        url='/v1/{mount_point}/keys/{name}'.format(mount_point=mount_point, name=name),
        json={
            'convergent_encryption': convergent_encryption,
            'derived': derived,
            'exportable': exportable,
            'allow_plaintext_backup': allow_plaintext_backup,
            'type': key_type,
        },
    )
|
python
|
{
"resource": ""
}
|
q22594
|
Transit.update_key_configuration
|
train
|
def update_key_configuration(self, name, min_decryption_version=0, min_encryption_version=0, deletion_allowed=False,
                             exportable=False, allow_plaintext_backup=False, mount_point=DEFAULT_MOUNT_POINT):
    """Tune configuration values for a given key.

    These values are returned during a read operation on the named key.

    Supported methods:
        POST: /{mount_point}/keys/{name}/config. Produces: 204 (empty body)

    :param name: Name of the encryption key to update configuration for.
    :type name: str | unicode
    :param min_decryption_version: Minimum version of ciphertext allowed to be decrypted. Adjusting
        this as part of a key rotation policy can prevent old copies of ciphertext from being
        decrypted, should they fall into the wrong hands. For signatures, this value controls the
        minimum version of signature that can be verified against. For HMACs, this controls the
        minimum version of a key allowed to be used as the key for verification.
    :type min_decryption_version: int
    :param min_encryption_version: Minimum version of the key that can be used to encrypt plaintext,
        sign payloads, or generate HMACs. Must be 0 (which will use the latest version) or a value
        greater or equal to min_decryption_version.
    :type min_encryption_version: int
    :param deletion_allowed: Whether the key is allowed to be deleted.
    :type deletion_allowed: bool
    :param exportable: Enables keys to be exportable. Once set, this cannot be disabled.
    :type exportable: bool
    :param allow_plaintext_backup: If set, enables taking backup of named key in the plaintext format.
        Once set, this cannot be disabled.
    :type allow_plaintext_backup: bool
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :raises exceptions.ParamValidationError: If a nonzero min_encryption_version is below
        min_decryption_version.
    :return: The response of the request.
    :rtype: requests.Response
    """
    # Bug fix: the documented contract (and Vault's API) permits
    # min_encryption_version == min_decryption_version; only a nonzero value
    # strictly below min_decryption_version is invalid. The previous check
    # used "<=" and wrongly rejected the equal case.
    if min_encryption_version != 0 and min_encryption_version < min_decryption_version:
        raise exceptions.ParamValidationError('min_encryption_version must be 0 or >= min_decryption_version')
    params = {
        'min_decryption_version': min_decryption_version,
        'min_encryption_version': min_encryption_version,
        'deletion_allowed': deletion_allowed,
        'exportable': exportable,
        'allow_plaintext_backup': allow_plaintext_backup,
    }
    api_path = '/v1/{mount_point}/keys/{name}/config'.format(
        mount_point=mount_point,
        name=name,
    )
    return self._adapter.post(
        url=api_path,
        json=params,
    )
|
python
|
{
"resource": ""
}
|
q22595
|
Transit.rotate_key
|
train
|
def rotate_key(self, name, mount_point=DEFAULT_MOUNT_POINT):
    """Rotate the version of the named key.

    After rotation, new plaintext requests will be encrypted with the new version of the key. To
    upgrade ciphertext to be encrypted with the latest version of the key, use the rewrap endpoint.
    This is only supported with keys that support encryption and decryption operations.

    Supported methods:
        POST: /{mount_point}/keys/{name}/rotate. Produces: 204 (empty body)

    :param name: Name of the key to rotate.
    :type name: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    url = '/v1/{mount_point}/keys/{name}/rotate'.format(mount_point=mount_point, name=name)
    return self._adapter.post(url=url)
|
python
|
{
"resource": ""
}
|
q22596
|
Transit.export_key
|
train
|
def export_key(self, name, key_type, version=None, mount_point=DEFAULT_MOUNT_POINT):
    """Return the named key.

    The keys object shows the value of the key for each version. If version is specified, the
    specific version will be returned; if "latest" is provided, the current key will be provided.
    The key must be exportable to support this operation and the version must still be valid.

    Supported methods:
        GET: /{mount_point}/export/{key_type}/{name}(/{version}). Produces: 200 application/json

    :param name: Name of the key to export.
    :type name: str | unicode
    :param key_type: Type of the key to export: encryption-key, signing-key, or hmac-key.
    :type key_type: str | unicode
    :param version: Version of the key to read. If omitted, all versions of the key will be returned.
    :type version: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    if key_type not in transit_constants.ALLOWED_EXPORT_KEY_TYPES:
        raise exceptions.ParamValidationError(
            'invalid key_type argument provided "{arg}", supported types: "{allowed_types}"'.format(
                arg=key_type,
                allowed_types=', '.join(transit_constants.ALLOWED_EXPORT_KEY_TYPES),
            )
        )
    url = '/v1/{mount_point}/export/{key_type}/{name}'.format(
        mount_point=mount_point,
        key_type=key_type,
        name=name,
    )
    if version is not None:
        # Only append the version path segment when a specific version is requested.
        url = self._adapter.urljoin(url, version)
    return self._adapter.get(url=url).json()
|
python
|
{
"resource": ""
}
|
q22597
|
Transit.encrypt_data
|
train
|
def encrypt_data(self, name, plaintext, context="", key_version=0, nonce=None, batch_input=None, type="aes256-gcm96",
                 convergent_encryption="", mount_point=DEFAULT_MOUNT_POINT):
    """Encrypt the provided plaintext using the named key.

    If the caller has the create capability for this endpoint and the key does not exist, it will be
    upserted with default values (whether the key requires derivation depends on whether the context
    parameter is empty or not). With only the update capability, a missing key is an error.

    Supported methods:
        POST: /{mount_point}/encrypt/{name}. Produces: 200 application/json

    :param name: Name of the encryption key to encrypt against.
    :type name: str | unicode
    :param plaintext: Base64 encoded plaintext to be encoded.
    :type plaintext: str | unicode
    :param context: Base64 encoded context for key derivation. Required if key derivation is enabled
        for this key.
    :type context: str | unicode
    :param key_version: Version of the key to use for encryption; if not set, uses the latest
        version. Must be greater than or equal to the key's min_encryption_version, if set.
    :type key_version: int
    :param nonce: Base64 encoded nonce. Required if convergent encryption is enabled for this key
        and the key was generated with Vault 0.6.1 (not required for keys created in 0.6.2+). Must
        be exactly 96 bits (12 bytes) long and never reused for any given context.
    :type nonce: str | unicode
    :param batch_input: List of items to be encrypted in a single batch; when set, 'plaintext',
        'context' and 'nonce' are ignored. Format:
        [dict(context="b64_context", plaintext="b64_plaintext"), ...]
    :type batch_input: List[dict]
    :param type: Type of key to create when the request performs an upsert.
    :type type: str | unicode
    :param convergent_encryption: Only used on upsert: whether the created key supports convergent
        encryption. Requires key derivation, plus a context and 96-bit (12-byte) nonce on every
        request; identical context and nonce then yield identical ciphertext.
    :type convergent_encryption: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    resp = self._adapter.post(
        url='/v1/{mount_point}/encrypt/{name}'.format(mount_point=mount_point, name=name),
        json={
            'plaintext': plaintext,
            'context': context,
            'key_version': key_version,
            'nonce': nonce,
            'batch_input': batch_input,
            'type': type,
            'convergent_encryption': convergent_encryption,
        },
    )
    return resp.json()
|
python
|
{
"resource": ""
}
|
q22598
|
Transit.decrypt_data
|
train
|
def decrypt_data(self, name, ciphertext, context="", nonce="", batch_input=None, mount_point=DEFAULT_MOUNT_POINT):
    """Decrypt the provided ciphertext using the named key.

    Supported methods:
        POST: /{mount_point}/decrypt/{name}. Produces: 200 application/json

    :param name: Name of the encryption key to decrypt against.
    :type name: str | unicode
    :param ciphertext: The ciphertext to decrypt.
    :type ciphertext: str | unicode
    :param context: Base64 encoded context for key derivation. Required if key derivation is enabled.
    :type context: str | unicode
    :param nonce: Base64 encoded nonce value used during encryption. Must be provided if convergent
        encryption is enabled for this key and the key was generated with Vault 0.6.1 (not required
        for keys created in 0.6.2+).
    :type nonce: str | unicode
    :param batch_input: List of items to be decrypted in a single batch; when set, 'ciphertext',
        'context' and 'nonce' are ignored. Format:
        [dict(context="b64_context", ciphertext="b64_plaintext"), ...]
    :type batch_input: List[dict]
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    resp = self._adapter.post(
        url='/v1/{mount_point}/decrypt/{name}'.format(mount_point=mount_point, name=name),
        json={
            'ciphertext': ciphertext,
            'context': context,
            'nonce': nonce,
            'batch_input': batch_input,
        },
    )
    return resp.json()
|
python
|
{
"resource": ""
}
|
q22599
|
Transit.generate_data_key
|
train
|
def generate_data_key(self, name, key_type, context="", nonce="", bits=256, mount_point=DEFAULT_MOUNT_POINT):
    """Generate a new high-entropy key and the value encrypted with the named key.

    Optionally return the plaintext of the key as well. Whether plaintext is returned depends on the
    path, so Vault ACL policies can control whether a user may retrieve the plaintext value of a key.
    This is useful when an untrusted user or operation should generate keys that are then made
    available to trusted users.

    Supported methods:
        POST: /{mount_point}/datakey/{key_type}/{name}. Produces: 200 application/json

    :param name: Name of the encryption key to use to encrypt the datakey.
    :type name: str | unicode
    :param key_type: Type of key to generate. If "plaintext", the plaintext key is returned along
        with the ciphertext; if "wrapped", only the ciphertext value is returned.
    :type key_type: str | unicode
    :param context: Key derivation context, provided as a base64-encoded string. Required if
        derivation is enabled.
    :type context: str | unicode
    :param nonce: Base64 encoded nonce. Required if convergent encryption is enabled for this key
        and the key was generated with Vault 0.6.1 (not required for keys created in 0.6.2+). Must
        be exactly 96 bits (12 bytes) long and never reused for any given context.
    :type nonce: str | unicode
    :param bits: Number of bits in the desired key. Can be 128, 256, or 512.
    :type bits: int
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    if key_type not in transit_constants.ALLOWED_DATA_KEY_TYPES:
        raise exceptions.ParamValidationError(
            'invalid key_type argument provided "{arg}", supported types: "{allowed_types}"'.format(
                arg=key_type,
                allowed_types=', '.join(transit_constants.ALLOWED_DATA_KEY_TYPES),
            )
        )
    if bits not in transit_constants.ALLOWED_DATA_KEY_BITS:
        raise exceptions.ParamValidationError(
            'invalid bits argument provided "{arg}", supported values: "{allowed_values}"'.format(
                arg=bits,
                allowed_values=', '.join(str(b) for b in transit_constants.ALLOWED_DATA_KEY_BITS),
            )
        )
    url = '/v1/{mount_point}/datakey/{key_type}/{name}'.format(
        mount_point=mount_point,
        key_type=key_type,
        name=name,
    )
    return self._adapter.post(
        url=url,
        json={'context': context, 'nonce': nonce, 'bits': bits},
    ).json()
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.