Dataset schema: _id (string, 2-7 chars) · title (string, 1-88 chars) · partition (string, 3 classes) · text (string, 75-19.8k chars) · language (string, 1 class) · meta_information (dict)
q22400
_zlib_no_compress
train
def _zlib_no_compress(data):
    """Compress data with zlib level 0."""
    cobj = zlib.compressobj(0)
    return b"".join([cobj.compress(data), cobj.flush()])
python
{ "resource": "" }
q22401
_parse_codec_options
train
def _parse_codec_options(options):
    """Parse BSON codec options."""
    return CodecOptions(
        document_class=options.get(
            'document_class', DEFAULT_CODEC_OPTIONS.document_class),
        tz_aware=options.get(
            'tz_aware', DEFAULT_CODEC_OPTIONS.tz_aware),
        uuid_representation=options.get(
            'uuidrepresentation', DEFAULT_CODEC_OPTIONS.uuid_representation),
        unicode_decode_error_handler=options.get(
            'unicode_decode_error_handler',
            DEFAULT_CODEC_OPTIONS.unicode_decode_error_handler),
        tzinfo=options.get('tzinfo', DEFAULT_CODEC_OPTIONS.tzinfo),
        type_registry=options.get(
            'type_registry', DEFAULT_CODEC_OPTIONS.type_registry))
python
{ "resource": "" }
q22402
CodecOptions._arguments_repr
train
def _arguments_repr(self):
    """Representation of the arguments used to create this object."""
    document_class_repr = (
        'dict' if self.document_class is dict
        else repr(self.document_class))
    uuid_rep_repr = UUID_REPRESENTATION_NAMES.get(self.uuid_representation,
                                                  self.uuid_representation)
    return ('document_class=%s, tz_aware=%r, uuid_representation=%s, '
            'unicode_decode_error_handler=%r, tzinfo=%r, '
            'type_registry=%r' %
            (document_class_repr, self.tz_aware, uuid_rep_repr,
             self.unicode_decode_error_handler, self.tzinfo,
             self.type_registry))
python
{ "resource": "" }
q22403
_merge_command
train
def _merge_command(run, full_result, offset, result):
    """Merge a write command result into the full bulk result."""
    affected = result.get("n", 0)

    if run.op_type == _INSERT:
        full_result["nInserted"] += affected
    elif run.op_type == _DELETE:
        full_result["nRemoved"] += affected
    elif run.op_type == _UPDATE:
        upserted = result.get("upserted")
        if upserted:
            n_upserted = len(upserted)
            for doc in upserted:
                doc["index"] = run.index(doc["index"] + offset)
            full_result["upserted"].extend(upserted)
            full_result["nUpserted"] += n_upserted
            full_result["nMatched"] += (affected - n_upserted)
        else:
            full_result["nMatched"] += affected
        full_result["nModified"] += result["nModified"]

    write_errors = result.get("writeErrors")
    if write_errors:
        for doc in write_errors:
            # Leave the server response intact for APM.
            replacement = doc.copy()
            idx = doc["index"] + offset
            replacement["index"] = run.index(idx)
            # Add the failed operation to the error document.
            replacement[_UOP] = run.ops[idx]
            full_result["writeErrors"].append(replacement)

    wc_error = result.get("writeConcernError")
    if wc_error:
        full_result["writeConcernErrors"].append(wc_error)
python
{ "resource": "" }
q22404
_raise_bulk_write_error
train
def _raise_bulk_write_error(full_result):
    """Raise a BulkWriteError from the full bulk api result."""
    if full_result["writeErrors"]:
        full_result["writeErrors"].sort(
            key=lambda error: error["index"])
    raise BulkWriteError(full_result)
python
{ "resource": "" }
q22405
_Bulk.add_update
train
def add_update(self, selector, update, multi=False, upsert=False,
               collation=None, array_filters=None):
    """Create an update document and add it to the list of ops."""
    validate_ok_for_update(update)
    cmd = SON([('q', selector), ('u', update),
               ('multi', multi), ('upsert', upsert)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    if array_filters is not None:
        self.uses_array_filters = True
        cmd['arrayFilters'] = array_filters
    if multi:
        # A bulk_write containing an update_many is not retryable.
        self.is_retryable = False
    self.ops.append((_UPDATE, cmd))
python
{ "resource": "" }
q22406
_Bulk.execute_insert_no_results
train
def execute_insert_no_results(self, sock_info, run, op_id, acknowledged):
    """Execute insert, returning no results."""
    command = SON([('insert', self.collection.name),
                   ('ordered', self.ordered)])
    concern = {'w': int(self.ordered)}
    command['writeConcern'] = concern
    if self.bypass_doc_val and sock_info.max_wire_version >= 4:
        command['bypassDocumentValidation'] = True
    db = self.collection.database
    bwc = _BulkWriteContext(
        db.name, command, sock_info, op_id, db.client._event_listeners,
        session=None)
    # Legacy batched OP_INSERT.
    _do_batched_insert(
        self.collection.full_name, run.ops, True, acknowledged, concern,
        not self.ordered, self.collection.codec_options, bwc)
python
{ "resource": "" }
q22407
_Bulk.execute_op_msg_no_results
train
def execute_op_msg_no_results(self, sock_info, generator):
    """Execute write commands with OP_MSG and w=0 writeConcern, unordered."""
    db_name = self.collection.database.name
    client = self.collection.database.client
    listeners = client._event_listeners
    op_id = _randint()

    if not self.current_run:
        self.current_run = next(generator)
    run = self.current_run

    while run:
        cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                   ('ordered', False),
                   ('writeConcern', {'w': 0})])
        bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id,
                                listeners, None)

        while run.idx_offset < len(run.ops):
            check_keys = run.op_type == _INSERT
            ops = islice(run.ops, run.idx_offset, None)
            # Run as many ops as possible.
            request_id, msg, to_send = _do_bulk_write_command(
                self.namespace, run.op_type, cmd, ops, check_keys,
                self.collection.codec_options, bwc)
            if not to_send:
                raise InvalidOperation("cannot do an empty bulk write")
            run.idx_offset += len(to_send)
            # Though this isn't strictly a "legacy" write, the helper
            # handles publishing commands and sending our message
            # without receiving a result. Send 0 for max_doc_size
            # to disable size checking. Size checking is handled while
            # the documents are encoded to BSON.
            bwc.legacy_write(request_id, msg, 0, False, to_send)
        self.current_run = run = next(generator, None)
python
{ "resource": "" }
q22408
_Bulk.execute_command_no_results
train
def execute_command_no_results(self, sock_info, generator):
    """Execute write commands with OP_MSG and w=0 WriteConcern, ordered."""
    full_result = {
        "writeErrors": [],
        "writeConcernErrors": [],
        "nInserted": 0,
        "nUpserted": 0,
        "nMatched": 0,
        "nModified": 0,
        "nRemoved": 0,
        "upserted": [],
    }
    # Ordered bulk writes have to be acknowledged so that we stop
    # processing at the first error, even when the application
    # specified unacknowledged writeConcern.
    write_concern = WriteConcern()
    op_id = _randint()
    try:
        self._execute_command(
            generator, write_concern, None,
            sock_info, op_id, False, full_result)
    except OperationFailure:
        pass
python
{ "resource": "" }
q22409
Server.run_operation_with_response
train
def run_operation_with_response(
        self, sock_info, operation, set_slave_okay, listeners, exhaust,
        unpack_res):
    """Run a _Query or _GetMore operation and return a Response object.

    This method is used only to run _Query/_GetMore operations from
    cursors.

    Can raise ConnectionFailure, OperationFailure, etc.

    :Parameters:
      - `operation`: A _Query or _GetMore object.
      - `set_slave_okay`: Pass to operation.get_message.
      - `all_credentials`: dict, maps auth source to MongoCredential.
      - `listeners`: Instance of _EventListeners or None.
      - `exhaust`: If True, then this is an exhaust cursor operation.
      - `unpack_res`: A callable that decodes the wire protocol response.
    """
    duration = None
    publish = listeners.enabled_for_commands
    if publish:
        start = datetime.now()

    send_message = not operation.exhaust_mgr
    if send_message:
        use_cmd = operation.use_command(sock_info, exhaust)
        message = operation.get_message(
            set_slave_okay, sock_info, use_cmd)
        request_id, data, max_doc_size = self._split_message(message)
    else:
        use_cmd = False
        request_id = 0

    if publish:
        cmd, dbn = operation.as_command(sock_info)
        listeners.publish_command_start(
            cmd, dbn, request_id, sock_info.address)
        start = datetime.now()

    try:
        if send_message:
            sock_info.send_message(data, max_doc_size)
            reply = sock_info.receive_message(request_id)
        else:
            reply = sock_info.receive_message(None)

        # Unpack and check for command errors.
        if use_cmd:
            user_fields = _CURSOR_DOC_FIELDS
            legacy_response = False
        else:
            user_fields = None
            legacy_response = True
        docs = unpack_res(reply, operation.cursor_id,
                          operation.codec_options,
                          legacy_response=legacy_response,
                          user_fields=user_fields)
        if use_cmd:
            first = docs[0]
            operation.client._process_response(
                first, operation.session)
            _check_command_response(first)
    except Exception as exc:
        if publish:
            duration = datetime.now() - start
            if isinstance(exc, (NotMasterError, OperationFailure)):
                failure = exc.details
            else:
                failure = _convert_exception(exc)
            listeners.publish_command_failure(
                duration, failure, operation.name,
                request_id, sock_info.address)
        raise

    if publish:
        duration = datetime.now() - start
        # Must publish in find / getMore / explain command response
        # format.
        if use_cmd:
            res = docs[0]
        elif operation.name == "explain":
            res = docs[0] if docs else {}
        else:
            res = {"cursor": {"id": reply.cursor_id,
                              "ns": operation.namespace()},
                   "ok": 1}
            if operation.name == "find":
                res["cursor"]["firstBatch"] = docs
            else:
                res["cursor"]["nextBatch"] = docs
        listeners.publish_command_success(
            duration, res, operation.name,
            request_id, sock_info.address)

    if exhaust:
        response = ExhaustResponse(
            data=reply,
            address=self._description.address,
            socket_info=sock_info,
            pool=self._pool,
            duration=duration,
            request_id=request_id,
            from_command=use_cmd,
            docs=docs)
    else:
        response = Response(
            data=reply,
            address=self._description.address,
            duration=duration,
            request_id=request_id,
            from_command=use_cmd,
            docs=docs)

    return response
python
{ "resource": "" }
q22410
ServerDescription.retryable_writes_supported
train
def retryable_writes_supported(self):
    """Checks if this server supports retryable writes."""
    return (
        self._ls_timeout_minutes is not None and
        self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary))
python
{ "resource": "" }
q22411
_compress
train
def _compress(operation, data, ctx):
    """Takes message data, compresses it, and adds an OP_COMPRESSED header."""
    compressed = ctx.compress(data)
    request_id = _randint()

    header = _pack_compression_header(
        _COMPRESSION_HEADER_SIZE + len(compressed),  # Total message length
        request_id,                                  # Request id
        0,                                           # responseTo
        2012,                                        # operation id
        operation,                                   # original operation id
        len(data),                                   # uncompressed message length
        ctx.compressor_id)                           # compressor id
    return request_id, header + compressed
python
{ "resource": "" }
q22412
_insert
train
def _insert(collection_name, docs, check_keys, flags, opts):
    """Get an OP_INSERT message"""
    encode = _dict_to_bson  # Make local. Uses extensions.
    if len(docs) == 1:
        encoded = encode(docs[0], check_keys, opts)
        return b"".join([
            b"\x00\x00\x00\x00",  # Flags don't matter for one doc.
            _make_c_string(collection_name),
            encoded]), len(encoded)

    encoded = [encode(doc, check_keys, opts) for doc in docs]
    if not encoded:
        raise InvalidOperation("cannot do an empty bulk insert")
    return b"".join([
        _pack_int(flags),
        _make_c_string(collection_name),
        b"".join(encoded)]), max(map(len, encoded))
python
{ "resource": "" }
q22413
_insert_compressed
train
def _insert_compressed(
        collection_name, docs, check_keys, continue_on_error, opts, ctx):
    """Internal compressed unacknowledged insert message helper."""
    op_insert, max_bson_size = _insert(
        collection_name, docs, check_keys, continue_on_error, opts)
    rid, msg = _compress(2002, op_insert, ctx)
    return rid, msg, max_bson_size
python
{ "resource": "" }
q22414
_insert_uncompressed
train
def _insert_uncompressed(collection_name, docs, check_keys,
                         safe, last_error_args, continue_on_error, opts):
    """Internal insert message helper."""
    op_insert, max_bson_size = _insert(
        collection_name, docs, check_keys, continue_on_error, opts)
    rid, msg = __pack_message(2002, op_insert)
    if safe:
        rid, gle, _ = __last_error(collection_name, last_error_args)
        return rid, msg + gle, max_bson_size
    return rid, msg, max_bson_size
python
{ "resource": "" }
q22415
_update
train
def _update(collection_name, upsert, multi, spec, doc, check_keys, opts):
    """Get an OP_UPDATE message."""
    flags = 0
    if upsert:
        flags += 1
    if multi:
        flags += 2
    encode = _dict_to_bson  # Make local. Uses extensions.
    encoded_update = encode(doc, check_keys, opts)
    return b"".join([
        _ZERO_32,
        _make_c_string(collection_name),
        _pack_int(flags),
        encode(spec, False, opts),
        encoded_update]), len(encoded_update)
python
{ "resource": "" }
q22416
_update_compressed
train
def _update_compressed(
        collection_name, upsert, multi, spec, doc, check_keys, opts, ctx):
    """Internal compressed unacknowledged update message helper."""
    op_update, max_bson_size = _update(
        collection_name, upsert, multi, spec, doc, check_keys, opts)
    rid, msg = _compress(2001, op_update, ctx)
    return rid, msg, max_bson_size
python
{ "resource": "" }
q22417
_update_uncompressed
train
def _update_uncompressed(collection_name, upsert, multi, spec,
                         doc, safe, last_error_args, check_keys, opts):
    """Internal update message helper."""
    op_update, max_bson_size = _update(
        collection_name, upsert, multi, spec, doc, check_keys, opts)
    rid, msg = __pack_message(2001, op_update)
    if safe:
        rid, gle, _ = __last_error(collection_name, last_error_args)
        return rid, msg + gle, max_bson_size
    return rid, msg, max_bson_size
python
{ "resource": "" }
q22418
_op_msg_compressed
train
def _op_msg_compressed(flags, command, identifier, docs, check_keys,
                       opts, ctx):
    """Internal OP_MSG message helper."""
    msg, total_size, max_bson_size = _op_msg_no_header(
        flags, command, identifier, docs, check_keys, opts)
    rid, msg = _compress(2013, msg, ctx)
    return rid, msg, total_size, max_bson_size
python
{ "resource": "" }
q22419
_op_msg_uncompressed
train
def _op_msg_uncompressed(flags, command, identifier, docs, check_keys, opts):
    """Internal uncompressed OP_MSG message helper."""
    data, total_size, max_bson_size = _op_msg_no_header(
        flags, command, identifier, docs, check_keys, opts)
    request_id, op_message = __pack_message(2013, data)
    return request_id, op_message, total_size, max_bson_size
python
{ "resource": "" }
q22420
_query
train
def _query(options, collection_name, num_to_skip,
           num_to_return, query, field_selector, opts, check_keys):
    """Get an OP_QUERY message."""
    encoded = _dict_to_bson(query, check_keys, opts)
    if field_selector:
        efs = _dict_to_bson(field_selector, False, opts)
    else:
        efs = b""
    max_bson_size = max(len(encoded), len(efs))
    return b"".join([
        _pack_int(options),
        _make_c_string(collection_name),
        _pack_int(num_to_skip),
        _pack_int(num_to_return),
        encoded,
        efs]), max_bson_size
python
{ "resource": "" }
q22421
_query_compressed
train
def _query_compressed(options, collection_name, num_to_skip,
                      num_to_return, query, field_selector,
                      opts, check_keys=False, ctx=None):
    """Internal compressed query message helper."""
    op_query, max_bson_size = _query(
        options, collection_name, num_to_skip, num_to_return,
        query, field_selector, opts, check_keys)
    rid, msg = _compress(2004, op_query, ctx)
    return rid, msg, max_bson_size
python
{ "resource": "" }
q22422
_query_uncompressed
train
def _query_uncompressed(options, collection_name, num_to_skip,
                        num_to_return, query, field_selector, opts,
                        check_keys=False):
    """Internal query message helper."""
    op_query, max_bson_size = _query(
        options, collection_name, num_to_skip, num_to_return,
        query, field_selector, opts, check_keys)
    rid, msg = __pack_message(2004, op_query)
    return rid, msg, max_bson_size
python
{ "resource": "" }
q22423
_get_more
train
def _get_more(collection_name, num_to_return, cursor_id):
    """Get an OP_GET_MORE message."""
    return b"".join([
        _ZERO_32,
        _make_c_string(collection_name),
        _pack_int(num_to_return),
        _pack_long_long(cursor_id)])
python
{ "resource": "" }
q22424
_get_more_compressed
train
def _get_more_compressed(collection_name, num_to_return, cursor_id, ctx):
    """Internal compressed getMore message helper."""
    return _compress(
        2005, _get_more(collection_name, num_to_return, cursor_id), ctx)
python
{ "resource": "" }
q22425
_delete
train
def _delete(collection_name, spec, opts, flags):
    """Get an OP_DELETE message."""
    encoded = _dict_to_bson(spec, False, opts)  # Uses extensions.
    return b"".join([
        _ZERO_32,
        _make_c_string(collection_name),
        _pack_int(flags),
        encoded]), len(encoded)
python
{ "resource": "" }
q22426
_delete_compressed
train
def _delete_compressed(collection_name, spec, opts, flags, ctx):
    """Internal compressed unacknowledged delete message helper."""
    op_delete, max_bson_size = _delete(collection_name, spec, opts, flags)
    rid, msg = _compress(2006, op_delete, ctx)
    return rid, msg, max_bson_size
python
{ "resource": "" }
q22427
_delete_uncompressed
train
def _delete_uncompressed(
        collection_name, spec, safe, last_error_args, opts, flags=0):
    """Internal delete message helper."""
    op_delete, max_bson_size = _delete(collection_name, spec, opts, flags)
    rid, msg = __pack_message(2006, op_delete)
    if safe:
        rid, gle, _ = __last_error(collection_name, last_error_args)
        return rid, msg + gle, max_bson_size
    return rid, msg, max_bson_size
python
{ "resource": "" }
q22428
_raise_document_too_large
train
def _raise_document_too_large(operation, doc_size, max_size):
    """Internal helper for raising DocumentTooLarge."""
    if operation == "insert":
        raise DocumentTooLarge("BSON document too large (%d bytes)"
                               " - the connected server supports"
                               " BSON document sizes up to %d"
                               " bytes." % (doc_size, max_size))
    else:
        # There's nothing intelligent we can say
        # about size for update and delete
        raise DocumentTooLarge("%r command document too large" % (operation,))
python
{ "resource": "" }
q22429
_do_batched_insert
train
def _do_batched_insert(collection_name, docs, check_keys,
                       safe, last_error_args, continue_on_error, opts,
                       ctx):
    """Insert `docs` using multiple batches."""
    def _insert_message(insert_message, send_safe):
        """Build the insert message with header and GLE."""
        request_id, final_message = __pack_message(2002, insert_message)
        if send_safe:
            request_id, error_message, _ = __last_error(collection_name,
                                                        last_error_args)
            final_message += error_message
        return request_id, final_message

    send_safe = safe or not continue_on_error
    last_error = None
    data = StringIO()
    data.write(struct.pack("<i", int(continue_on_error)))
    data.write(_make_c_string(collection_name))
    message_length = begin_loc = data.tell()
    has_docs = False
    to_send = []
    encode = _dict_to_bson  # Make local
    compress = ctx.compress and not (safe or send_safe)
    for doc in docs:
        encoded = encode(doc, check_keys, opts)
        encoded_length = len(encoded)
        too_large = (encoded_length > ctx.max_bson_size)

        message_length += encoded_length
        if message_length < ctx.max_message_size and not too_large:
            data.write(encoded)
            to_send.append(doc)
            has_docs = True
            continue

        if has_docs:
            # We have enough data, send this message.
            try:
                if compress:
                    rid, msg = None, data.getvalue()
                else:
                    rid, msg = _insert_message(data.getvalue(), send_safe)
                ctx.legacy_bulk_insert(
                    rid, msg, 0, send_safe, to_send, compress)
            # Exception type could be OperationFailure or a subtype
            # (e.g. DuplicateKeyError)
            except OperationFailure as exc:
                # Like it says, continue on error...
                if continue_on_error:
                    # Store exception details to re-raise after the final batch.
                    last_error = exc
                # With unacknowledged writes just return at the first error.
                elif not safe:
                    return
                # With acknowledged writes raise immediately.
                else:
                    raise

        if too_large:
            _raise_document_too_large(
                "insert", encoded_length, ctx.max_bson_size)

        message_length = begin_loc + encoded_length
        data.seek(begin_loc)
        data.truncate()
        data.write(encoded)
        to_send = [doc]

    if not has_docs:
        raise InvalidOperation("cannot do an empty bulk insert")

    if compress:
        request_id, msg = None, data.getvalue()
    else:
        request_id, msg = _insert_message(data.getvalue(), safe)
    ctx.legacy_bulk_insert(request_id, msg, 0, safe, to_send, compress)

    # Re-raise any exception stored due to continue_on_error
    if last_error is not None:
        raise last_error
python
{ "resource": "" }
q22430
_batched_op_msg_impl
train
def _batched_op_msg_impl(
        operation, command, docs, check_keys, ack, opts, ctx, buf):
    """Create a batched OP_MSG write."""
    max_bson_size = ctx.max_bson_size
    max_write_batch_size = ctx.max_write_batch_size
    max_message_size = ctx.max_message_size

    flags = b"\x00\x00\x00\x00" if ack else b"\x02\x00\x00\x00"
    # Flags
    buf.write(flags)

    # Type 0 Section
    buf.write(b"\x00")
    buf.write(_dict_to_bson(command, False, opts))

    # Type 1 Section
    buf.write(b"\x01")
    size_location = buf.tell()
    # Save space for size
    buf.write(b"\x00\x00\x00\x00")
    try:
        buf.write(_OP_MSG_MAP[operation])
    except KeyError:
        raise InvalidOperation('Unknown command')

    if operation in (_UPDATE, _DELETE):
        check_keys = False

    to_send = []
    idx = 0
    for doc in docs:
        # Encode the current operation
        value = _dict_to_bson(doc, check_keys, opts)
        doc_length = len(value)
        new_message_size = buf.tell() + doc_length
        # Does first document exceed max_message_size?
        doc_too_large = (idx == 0 and (new_message_size > max_message_size))
        # When OP_MSG is used unacknowledged we have to check
        # document size client side or applications won't be notified.
        # Otherwise we let the server deal with documents that are too large
        # since ordered=False causes those documents to be skipped instead of
        # halting the bulk write operation.
        unacked_doc_too_large = (not ack and (doc_length > max_bson_size))
        if doc_too_large or unacked_doc_too_large:
            write_op = list(_FIELD_MAP.keys())[operation]
            _raise_document_too_large(
                write_op, len(value), max_bson_size)
        # We have enough data, return this batch.
        if new_message_size > max_message_size:
            break
        buf.write(value)
        to_send.append(doc)
        idx += 1
        # We have enough documents, return this batch.
        if idx == max_write_batch_size:
            break

    # Write type 1 section size
    length = buf.tell()
    buf.seek(size_location)
    buf.write(_pack_int(length - size_location))

    return to_send, length
python
{ "resource": "" }
q22431
_encode_batched_op_msg
train
def _encode_batched_op_msg(
        operation, command, docs, check_keys, ack, opts, ctx):
    """Encode the next batched insert, update, or delete operation
    as OP_MSG.
    """
    buf = StringIO()

    to_send, _ = _batched_op_msg_impl(
        operation, command, docs, check_keys, ack, opts, ctx, buf)
    return buf.getvalue(), to_send
python
{ "resource": "" }
q22432
_batched_op_msg_compressed
train
def _batched_op_msg_compressed(
        operation, command, docs, check_keys, ack, opts, ctx):
    """Create the next batched insert, update, or delete operation
    with OP_MSG, compressed.
    """
    data, to_send = _encode_batched_op_msg(
        operation, command, docs, check_keys, ack, opts, ctx)

    request_id, msg = _compress(
        2013, data, ctx.sock_info.compression_context)
    return request_id, msg, to_send
python
{ "resource": "" }
q22433
_batched_op_msg
train
def _batched_op_msg(
        operation, command, docs, check_keys, ack, opts, ctx):
    """OP_MSG implementation entry point."""
    buf = StringIO()

    # Save space for message length and request id
    buf.write(_ZERO_64)
    # responseTo, opCode
    buf.write(b"\x00\x00\x00\x00\xdd\x07\x00\x00")

    to_send, length = _batched_op_msg_impl(
        operation, command, docs, check_keys, ack, opts, ctx, buf)

    # Header - request id and message length
    buf.seek(4)
    request_id = _randint()
    buf.write(_pack_int(request_id))
    buf.seek(0)
    buf.write(_pack_int(length))

    return request_id, buf.getvalue(), to_send
python
{ "resource": "" }
q22434
_do_batched_op_msg
train
def _do_batched_op_msg(
        namespace, operation, command, docs, check_keys, opts, ctx):
    """Create the next batched insert, update, or delete operation
    using OP_MSG.
    """
    command['$db'] = namespace.split('.', 1)[0]
    if 'writeConcern' in command:
        ack = bool(command['writeConcern'].get('w', 1))
    else:
        ack = True
    if ctx.sock_info.compression_context:
        return _batched_op_msg_compressed(
            operation, command, docs, check_keys, ack, opts, ctx)
    return _batched_op_msg(
        operation, command, docs, check_keys, ack, opts, ctx)
python
{ "resource": "" }
q22435
_batched_write_command_compressed
train
def _batched_write_command_compressed(
        namespace, operation, command, docs, check_keys, opts, ctx):
    """Create the next batched insert, update, or delete command,
    compressed.
    """
    data, to_send = _encode_batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx)

    request_id, msg = _compress(
        2004, data, ctx.sock_info.compression_context)
    return request_id, msg, to_send
python
{ "resource": "" }
q22436
_encode_batched_write_command
train
def _encode_batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx):
    """Encode the next batched insert, update, or delete command."""
    buf = StringIO()

    to_send, _ = _batched_write_command_impl(
        namespace, operation, command, docs, check_keys, opts, ctx, buf)
    return buf.getvalue(), to_send
python
{ "resource": "" }
q22437
_batched_write_command
train
def _batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx):
    """Create the next batched insert, update, or delete command."""
    buf = StringIO()

    # Save space for message length and request id
    buf.write(_ZERO_64)
    # responseTo, opCode
    buf.write(b"\x00\x00\x00\x00\xd4\x07\x00\x00")

    # Write OP_QUERY write command
    to_send, length = _batched_write_command_impl(
        namespace, operation, command, docs, check_keys, opts, ctx, buf)

    # Header - request id and message length
    buf.seek(4)
    request_id = _randint()
    buf.write(_pack_int(request_id))
    buf.seek(0)
    buf.write(_pack_int(length))

    return request_id, buf.getvalue(), to_send
python
{ "resource": "" }
q22438
_do_batched_write_command
train
def _do_batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx):
    """Batched write commands entry point."""
    if ctx.sock_info.compression_context:
        return _batched_write_command_compressed(
            namespace, operation, command, docs, check_keys, opts, ctx)
    return _batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx)
python
{ "resource": "" }
q22439
_do_bulk_write_command
train
def _do_bulk_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx):
    """Bulk write commands entry point."""
    if ctx.sock_info.max_wire_version > 5:
        return _do_batched_op_msg(
            namespace, operation, command, docs, check_keys, opts, ctx)
    return _do_batched_write_command(
        namespace, operation, command, docs, check_keys, opts, ctx)
python
{ "resource": "" }
q22440
_batched_write_command_impl
train
def _batched_write_command_impl(
        namespace, operation, command, docs, check_keys, opts, ctx, buf):
    """Create a batched OP_QUERY write command."""
    max_bson_size = ctx.max_bson_size
    max_write_batch_size = ctx.max_write_batch_size
    # Max BSON object size + 16k - 2 bytes for ending NUL bytes.
    # Server guarantees there is enough room: SERVER-10643.
    max_cmd_size = max_bson_size + _COMMAND_OVERHEAD

    # No options
    buf.write(_ZERO_32)
    # Namespace as C string
    buf.write(b(namespace))
    buf.write(_ZERO_8)
    # Skip: 0, Limit: -1
    buf.write(_SKIPLIM)

    # Where to write command document length
    command_start = buf.tell()
    buf.write(bson.BSON.encode(command))

    # Start of payload
    buf.seek(-1, 2)
    # Work around some Jython weirdness.
    buf.truncate()
    try:
        buf.write(_OP_MAP[operation])
    except KeyError:
        raise InvalidOperation('Unknown command')

    if operation in (_UPDATE, _DELETE):
        check_keys = False

    # Where to write list document length
    list_start = buf.tell() - 4
    to_send = []
    idx = 0
    for doc in docs:
        # Encode the current operation
        key = b(str(idx))
        value = bson.BSON.encode(doc, check_keys, opts)
        # Is there enough room to add this document? max_cmd_size accounts for
        # the two trailing null bytes.
        enough_data = (buf.tell() + len(key) + len(value)) >= max_cmd_size
        enough_documents = (idx >= max_write_batch_size)
        if enough_data or enough_documents:
            if not idx:
                write_op = list(_FIELD_MAP.keys())[operation]
                _raise_document_too_large(
                    write_op, len(value), max_bson_size)
            break
        buf.write(_BSONOBJ)
        buf.write(key)
        buf.write(_ZERO_8)
        buf.write(value)
        to_send.append(doc)
        idx += 1

    # Finalize the current OP_QUERY message.
    # Close list and command documents
    buf.write(_ZERO_16)

    # Write document lengths and request id
    length = buf.tell()
    buf.seek(list_start)
    buf.write(_pack_int(length - list_start - 1))
    buf.seek(command_start)
    buf.write(_pack_int(length - command_start))

    return to_send, length
python
{ "resource": "" }
q22441
_OpReply.raw_response
train
def raw_response(self, cursor_id=None):
    """Check the response header from the database, without decoding BSON.

    Check the response for errors and unpack.

    Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or
    OperationFailure.

    :Parameters:
      - `cursor_id` (optional): cursor_id we sent to get this response -
        used for raising an informative exception when we get cursor id not
        valid at server response.
    """
    if self.flags & 1:
        # Shouldn't get this response if we aren't doing a getMore
        if cursor_id is None:
            raise ProtocolError("No cursor id for getMore operation")

        # Fake a getMore command response. OP_GET_MORE provides no
        # document.
        msg = "Cursor not found, cursor id: %d" % (cursor_id,)
        errobj = {"ok": 0, "errmsg": msg, "code": 43}
        raise CursorNotFound(msg, 43, errobj)
    elif self.flags & 2:
        error_object = bson.BSON(self.documents).decode()
        # Fake the ok field if it doesn't exist.
        error_object.setdefault("ok", 0)
        if error_object["$err"].startswith("not master"):
            raise NotMasterError(error_object["$err"], error_object)
        elif error_object.get("code") == 50:
            raise ExecutionTimeout(error_object.get("$err"),
                                   error_object.get("code"),
                                   error_object)
        raise OperationFailure("database error: %s" %
                               error_object.get("$err"),
                               error_object.get("code"),
                               error_object)
    return [self.documents]
python
{ "resource": "" }
q22442
_OpReply.unpack
train
def unpack(cls, msg):
    """Construct an _OpReply from raw bytes."""
    # PYTHON-945: ignore starting_from field.
    flags, cursor_id, _, number_returned = cls.UNPACK_FROM(msg)

    # Convert Python 3 memoryview to bytes. Note we should call
    # memoryview.tobytes() if we start using memoryview in Python 2.7.
    documents = bytes(msg[20:])
    return cls(flags, cursor_id, number_returned, documents)
python
{ "resource": "" }
q22443
_OpMsg.unpack_response
train
def unpack_response(self, cursor_id=None,
                    codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
                    user_fields=None, legacy_response=False):
    """Unpack an OP_MSG command response.

    :Parameters:
      - `cursor_id` (optional): Ignored, for compatibility with _OpReply.
      - `codec_options` (optional): an instance of
        :class:`~bson.codec_options.CodecOptions`
    """
    # If _OpMsg is in-use, this cannot be a legacy response.
    assert not legacy_response
    return bson._decode_all_selective(
        self.payload_document, codec_options, user_fields)
python
{ "resource": "" }
q22444
_OpMsg.unpack
train
def unpack(cls, msg):
    """Construct an _OpMsg from raw bytes."""
    flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg)
    if flags != 0:
        raise ProtocolError("Unsupported OP_MSG flags (%r)" % (flags,))
    if first_payload_type != 0:
        raise ProtocolError(
            "Unsupported OP_MSG payload type (%r)" % (first_payload_type,))

    if len(msg) != first_payload_size + 5:
        raise ProtocolError("Unsupported OP_MSG reply: >1 section")

    # Convert Python 3 memoryview to bytes. Note we should call
    # memoryview.tobytes() if we start using memoryview in Python 2.7.
    payload_document = bytes(msg[5:])
    return cls(flags, payload_document)
python
{ "resource": "" }
q22445
TopologyDescription.has_known_servers
train
def has_known_servers(self):
    """Whether there are any Servers of types besides Unknown."""
    return any(s for s in self._server_descriptions.values()
               if s.is_server_type_known)
python
{ "resource": "" }
q22446
MongoClient._cache_index
train
def _cache_index(self, dbname, collection, index, cache_for):
    """Add an index to the index cache for ensure_index operations."""
    now = datetime.datetime.utcnow()
    expire = datetime.timedelta(seconds=cache_for) + now
    with self.__index_cache_lock:
        if dbname not in self.__index_cache:
            self.__index_cache[dbname] = {}
            self.__index_cache[dbname][collection] = {}
            self.__index_cache[dbname][collection][index] = expire
        elif collection not in self.__index_cache[dbname]:
            self.__index_cache[dbname][collection] = {}
            self.__index_cache[dbname][collection][index] = expire
        else:
            self.__index_cache[dbname][collection][index] = expire
python
{ "resource": "" }
q22447
MongoClient.close
train
def close(self):
    """Cleanup client resources and disconnect from MongoDB.

    On MongoDB >= 3.6, end all server sessions created by this client by
    sending one or more endSessions commands.

    Close all sockets in the connection pools and stop the monitor threads.
    If this instance is used again it will be automatically re-opened and
    the threads restarted.

    .. versionchanged:: 3.6
       End all server sessions created by this client.
    """
    session_ids = self._topology.pop_all_sessions()
    if session_ids:
        self._end_sessions(session_ids)
    # Stop the periodic task thread and then run _process_periodic_tasks
    # to send pending killCursor requests before closing the topology.
    self._kill_cursors_executor.close()
    self._process_periodic_tasks()
    self._topology.close()
python
{ "resource": "" }
q22448
MongoClient._select_server
train
def _select_server(self, server_selector, session, address=None):
    """Select a server to run an operation on this client.

    :Parameters:
      - `server_selector`: The server selector to use if the session is
        not pinned and no address is given.
      - `session`: The ClientSession for the next operation, or None. May
        be pinned to a mongos server address.
      - `address` (optional): Address when sending a message
        to a specific server, used for getMore.
    """
    try:
        topology = self._get_topology()
        address = address or (session and session._pinned_address)
        if address:
            # We're running a getMore or this session is pinned to a mongos.
            server = topology.select_server_by_address(address)
            if not server:
                raise AutoReconnect('server %s:%d no longer available'
                                    % address)
        else:
            server = topology.select_server(server_selector)
            # Pin this session to the selected server if it's performing a
            # sharded transaction.
            if server.description.mongos and (session and
                                              session._in_transaction):
                session._pin_mongos(server)
        return server
    except PyMongoError as exc:
        if session and exc.has_error_label("TransientTransactionError"):
            session._unpin_mongos()
        raise
python
{ "resource": "" }
q22449
MongoClient._reset_on_error
train
def _reset_on_error(self, server_address, session):
    """On "not master" or "node is recovering" errors reset the server
    according to the SDAM spec. Unpin the session on transient transaction
    errors.
    """
    try:
        try:
            yield
        except PyMongoError as exc:
            if session and exc.has_error_label(
                    "TransientTransactionError"):
                session._unpin_mongos()
            raise
    except NetworkTimeout:
        # The socket has been closed. Don't reset the server.
        # Server Discovery And Monitoring Spec: "When an application
        # operation fails because of any network error besides a socket
        # timeout...."
        raise
    except NotMasterError:
        # "When the client sees a "not master" error it MUST replace the
        # server's description with type Unknown. It MUST request an
        # immediate check of the server."
        self._reset_server_and_request_check(server_address)
        raise
    except ConnectionFailure:
        # "Client MUST replace the server's description with type Unknown
        # ... MUST NOT request an immediate check of the server."
        self.__reset_server(server_address)
        raise
    except OperationFailure as exc:
        if exc.code in helpers._RETRYABLE_ERROR_CODES:
            # Do not request an immediate check since the server is likely
            # shutting down.
            self.__reset_server(server_address)
        raise
python
{ "resource": "" }
q22450
MongoClient._retryable_write
train
def _retryable_write(self, retryable, func, session):
    """Internal retryable write helper."""
    with self._tmp_session(session) as s:
        return self._retry_with_session(retryable, func, s, None)
python
{ "resource": "" }
q22451
MongoClient.close_cursor
train
def close_cursor(self, cursor_id, address=None):
    """DEPRECATED - Send a kill cursors message soon with the given id.

    Raises :class:`TypeError` if `cursor_id` is not an instance of
    ``(int, long)``. What closing the cursor actually means
    depends on this client's cursor manager.

    This method may be called from a :class:`~pymongo.cursor.Cursor`
    destructor during garbage collection, so it isn't safe to take a
    lock or do network I/O. Instead, we schedule the cursor to be closed
    soon on a background thread.

    :Parameters:
      - `cursor_id`: id of cursor to close
      - `address` (optional): (host, port) pair of the cursor's server.
        If it is not provided, the client attempts to close the cursor on
        the primary or standalone, or a mongos server.

    .. versionchanged:: 3.7
       Deprecated.

    .. versionchanged:: 3.0
       Added ``address`` parameter.
    """
    warnings.warn(
        "close_cursor is deprecated.",
        DeprecationWarning,
        stacklevel=2)
    if not isinstance(cursor_id, integer_types):
        raise TypeError("cursor_id must be an instance of (int, long)")

    self._close_cursor(cursor_id, address)
python
{ "resource": "" }
q22452
MongoClient._kill_cursors
train
def _kill_cursors(self, cursor_ids, address, topology, session):
    """Send a kill cursors message with the given ids."""
    listeners = self._event_listeners
    publish = listeners.enabled_for_commands
    if address:
        # address could be a tuple or _CursorAddress, but
        # select_server_by_address needs (host, port).
        server = topology.select_server_by_address(tuple(address))
    else:
        # Application called close_cursor() with no address.
        server = topology.select_server(writable_server_selector)

    try:
        namespace = address.namespace
        db, coll = namespace.split('.', 1)
    except AttributeError:
        namespace = None
        db = coll = "OP_KILL_CURSORS"

    spec = SON([('killCursors', coll), ('cursors', cursor_ids)])
    with server.get_socket(self.__all_credentials) as sock_info:
        if sock_info.max_wire_version >= 4 and namespace is not None:
            sock_info.command(db, spec, session=session, client=self)
        else:
            if publish:
                start = datetime.datetime.now()
            request_id, msg = message.kill_cursors(cursor_ids)
            if publish:
                duration = datetime.datetime.now() - start
                # Here and below, address could be a tuple or
                # _CursorAddress. We always want to publish a
                # tuple to match the rest of the monitoring
                # API.
                listeners.publish_command_start(
                    spec, db, request_id, tuple(address))
                start = datetime.datetime.now()

            try:
                sock_info.send_message(msg, 0)
            except Exception as exc:
                if publish:
                    dur = ((datetime.datetime.now() - start) + duration)
                    listeners.publish_command_failure(
                        dur, message._convert_exception(exc),
                        'killCursors', request_id, tuple(address))
                raise

            if publish:
                duration = ((datetime.datetime.now() - start) + duration)
                # OP_KILL_CURSORS returns no reply, fake one.
                reply = {'cursorsUnknown': cursor_ids, 'ok': 1}
                listeners.publish_command_success(
                    duration, reply, 'killCursors', request_id,
                    tuple(address))
python
{ "resource": "" }
q22453
MongoClient._process_periodic_tasks
train
def _process_periodic_tasks(self):
    """Process any pending kill cursors requests and
    maintain connection pool parameters."""
    address_to_cursor_ids = defaultdict(list)

    # Other threads or the GC may append to the queue concurrently.
    while True:
        try:
            address, cursor_ids = self.__kill_cursors_queue.pop()
        except IndexError:
            break

        address_to_cursor_ids[address].extend(cursor_ids)

    # Don't re-open topology if it's closed and there's no pending cursors.
    if address_to_cursor_ids:
        topology = self._get_topology()
        for address, cursor_ids in address_to_cursor_ids.items():
            try:
                self._kill_cursors(
                    cursor_ids, address, topology, session=None)
            except Exception:
                helpers._handle_exception()
    try:
        self._topology.update_pool()
    except Exception:
        helpers._handle_exception()
python
{ "resource": "" }
q22454
MongoClient.start_session
train
def start_session(self,
                  causal_consistency=True,
                  default_transaction_options=None):
    """Start a logical session.

    This method takes the same parameters as
    :class:`~pymongo.client_session.SessionOptions`. See the
    :mod:`~pymongo.client_session` module for details and examples.

    Requires MongoDB 3.6. It is an error to call :meth:`start_session`
    if this client has been authenticated to multiple databases using the
    deprecated method :meth:`~pymongo.database.Database.authenticate`.

    A :class:`~pymongo.client_session.ClientSession` may only be used with
    the MongoClient that started it.

    :Returns:
      An instance of :class:`~pymongo.client_session.ClientSession`.

    .. versionadded:: 3.6
    """
    return self.__start_session(
        False,
        causal_consistency=causal_consistency,
        default_transaction_options=default_transaction_options)
python
{ "resource": "" }
q22455
MongoClient.server_info
train
def server_info(self, session=None):
    """Get information about the MongoDB server we're connected to.

    :Parameters:
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    return self.admin.command("buildinfo",
                              read_preference=ReadPreference.PRIMARY,
                              session=session)
python
{ "resource": "" }
q22456
MongoClient.list_databases
train
def list_databases(self, session=None, **kwargs):
    """Get a cursor over the databases of the connected server.

    :Parameters:
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): Optional parameters of the
        `listDatabases command
        <https://docs.mongodb.com/manual/reference/command/listDatabases/>`_
        can be passed as keyword arguments to this method. The supported
        options differ by server version.

    :Returns:
      An instance of :class:`~pymongo.command_cursor.CommandCursor`.

    .. versionadded:: 3.6
    """
    cmd = SON([("listDatabases", 1)])
    cmd.update(kwargs)
    admin = self._database_default_options("admin")
    res = admin._retryable_read_command(cmd, session=session)
    # listDatabases doesn't return a cursor (yet). Fake one.
    cursor = {
        "id": 0,
        "firstBatch": res["databases"],
        "ns": "admin.$cmd",
    }
    return CommandCursor(admin["$cmd"], cursor, None)
python
{ "resource": "" }
q22457
MongoClient.list_database_names
train
def list_database_names(self, session=None):
    """Get a list of the names of all databases on the connected server.

    :Parameters:
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.

    .. versionadded:: 3.6
    """
    return [doc["name"]
            for doc in self.list_databases(session, nameOnly=True)]
python
{ "resource": "" }
q22458
MongoClient.get_default_database
train
def get_default_database(self, default=None, codec_options=None,
                         read_preference=None, write_concern=None,
                         read_concern=None):
    """Get the database named in the MongoDB connection URI.

    >>> uri = 'mongodb://host/my_database'
    >>> client = MongoClient(uri)
    >>> db = client.get_default_database()
    >>> assert db.name == 'my_database'
    >>> db = client.get_database()
    >>> assert db.name == 'my_database'

    Useful in scripts where you want to choose which database to use
    based only on the URI in a configuration file.

    :Parameters:
      - `default` (optional): the database name to use if no database name
        was provided in the URI.
      - `codec_options` (optional): An instance of
        :class:`~bson.codec_options.CodecOptions`. If ``None`` (the
        default) the :attr:`codec_options` of this :class:`MongoClient` is
        used.
      - `read_preference` (optional): The read preference to use. If
        ``None`` (the default) the :attr:`read_preference` of this
        :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences`
        for options.
      - `write_concern` (optional): An instance of
        :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
        default) the :attr:`write_concern` of this :class:`MongoClient` is
        used.
      - `read_concern` (optional): An instance of
        :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
        default) the :attr:`read_concern` of this :class:`MongoClient` is
        used.

    .. versionchanged:: 3.8
       Undeprecated. Added the ``default``, ``codec_options``,
       ``read_preference``, ``write_concern`` and ``read_concern``
       parameters.

    .. versionchanged:: 3.5
       Deprecated, use :meth:`get_database` instead.
    """
    if self.__default_database_name is None and default is None:
        raise ConfigurationError(
            'No default database name defined or provided.')

    return database.Database(
        self, self.__default_database_name or default, codec_options,
        read_preference, write_concern, read_concern)
python
{ "resource": "" }
q22459
MongoClient._database_default_options
train
def _database_default_options(self, name):
    """Get a Database instance with the default settings."""
    return self.get_database(
        name, codec_options=DEFAULT_CODEC_OPTIONS,
        read_preference=ReadPreference.PRIMARY,
        write_concern=DEFAULT_WRITE_CONCERN)
python
{ "resource": "" }
q22460
_handle_option_deprecations
train
def _handle_option_deprecations(options):
    """Issue appropriate warnings when deprecated options are present in the
    options dictionary. Removes deprecated option key, value pairs if the
    options dictionary is found to also have the renamed option."""
    undeprecated_options = _CaseInsensitiveDictionary()
    for key, value in iteritems(options):
        optname = str(key).lower()
        if optname in URI_OPTIONS_DEPRECATION_MAP:
            renamed_key = URI_OPTIONS_DEPRECATION_MAP[optname]
            if renamed_key.lower() in options:
                warnings.warn("Deprecated option '%s' ignored in favor of "
                              "'%s'." % (str(key), renamed_key))
                continue
            warnings.warn("Option '%s' is deprecated, use '%s' instead." % (
                str(key), renamed_key))
        undeprecated_options[str(key)] = value
    return undeprecated_options
python
{ "resource": "" }
q22461
_normalize_options
train
def _normalize_options(options):
    """Renames keys in the options dictionary to their internally-used
    names."""
    normalized_options = {}
    for key, value in iteritems(options):
        optname = str(key).lower()
        intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, key)
        normalized_options[intname] = options[key]
    return normalized_options
python
{ "resource": "" }
q22462
split_options
train
def split_options(opts, validate=True, warn=False, normalize=True):
    """Takes the options portion of a MongoDB URI, validates each option
    and returns the options in a dictionary.

    :Parameters:
      - `opts`: A string representing MongoDB URI options.
      - `validate`: If ``True`` (the default), validate and normalize all
        options.
      - `warn`: If ``False`` (the default), suppress all warnings raised
        during validation of options.
      - `normalize`: If ``True`` (the default), renames all options to their
        internally-used names.
    """
    and_idx = opts.find("&")
    semi_idx = opts.find(";")
    try:
        if and_idx >= 0 and semi_idx >= 0:
            raise InvalidURI("Can not mix '&' and ';' for option separators.")
        elif and_idx >= 0:
            options = _parse_options(opts, "&")
        elif semi_idx >= 0:
            options = _parse_options(opts, ";")
        elif opts.find("=") != -1:
            options = _parse_options(opts, None)
        else:
            raise ValueError
    except ValueError:
        raise InvalidURI("MongoDB URI options are key=value pairs.")

    options = _handle_option_deprecations(options)
    if validate:
        options = validate_options(options, warn)

    if normalize:
        options = _normalize_options(options)

    return options
python
{ "resource": "" }
q22463
GridFS.new_file
train
def new_file(self, **kwargs):
    """Create a new file in GridFS.

    Returns a new :class:`~gridfs.grid_file.GridIn` instance to
    which data can be written. Any keyword arguments will be
    passed through to :meth:`~gridfs.grid_file.GridIn`.

    If the ``"_id"`` of the file is manually specified, it must
    not already exist in GridFS. Otherwise
    :class:`~gridfs.errors.FileExists` is raised.

    :Parameters:
      - `**kwargs` (optional): keyword arguments for file creation
    """
    # No need for __ensure_index_files_id() here; GridIn ensures
    # the (files_id, n) index when needed.
    return GridIn(
        self.__collection, disable_md5=self.__disable_md5, **kwargs)
python
{ "resource": "" }
q22464
GridFS.get_version
train
def get_version(self, filename=None, version=-1, session=None, **kwargs):
    """Get a file from GridFS by ``"filename"`` or metadata fields.

    Returns a version of the file in GridFS whose filename matches
    `filename` and whose metadata fields match the supplied keyword
    arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.

    Version numbering is a convenience atop the GridFS API provided
    by MongoDB. If more than one file matches the query (either by
    `filename` alone, by metadata fields, or by a combination of
    both), then version ``-1`` will be the most recently uploaded
    matching file, ``-2`` the second most recently uploaded, etc.
    Version ``0`` will be the first version uploaded, ``1`` the
    second version, etc. So if three versions have been uploaded,
    then version ``0`` is the same as version ``-3``, version ``1``
    is the same as version ``-2``, and version ``2`` is the same as
    version ``-1``.

    Raises :class:`~gridfs.errors.NoFile` if no such version of
    that file exists.

    :Parameters:
      - `filename`: ``"filename"`` of the file to get, or `None`
      - `version` (optional): version of the file to get (defaults
        to -1, the most recent version uploaded)
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`
      - `**kwargs` (optional): find files by custom metadata.

    .. versionchanged:: 3.6
       Added ``session`` parameter.

    .. versionchanged:: 3.1
       ``get_version`` no longer ensures indexes.
    """
    query = kwargs
    if filename is not None:
        query["filename"] = filename

    cursor = self.__files.find(query, session=session)
    if version < 0:
        skip = abs(version) - 1
        cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING)
    else:
        cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING)
    try:
        doc = next(cursor)
        return GridOut(
            self.__collection, file_document=doc, session=session)
    except StopIteration:
        raise NoFile("no version %d for filename %r" % (version, filename))
python
{ "resource": "" }
q22465
GridFS.get_last_version
train
def get_last_version(self, filename=None, session=None, **kwargs):
    """Get the most recent version of a file in GridFS by ``"filename"``
    or metadata fields.

    Equivalent to calling :meth:`get_version` with the default
    `version` (``-1``).

    :Parameters:
      - `filename`: ``"filename"`` of the file to get, or `None`
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`
      - `**kwargs` (optional): find files by custom metadata.

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    return self.get_version(filename=filename, session=session, **kwargs)
python
{ "resource": "" }
q22466
GridFSBucket.upload_from_stream
train
def upload_from_stream(self, filename, source, chunk_size_bytes=None,
                       metadata=None, session=None):
    """Uploads a user file to a GridFS bucket.

    Reads the contents of the user file from `source` and uploads
    it to the file `filename`. Source can be a string or file-like object.
    For example::

      my_db = MongoClient().test
      fs = GridFSBucket(my_db)
      file_id = fs.upload_from_stream(
          "test_file",
          "data I want to store!",
          chunk_size_bytes=4,
          metadata={"contentType": "text/plain"})

    Returns the _id of the uploaded file.

    Raises :exc:`~gridfs.errors.NoFile` if no such version of
    that file exists.
    Raises :exc:`~ValueError` if `filename` is not a string.

    :Parameters:
      - `filename`: The name of the file to upload.
      - `source`: The source stream of the content to be uploaded. Must be
        a file-like object that implements :meth:`read` or a string.
      - `chunk_size_bytes` (optional): The number of bytes per chunk of this
        file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`.
      - `metadata` (optional): User data for the 'metadata' field of the
        files collection document. If not provided the metadata field will
        be omitted from the files collection document.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    with self.open_upload_stream(
            filename, chunk_size_bytes, metadata, session=session) as gin:
        gin.write(source)

    return gin._id
python
{ "resource": "" }
q22467
GridFSBucket.rename
train
def rename(self, file_id, new_filename, session=None):
    """Renames the stored file with the specified file_id.

    For example::

      my_db = MongoClient().test
      fs = GridFSBucket(my_db)
      # Get _id of file to rename
      file_id = fs.upload_from_stream("test_file", "data I want to store!")
      fs.rename(file_id, "new_test_name")

    Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.

    :Parameters:
      - `file_id`: The _id of the file to be renamed.
      - `new_filename`: The new name of the file.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    result = self._files.update_one({"_id": file_id},
                                    {"$set": {"filename": new_filename}},
                                    session=session)
    if not result.matched_count:
        raise NoFile("no files could be renamed %r because none "
                     "matched file_id %i" % (new_filename, file_id))
python
{ "resource": "" }
q22468
Database.with_options
train
def with_options(self, codec_options=None, read_preference=None,
                 write_concern=None, read_concern=None):
    """Get a clone of this database changing the specified settings.

      >>> db1.read_preference
      Primary()
      >>> from pymongo import ReadPreference
      >>> db2 = db1.with_options(read_preference=ReadPreference.SECONDARY)
      >>> db1.read_preference
      Primary()
      >>> db2.read_preference
      Secondary(tag_sets=None)

    :Parameters:
      - `codec_options` (optional): An instance of
        :class:`~bson.codec_options.CodecOptions`. If ``None`` (the
        default) the :attr:`codec_options` of this :class:`Database` is
        used.
      - `read_preference` (optional): The read preference to use. If
        ``None`` (the default) the :attr:`read_preference` of this
        :class:`Database` is used. See :mod:`~pymongo.read_preferences`
        for options.
      - `write_concern` (optional): An instance of
        :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
        default) the :attr:`write_concern` of this :class:`Database` is
        used.
      - `read_concern` (optional): An instance of
        :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
        default) the :attr:`read_concern` of this :class:`Database` is
        used.

    .. versionadded:: 3.8
    """
    return Database(self.client,
                    self.__name,
                    codec_options or self.codec_options,
                    read_preference or self.read_preference,
                    write_concern or self.write_concern,
                    read_concern or self.read_concern)
python
{ "resource": "" }
q22469
Database.watch
train
def watch(self, pipeline=None, full_document='default', resume_after=None,
          max_await_time_ms=None, batch_size=None, collation=None,
          start_at_operation_time=None, session=None):
    """Watch changes on this database.

    Performs an aggregation with an implicit initial ``$changeStream``
    stage and returns a
    :class:`~pymongo.change_stream.DatabaseChangeStream` cursor which
    iterates over changes on all collections in this database.

    Introduced in MongoDB 4.0.

    .. code-block:: python

       with db.watch() as stream:
           for change in stream:
               print(change)

    The :class:`~pymongo.change_stream.DatabaseChangeStream` iterable
    blocks until the next change document is returned or an error is
    raised. If the
    :meth:`~pymongo.change_stream.DatabaseChangeStream.next` method
    encounters a network error when retrieving a batch from the server,
    it will automatically attempt to recreate the cursor such that no
    change events are missed. Any error encountered during the resume
    attempt indicates there may be an outage and will be raised.

    .. code-block:: python

       try:
           with db.watch(
                   [{'$match': {'operationType': 'insert'}}]) as stream:
               for insert_change in stream:
                   print(insert_change)
       except pymongo.errors.PyMongoError:
           # The ChangeStream encountered an unrecoverable error or the
           # resume attempt failed to recreate the cursor.
           logging.error('...')

    For a precise description of the resume process see the
    `change streams specification`_.

    :Parameters:
      - `pipeline` (optional): A list of aggregation pipeline stages to
        append to an initial ``$changeStream`` stage. Not all
        pipeline stages are valid after a ``$changeStream`` stage, see the
        MongoDB documentation on change streams for the supported stages.
      - `full_document` (optional): The fullDocument to pass as an option
        to the ``$changeStream`` stage. Allowed values: 'default',
        'updateLookup'. Defaults to 'default'. When set to 'updateLookup',
        the change notification for partial updates will include both a
        delta describing the changes to the document, as well as a copy of
        the entire document that was changed from some time after the
        change occurred.
      - `resume_after` (optional): The logical starting point for this
        change stream.
      - `max_await_time_ms` (optional): The maximum time in milliseconds
        for the server to wait for changes before responding to a getMore
        operation.
      - `batch_size` (optional): The maximum number of documents to return
        per batch.
      - `collation` (optional): The :class:`~pymongo.collation.Collation`
        to use for the aggregation.
      - `start_at_operation_time` (optional): If provided, the resulting
        change stream will only return changes that occurred at or after
        the specified :class:`~bson.timestamp.Timestamp`. Requires
        MongoDB >= 4.0.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.

    :Returns:
      A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor.

    .. versionadded:: 3.7

    .. mongodoc:: changeStreams

    .. _change streams specification:
        https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst
    """
    return DatabaseChangeStream(
        self, pipeline, full_document, resume_after, max_await_time_ms,
        batch_size, collation, start_at_operation_time, session)
python
{ "resource": "" }
q22470
Database._retryable_read_command
train
def _retryable_read_command(self, command, value=1, check=True, allowable_errors=None, read_preference=None, codec_options=DEFAULT_CODEC_OPTIONS, session=None, **kwargs): """Same as command but used for retryable read commands.""" if read_preference is None: read_preference = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) def _cmd(session, server, sock_info, slave_ok): return self._command(sock_info, command, slave_ok, value, check, allowable_errors, read_preference, codec_options, session=session, **kwargs) return self.__client._retryable_read( _cmd, read_preference, session)
python
{ "resource": "" }
q22471
Database.list_collections
train
def list_collections(self, session=None, filter=None, **kwargs): """Get a cursor over the collections of this database. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `filter` (optional): A query document to filter the list of collections returned from the listCollections command. - `**kwargs` (optional): Optional parameters of the `listCollections command <https://docs.mongodb.com/manual/reference/command/listCollections/>`_ can be passed as keyword arguments to this method. The supported options differ by server version. :Returns: An instance of :class:`~pymongo.command_cursor.CommandCursor`. .. versionadded:: 3.6 """ if filter is not None: kwargs['filter'] = filter read_pref = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) def _cmd(session, server, sock_info, slave_okay): return self._list_collections( sock_info, slave_okay, session, read_preference=read_pref, **kwargs) return self.__client._retryable_read( _cmd, read_pref, session)
python
{ "resource": "" }
q22472
Database.validate_collection
train
def validate_collection(self, name_or_collection, scandata=False, full=False, session=None): """Validate a collection. Returns a dict of validation info. Raises CollectionInvalid if validation fails. :Parameters: - `name_or_collection`: A Collection object or the name of a collection to validate. - `scandata`: Do extra checks beyond checking the overall structure of the collection. - `full`: Have the server do a more thorough scan of the collection. Use with `scandata` for a thorough scan of the structure of the collection and the individual documents. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. .. versionchanged:: 3.6 Added ``session`` parameter. """ name = name_or_collection if isinstance(name, Collection): name = name.name if not isinstance(name, string_type): raise TypeError("name_or_collection must be an instance of " "%s or Collection" % (string_type.__name__,)) result = self.command("validate", _unicode(name), scandata=scandata, full=full, session=session) valid = True # Pre 1.9 results if "result" in result: info = result["result"] if info.find("exception") != -1 or info.find("corrupt") != -1: raise CollectionInvalid("%s invalid: %s" % (name, info)) # Sharded results elif "raw" in result: for _, res in iteritems(result["raw"]): if "result" in res: info = res["result"] if (info.find("exception") != -1 or info.find("corrupt") != -1): raise CollectionInvalid("%s invalid: " "%s" % (name, info)) elif not res.get("valid", False): valid = False break # Post 1.9 non-sharded results. elif not result.get("valid", False): valid = False if not valid: raise CollectionInvalid("%s invalid: %r" % (name, result)) return result
python
{ "resource": "" }
q22473
Database.profiling_level
train
def profiling_level(self, session=None): """Get the database's current profiling level. Returns one of (:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`). :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. .. versionchanged:: 3.6 Added ``session`` parameter. .. mongodoc:: profiling """ result = self.command("profile", -1, session=session) assert result["was"] >= 0 and result["was"] <= 2 return result["was"]
python
{ "resource": "" }
q22474
Database.set_profiling_level
train
def set_profiling_level(self, level, slow_ms=None, session=None): """Set the database's profiling level. :Parameters: - `level`: Specifies a profiling level, see list of possible values below. - `slow_ms`: Optionally modify the threshold for the profile to consider a query or operation. Even if the profiler is off queries slower than the `slow_ms` level will get written to the logs. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. Possible `level` values: +----------------------------+------------------------------------+ | Level | Setting | +============================+====================================+ | :data:`~pymongo.OFF` | Off. No profiling. | +----------------------------+------------------------------------+ | :data:`~pymongo.SLOW_ONLY` | On. Only includes slow operations. | +----------------------------+------------------------------------+ | :data:`~pymongo.ALL` | On. Includes all operations. | +----------------------------+------------------------------------+ Raises :class:`ValueError` if level is not one of (:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`). .. versionchanged:: 3.6 Added ``session`` parameter. .. mongodoc:: profiling """ if not isinstance(level, int) or level < 0 or level > 2: raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)") if slow_ms is not None and not isinstance(slow_ms, int): raise TypeError("slow_ms must be an integer") if slow_ms is not None: self.command("profile", level, slowms=slow_ms, session=session) else: self.command("profile", level, session=session)
python
{ "resource": "" }
q22475
CommandCursor._try_next
train
def _try_next(self, get_more_allowed): """Advance the cursor blocking for at most one getMore command.""" if not len(self.__data) and not self.__killed and get_more_allowed: self._refresh() if len(self.__data): coll = self.__collection return coll.database._fix_outgoing(self.__data.popleft(), coll) else: return None
python
{ "resource": "" }
q22476
ObjectId._random
train
def _random(cls): """Generate a 5-byte random number once per process. """ pid = os.getpid() if pid != cls._pid: cls._pid = pid cls.__random = _random_bytes() return cls.__random
python
{ "resource": "" }
q22477
SocketInfo.command
train
def command(self, dbname, spec, slave_ok=False, read_preference=ReadPreference.PRIMARY, codec_options=DEFAULT_CODEC_OPTIONS, check=True, allowable_errors=None, check_keys=False, read_concern=None, write_concern=None, parse_write_concern_error=False, collation=None, session=None, client=None, retryable_write=False, publish_events=True, user_fields=None): """Execute a command or raise an error. :Parameters: - `dbname`: name of the database on which to run the command - `spec`: a command document as a dict, SON, or mapping object - `slave_ok`: whether to set the SlaveOkay wire protocol bit - `read_preference`: a read preference - `codec_options`: a CodecOptions instance - `check`: raise OperationFailure if there are errors - `allowable_errors`: errors to ignore if `check` is True - `check_keys`: if True, check `spec` for invalid keys - `read_concern`: The read concern for this command. - `write_concern`: The write concern for this command. - `parse_write_concern_error`: Whether to parse the ``writeConcernError`` field in the command response. - `collation`: The collation for this command. - `session`: optional ClientSession instance. - `client`: optional MongoClient for gossipping $clusterTime. - `retryable_write`: True if this command is a retryable write. - `publish_events`: Should we publish events for this command? - `user_fields` (optional): Response fields that should be decoded using the TypeDecoders from codec_options, passed to bson._decode_all_selective. """ self.validate_session(client, session) session = _validate_session_write_concern(session, write_concern) # Ensure command name remains in first place. if not isinstance(spec, ORDERED_TYPES): spec = SON(spec) if (read_concern and self.max_wire_version < 4 and not read_concern.ok_for_legacy): raise ConfigurationError( 'read concern level of %s is not valid ' 'with a max wire version of %d.' % (read_concern.level, self.max_wire_version)) if not (write_concern is None or write_concern.acknowledged or collation is None): raise ConfigurationError( 'Collation is unsupported for unacknowledged writes.') if (self.max_wire_version >= 5 and write_concern and not write_concern.is_server_default): spec['writeConcern'] = write_concern.document elif self.max_wire_version < 5 and collation is not None: raise ConfigurationError( 'Must be connected to MongoDB 3.4+ to use a collation.') if session: session._apply_to(spec, retryable_write, read_preference) self.send_cluster_time(spec, session, client) listeners = self.listeners if publish_events else None unacknowledged = write_concern and not write_concern.acknowledged if self.op_msg_enabled: self._raise_if_not_writable(unacknowledged) try: return command(self.sock, dbname, spec, slave_ok, self.is_mongos, read_preference, codec_options, session, client, check, allowable_errors, self.address, check_keys, listeners, self.max_bson_size, read_concern, parse_write_concern_error=parse_write_concern_error, collation=collation, compression_ctx=self.compression_context, use_op_msg=self.op_msg_enabled, unacknowledged=unacknowledged, user_fields=user_fields) except OperationFailure: raise # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. except BaseException as error: self._raise_connection_failure(error)
python
{ "resource": "" }
q22478
SocketInfo.validate_session
train
def validate_session(self, client, session): """Validate this session before use with client. Raises error if this session is logged in as a different user or the client is not the one that created the session. """ if session: if session._client is not client: raise InvalidOperation( 'Can only use session with the MongoClient that' ' started it') if session._authset != self.authset: raise InvalidOperation( 'Cannot use session after authenticating with different' ' credentials')
python
{ "resource": "" }
q22479
SocketInfo.send_cluster_time
train
def send_cluster_time(self, command, session, client): """Add cluster time for MongoDB >= 3.6.""" if self.max_wire_version >= 6 and client: client._send_cluster_time(command, session)
python
{ "resource": "" }
q22480
Pool.remove_stale_sockets
train
def remove_stale_sockets(self): """Removes stale sockets then adds new ones if pool is too small.""" if self.opts.max_idle_time_seconds is not None: with self.lock: while (self.sockets and self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds): sock_info = self.sockets.pop() sock_info.close() while True: with self.lock: if (len(self.sockets) + self.active_sockets >= self.opts.min_pool_size): # There are enough sockets in the pool. break # We must acquire the semaphore to respect max_pool_size. if not self._socket_semaphore.acquire(False): break try: sock_info = self.connect() with self.lock: self.sockets.appendleft(sock_info) finally: self._socket_semaphore.release()
python
{ "resource": "" }
q22481
SocketChecker.socket_closed
train
def socket_closed(self, sock): """Return True if we know socket has been closed, False otherwise. """ while True: try: if self._poller: with self._lock: self._poller.register(sock, _EVENT_MASK) try: rd = self._poller.poll(0) finally: self._poller.unregister(sock) else: rd, _, _ = select.select([sock], [], [], 0) except (RuntimeError, KeyError): # RuntimeError is raised during a concurrent poll. KeyError # is raised by unregister if the socket is not in the poller. # These errors should not be possible since we protect the # poller with a mutex. raise except ValueError: # ValueError is raised by register/unregister/select if the # socket file descriptor is negative or outside the range for # select (> 1023). return True except (_SELECT_ERROR, IOError) as exc: if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): continue return True except Exception: # Any other exceptions should be attributed to a closed # or invalid socket. return True return len(rd) > 0
python
{ "resource": "" }
q22482
_get_object_size
train
def _get_object_size(data, position, obj_end): """Validate and return a BSON document's size.""" try: obj_size = _UNPACK_INT(data[position:position + 4])[0] except struct.error as exc: raise InvalidBSON(str(exc)) end = position + obj_size - 1 if data[end:end + 1] != b"\x00": raise InvalidBSON("bad eoo") if end >= obj_end: raise InvalidBSON("invalid object length") # If this is the top-level document, validate the total size too. if position == 0 and obj_size != obj_end: raise InvalidBSON("invalid object length") return obj_size, end
python
{ "resource": "" }
q22483
_elements_to_dict
train
def _elements_to_dict(data, position, obj_end, opts, result=None): """Decode a BSON document into result.""" if result is None: result = opts.document_class() end = obj_end - 1 while position < end: key, value, position = _element_to_dict(data, position, obj_end, opts) result[key] = value if position != obj_end: raise InvalidBSON('bad object or element length') return result
python
{ "resource": "" }
q22484
_datetime_to_millis
train
def _datetime_to_millis(dtm): """Convert datetime to milliseconds since epoch UTC.""" if dtm.utcoffset() is not None: dtm = dtm - dtm.utcoffset() return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000)
python
{ "resource": "" }
q22485
_decode_all_selective
train
def _decode_all_selective(data, codec_options, fields): """Decode BSON data to a single document while using user-provided custom decoding logic. `data` must be a string representing a valid, BSON-encoded document. :Parameters: - `data`: BSON data - `codec_options`: An instance of :class:`~bson.codec_options.CodecOptions` with user-specified type decoders. If no decoders are found, this method is the same as ``decode_all``. - `fields`: Map of document namespaces where data that needs to be custom decoded lives or None. For example, to custom decode a list of objects in 'field1.subfield1', the specified value should be ``{'field1': {'subfield1': 1}}``. If ``fields`` is an empty map or None, this method is the same as ``decode_all``. :Returns: - `document_list`: Single-member list containing the decoded document. .. versionadded:: 3.8 """ if not codec_options.type_registry._decoder_map: return decode_all(data, codec_options) if not fields: return decode_all(data, codec_options.with_options(type_registry=None)) # Decode documents for internal use. from bson.raw_bson import RawBSONDocument internal_codec_options = codec_options.with_options( document_class=RawBSONDocument, type_registry=None) _doc = _bson_to_dict(data, internal_codec_options) return [_decode_selective(_doc, fields, codec_options,)]
python
{ "resource": "" }
q22486
_validate_session_write_concern
train
def _validate_session_write_concern(session, write_concern): """Validate that an explicit session is not used with an unack'ed write. Returns the session to use for the next operation. """ if session: if write_concern is not None and not write_concern.acknowledged: # For unacknowledged writes without an explicit session, # drivers SHOULD NOT use an implicit session. If a driver # creates an implicit session for unacknowledged writes # without an explicit session, the driver MUST NOT send the # session ID. if session._implicit: return None else: raise ConfigurationError( 'Explicit sessions are incompatible with ' 'unacknowledged write concern: %r' % ( write_concern,)) return session
python
{ "resource": "" }
q22487
ClientSession._inherit_option
train
def _inherit_option(self, name, val): """Return the inherited TransactionOption value.""" if val: return val txn_opts = self.options.default_transaction_options val = txn_opts and getattr(txn_opts, name) if val: return val return getattr(self.client, name)
python
{ "resource": "" }
q22488
ClientSession.with_transaction
train
def with_transaction(self, callback, read_concern=None, write_concern=None, read_preference=None): """Execute a callback in a transaction. This method starts a transaction on this session, executes ``callback`` once, and then commits the transaction. For example:: def callback(session): orders = session.client.db.orders inventory = session.client.db.inventory orders.insert_one({"sku": "abc123", "qty": 100}, session=session) inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, {"$inc": {"qty": -100}}, session=session) with client.start_session() as session: session.with_transaction(callback) To pass arbitrary arguments to the ``callback``, wrap your callable with a ``lambda`` like this:: def callback(session, custom_arg, custom_kwarg=None): # Transaction operations... with client.start_session() as session: session.with_transaction( lambda s: callback(s, "custom_arg", custom_kwarg=1)) In the event of an exception, ``with_transaction`` may retry the commit or the entire transaction, therefore ``callback`` may be invoked multiple times by a single call to ``with_transaction``. Developers should be mindful of this possibility when writing a ``callback`` that modifies application state or has any other side-effects. Note that even when the ``callback`` is invoked multiple times, ``with_transaction`` ensures that the transaction will be committed at-most-once on the server. The ``callback`` should not attempt to start new transactions, but should simply run operations meant to be contained within a transaction. The ``callback`` should also not commit the transaction; this is handled automatically by ``with_transaction``. If the ``callback`` does commit or abort the transaction without error, however, ``with_transaction`` will return without taking further action. When ``callback`` raises an exception, ``with_transaction`` automatically aborts the current transaction. When ``callback`` or :meth:`~ClientSession.commit_transaction` raises an exception that includes the ``"TransientTransactionError"`` error label, ``with_transaction`` starts a new transaction and re-executes the ``callback``. When :meth:`~ClientSession.commit_transaction` raises an exception with the ``"UnknownTransactionCommitResult"`` error label, ``with_transaction`` retries the commit until the result of the transaction is known. This method will cease retrying after 120 seconds has elapsed. This timeout is not configurable and any exception raised by the ``callback`` or by :meth:`ClientSession.commit_transaction` after the timeout is reached will be re-raised. Applications that desire a different timeout duration should not use this method. :Parameters: - `callback`: The callable ``callback`` to run inside a transaction. The callable must accept a single argument, this session. Note, under certain error conditions the callback may be run multiple times. - `read_concern` (optional): The :class:`~pymongo.read_concern.ReadConcern` to use for this transaction. - `write_concern` (optional): The :class:`~pymongo.write_concern.WriteConcern` to use for this transaction. - `read_preference` (optional): The read preference to use for this transaction. If ``None`` (the default) the :attr:`read_preference` of this :class:`Database` is used. See :mod:`~pymongo.read_preferences` for options. :Returns: The return value of the ``callback``. .. versionadded:: 3.9 """ start_time = monotonic.time() while True: self.start_transaction( read_concern, write_concern, read_preference) try: ret = callback(self) except Exception as exc: if self._in_transaction: self.abort_transaction() if (isinstance(exc, PyMongoError) and exc.has_error_label("TransientTransactionError") and _within_time_limit(start_time)): # Retry the entire transaction. continue raise if self._transaction.state in ( _TxnState.NONE, _TxnState.COMMITTED, _TxnState.ABORTED): # Assume callback intentionally ended the transaction. return ret while True: try: self.commit_transaction() except PyMongoError as exc: if (exc.has_error_label("UnknownTransactionCommitResult") and _within_time_limit(start_time)): # Retry the commit. continue if (exc.has_error_label("TransientTransactionError") and _within_time_limit(start_time)): # Retry the entire transaction. break raise # Commit succeeded. return ret
python
{ "resource": "" }
q22489
ClientSession.commit_transaction
train
def commit_transaction(self): """Commit a multi-statement transaction. .. versionadded:: 3.7 """ self._check_ended() retry = False state = self._transaction.state if state is _TxnState.NONE: raise InvalidOperation("No transaction started") elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY): # Server transaction was never started, no need to send a command. self._transaction.state = _TxnState.COMMITTED_EMPTY return elif state is _TxnState.ABORTED: raise InvalidOperation( "Cannot call commitTransaction after calling abortTransaction") elif state is _TxnState.COMMITTED: # We're explicitly retrying the commit, move the state back to # "in progress" so that _in_transaction returns true. self._transaction.state = _TxnState.IN_PROGRESS retry = True try: self._finish_transaction_with_retry("commitTransaction", retry) except ConnectionFailure as exc: # We do not know if the commit was successfully applied on the # server or if it satisfied the provided write concern, set the # unknown commit error label. exc._remove_error_label("TransientTransactionError") _reraise_with_unknown_commit(exc) except WTimeoutError as exc: # We do not know if the commit has satisfied the provided write # concern, add the unknown commit error label. _reraise_with_unknown_commit(exc) except OperationFailure as exc: if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES: # The server reports errorLabels in the case. raise # We do not know if the commit was successfully applied on the # server or if it satisfied the provided write concern, set the # unknown commit error label. _reraise_with_unknown_commit(exc) finally: self._transaction.state = _TxnState.COMMITTED
python
{ "resource": "" }
q22490
ClientSession.abort_transaction
train
def abort_transaction(self): """Abort a multi-statement transaction. .. versionadded:: 3.7 """ self._check_ended() state = self._transaction.state if state is _TxnState.NONE: raise InvalidOperation("No transaction started") elif state is _TxnState.STARTING: # Server transaction was never started, no need to send a command. self._transaction.state = _TxnState.ABORTED return elif state is _TxnState.ABORTED: raise InvalidOperation("Cannot call abortTransaction twice") elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY): raise InvalidOperation( "Cannot call abortTransaction after calling commitTransaction") try: self._finish_transaction_with_retry("abortTransaction", False) except (OperationFailure, ConnectionFailure): # The transactions spec says to ignore abortTransaction errors. pass finally: self._transaction.state = _TxnState.ABORTED
python
{ "resource": "" }
q22491
ClientSession._finish_transaction_with_retry
train
def _finish_transaction_with_retry(self, command_name, explicit_retry): """Run commit or abort with one retry after any retryable error. :Parameters: - `command_name`: Either "commitTransaction" or "abortTransaction". - `explicit_retry`: True when this is an explicit commit retry attempt, i.e. the application called session.commit_transaction() twice. """ # This can be refactored with MongoClient._retry_with_session. try: return self._finish_transaction(command_name, explicit_retry) except ServerSelectionTimeoutError: raise except ConnectionFailure as exc: try: return self._finish_transaction(command_name, True) except ServerSelectionTimeoutError: # Raise the original error so the application can infer that # an attempt was made. raise exc except OperationFailure as exc: if exc.code not in _RETRYABLE_ERROR_CODES: raise try: return self._finish_transaction(command_name, True) except ServerSelectionTimeoutError: # Raise the original error so the application can infer that # an attempt was made. raise exc
python
{ "resource": "" }
q22492
ClientSession._advance_cluster_time
train
def _advance_cluster_time(self, cluster_time): """Internal cluster time helper.""" if self._cluster_time is None: self._cluster_time = cluster_time elif cluster_time is not None: if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]: self._cluster_time = cluster_time
python
{ "resource": "" }
q22493
ClientSession.advance_cluster_time
train
def advance_cluster_time(self, cluster_time): """Update the cluster time for this session. :Parameters: - `cluster_time`: The :data:`~pymongo.client_session.ClientSession.cluster_time` from another `ClientSession` instance. """ if not isinstance(cluster_time, abc.Mapping): raise TypeError( "cluster_time must be an instance of collections.Mapping") if not isinstance(cluster_time.get("clusterTime"), Timestamp): raise ValueError("Invalid cluster_time") self._advance_cluster_time(cluster_time)
python
{ "resource": "" }
q22494
ClientSession._advance_operation_time
train
def _advance_operation_time(self, operation_time): """Internal operation time helper.""" if self._operation_time is None: self._operation_time = operation_time elif operation_time is not None: if operation_time > self._operation_time: self._operation_time = operation_time
python
{ "resource": "" }
q22495
ClientSession.advance_operation_time
train
def advance_operation_time(self, operation_time): """Update the operation time for this session. :Parameters: - `operation_time`: The :data:`~pymongo.client_session.ClientSession.operation_time` from another `ClientSession` instance. """ if not isinstance(operation_time, Timestamp): raise TypeError("operation_time must be an instance " "of bson.timestamp.Timestamp") self._advance_operation_time(operation_time)
python
{ "resource": "" }
q22496
ClientSession._process_response
train
def _process_response(self, reply): """Process a response to a command that was run with this session.""" self._advance_cluster_time(reply.get('$clusterTime')) self._advance_operation_time(reply.get('operationTime')) if self._in_transaction and self._transaction.sharded: recovery_token = reply.get('recoveryToken') if recovery_token: self._transaction.recovery_token = recovery_token
python
{ "resource": "" }
q22497
ClientSession._pin_mongos
train
def _pin_mongos(self, server): """Pin this session to the given mongos Server.""" self._transaction.sharded = True self._transaction.pinned_address = server.description.address
python
{ "resource": "" }
q22498
_parse_write_concern
train
def _parse_write_concern(options): """Parse write concern options.""" concern = options.get('w') wtimeout = options.get('wtimeoutms') j = options.get('journal') fsync = options.get('fsync') return WriteConcern(concern, wtimeout, j, fsync)
python
{ "resource": "" }
q22499
_parse_ssl_options
train
def _parse_ssl_options(options): """Parse ssl options.""" use_ssl = options.get('ssl') if use_ssl is not None: validate_boolean('ssl', use_ssl) certfile = options.get('ssl_certfile') keyfile = options.get('ssl_keyfile') passphrase = options.get('ssl_pem_passphrase') ca_certs = options.get('ssl_ca_certs') cert_reqs = options.get('ssl_cert_reqs') match_hostname = options.get('ssl_match_hostname', True) crlfile = options.get('ssl_crlfile') ssl_kwarg_keys = [k for k in options if k.startswith('ssl_') and options[k]] if use_ssl == False and ssl_kwarg_keys: raise ConfigurationError("ssl has not been enabled but the " "following ssl parameters have been set: " "%s. Please set `ssl=True` or remove." % ', '.join(ssl_kwarg_keys)) if ssl_kwarg_keys and use_ssl is None: # ssl options imply ssl = True use_ssl = True if use_ssl is True: ctx = get_ssl_context( certfile, keyfile, passphrase, ca_certs, cert_reqs, crlfile, match_hostname) return ctx, match_hostname return None, match_hostname
python
{ "resource": "" }