'If the monitor is sleeping, wake and check the server soon.'
def request_check(self):
self._executor.wake()
'Call ismaster once or twice. Reset server\'s pool on error. Returns a ServerDescription.'
def _check_with_retry(self):
    address = self._server_description.address
    retry = True
    metadata = None
    if self._server_description.server_type == SERVER_TYPE.Unknown:
        retry = False
        metadata = self._pool.opts.metadata
    start = _time()
    try:
        return self._check_once(metadata=metadata)
    except ReferenceError:
        raise
    except Exception as error:
        error_time = _time() - start
        if self._publish:
            self._listeners.publish_server_heartbeat_failed(
                address, error_time, error)
        self._topology.reset_pool(address)
        default = ServerDescription(address, error=error)
        if not retry:
            self._avg_round_trip_time.reset()
            return default
        # Retry once; if the second attempt also fails, return the
        # description built from the first error.
        start = _time()
        try:
            return self._check_once(metadata=self._pool.opts.metadata)
        except ReferenceError:
            raise
        except Exception as error:
            error_time = _time() - start
            if self._publish:
                self._listeners.publish_server_heartbeat_failed(
                    address, error_time, error)
            self._avg_round_trip_time.reset()
            return default
'A single attempt to call ismaster. Returns a ServerDescription, or raises an exception.'
def _check_once(self, metadata=None):
    address = self._server_description.address
    if self._publish:
        self._listeners.publish_server_heartbeat_started(address)
    with self._pool.get_socket({}) as sock_info:
        response, round_trip_time = self._check_with_socket(
            sock_info, metadata=metadata)
        self._avg_round_trip_time.add_sample(round_trip_time)
        sd = ServerDescription(
            address=address,
            ismaster=response,
            round_trip_time=self._avg_round_trip_time.get())
        if self._publish:
            self._listeners.publish_server_heartbeat_succeeded(
                address, round_trip_time, response)
        return sd
'Return (IsMaster, round_trip_time). Can raise ConnectionFailure or OperationFailure.'
def _check_with_socket(self, sock_info, metadata=None):
    cmd = SON([('ismaster', 1)])
    if metadata is not None:
        cmd['client'] = metadata
    start = _time()
    request_id, msg, max_doc_size = message.query(
        0, 'admin.$cmd', 0, -1, cmd, None, DEFAULT_CODEC_OPTIONS)
    sock_info.send_message(msg, max_doc_size)
    raw_response = sock_info.receive_message(1, request_id)
    result = helpers._unpack_response(raw_response)
    return IsMaster(result['data'][0]), _time() - start
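The monitor's heartbeat is just an ismaster command timed end to end. As a rough illustration only (assuming a mongod reachable on the default local port), the same round trip can be observed from application code with PyMongo's command helper:

    # Hypothetical sketch: time a manual ismaster call, mirroring what
    # _check_with_socket measures for round_trip_time.
    import time
    from pymongo import MongoClient

    client = MongoClient('localhost', 27017, serverSelectionTimeoutMS=2000)
    start = time.time()
    response = client.admin.command('ismaster')   # same handshake command
    round_trip = time.time() - start
    print(response.get('ismaster'), round_trip)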
'Return True if we know socket has been closed, False otherwise.'
def socket_closed(self, sock):
    while True:
        try:
            if self._poller:
                with self._lock:
                    self._poller.register(sock, _EVENT_MASK)
                    try:
                        rd = self._poller.poll(0)
                    finally:
                        self._poller.unregister(sock)
            else:
                rd, _, _ = select.select([sock], [], [], 0)
        except (RuntimeError, KeyError):
            # Concurrent poll() or unregister() of an unknown socket.
            raise
        except ValueError:
            # File descriptor is negative or out of range for select().
            return True
        except (_SELECT_ERROR, IOError) as exc:
            if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN):
                continue
            return True
        except:
            # Any other error: assume the socket is unusable.
            return True
        return len(rd) > 0
'Raise an exception on property access if unacknowledged.'
def _raise_if_unacknowledged(self, property_name):
if (not self.__acknowledged): raise InvalidOperation(('A value for %s is not available when the write is unacknowledged. Check the acknowledged attribute to avoid this error.' % (property_name,)))
'Is this the result of an acknowledged write operation? The :attr:`acknowledged` attribute will be ``False`` when using ``WriteConcern(w=0)``, otherwise ``True``. .. note:: If the :attr:`acknowledged` attribute is ``False``, all other attributes of this class will raise :class:`~pymongo.errors.InvalidOperation` when accessed. Values for other attributes cannot be determined if the write operation was unacknowledged. .. seealso:: :class:`~pymongo.write_concern.WriteConcern`'
@property
def acknowledged(self):
    return self.__acknowledged
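A brief usage sketch (local server and collection name are assumed) of why this guard exists: with w=0 the server sends no reply, so the result attributes cannot be populated and reading them raises InvalidOperation.

    from pymongo import MongoClient
    from pymongo.errors import InvalidOperation
    from pymongo.write_concern import WriteConcern

    coll = MongoClient().test.get_collection(
        'docs', write_concern=WriteConcern(w=0))   # unacknowledged writes
    result = coll.update_one({'x': 1}, {'$set': {'x': 2}})
    assert result.acknowledged is False
    try:
        result.matched_count   # raises: the write was unacknowledged
    except InvalidOperation:
        pass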
'The inserted document\'s _id.'
@property
def inserted_id(self):
    return self.__inserted_id
'A list of _ids of the inserted documents, in the order provided. .. note:: If ``False`` is passed for the `ordered` parameter to :meth:`~pymongo.collection.Collection.insert_many` the server may have inserted the documents in a different order than what is presented here.'
@property
def inserted_ids(self):
    return self.__inserted_ids
'The raw result document returned by the server.'
@property
def raw_result(self):
    return self.__raw_result
'The number of documents matched for this update.'
@property
def matched_count(self):
    self._raise_if_unacknowledged('matched_count')
    if self.upserted_id is not None:
        return 0
    return self.__raw_result.get('n', 0)
'The number of documents modified. .. note:: modified_count is only reported by MongoDB 2.6 and later. When connected to an earlier server version, or in certain mixed version sharding configurations, this attribute will be set to ``None``.'
@property
def modified_count(self):
    self._raise_if_unacknowledged('modified_count')
    return self.__raw_result.get('nModified')
'The _id of the inserted document if an upsert took place. Otherwise ``None``.'
@property
def upserted_id(self):
    self._raise_if_unacknowledged('upserted_id')
    return self.__raw_result.get('upserted')
'The raw result document returned by the server.'
@property
def raw_result(self):
    return self.__raw_result
'The number of documents deleted.'
@property
def deleted_count(self):
    self._raise_if_unacknowledged('deleted_count')
    return self.__raw_result.get('n', 0)
'Create a BulkWriteResult instance. :Parameters: - `bulk_api_result`: A result dict from the bulk API - `acknowledged`: Was this write result acknowledged? If ``False`` then all properties of this object will raise :exc:`~pymongo.errors.InvalidOperation`.'
def __init__(self, bulk_api_result, acknowledged):
    self.__bulk_api_result = bulk_api_result
    super(BulkWriteResult, self).__init__(acknowledged)
'The raw bulk API result.'
@property
def bulk_api_result(self):
    return self.__bulk_api_result
'The number of documents inserted.'
@property
def inserted_count(self):
    self._raise_if_unacknowledged('inserted_count')
    return self.__bulk_api_result.get('nInserted')
'The number of documents matched for an update.'
@property
def matched_count(self):
    self._raise_if_unacknowledged('matched_count')
    return self.__bulk_api_result.get('nMatched')
'The number of documents modified. .. note:: modified_count is only reported by MongoDB 2.6 and later. When connected to an earlier server version, or in certain mixed version sharding configurations, this attribute will be set to ``None``.'
@property
def modified_count(self):
    self._raise_if_unacknowledged('modified_count')
    return self.__bulk_api_result.get('nModified')
'The number of documents deleted.'
@property
def deleted_count(self):
    self._raise_if_unacknowledged('deleted_count')
    return self.__bulk_api_result.get('nRemoved')
'The number of documents upserted.'
@property
def upserted_count(self):
    self._raise_if_unacknowledged('upserted_count')
    return self.__bulk_api_result.get('nUpserted')
'A map of operation index to the _id of the upserted document.'
@property
def upserted_ids(self):
    self._raise_if_unacknowledged('upserted_ids')
    if self.__bulk_api_result:
        return dict((upsert['index'], upsert['_id'])
                    for upsert in self.bulk_api_result['upserted'])
'Start monitoring, or restart after a fork. No effect if called multiple times. .. warning:: Topology is shared among multiple threads and is protected by mutual exclusion. Using Topology from a process other than the one that initialized it will emit a warning and may result in deadlock. To prevent this from happening, MongoClient must be created after any forking OR MongoClient must be started with connect=False.'
def open(self):
    if self._pid is None:
        self._pid = os.getpid()
    elif os.getpid() != self._pid:
        warnings.warn(
            "MongoClient opened before fork. Create MongoClient "
            "with connect=False, or create client after forking. "
            "See PyMongo's documentation for details: "
            "http://api.mongodb.org/python/current/faq.html#pymongo-fork-safe")
    with self._lock:
        self._ensure_opened()
'Return a list of Servers matching selector, or time out. :Parameters: - `selector`: function that takes a list of Servers and returns a subset of them. - `server_selection_timeout` (optional): maximum seconds to wait. If not provided, the default value common.SERVER_SELECTION_TIMEOUT is used. - `address`: optional server address to select. Calls self.open() if needed. Raises :exc:`ServerSelectionTimeoutError` after `server_selection_timeout` if no matching servers are found.'
def select_servers(self, selector, server_selection_timeout=None, address=None):
    if server_selection_timeout is None:
        server_timeout = self._settings.server_selection_timeout
    else:
        server_timeout = server_selection_timeout
    with self._lock:
        self._description.check_compatible()
        now = _time()
        end_time = now + server_timeout
        server_descriptions = self._description.apply_selector(
            selector, address)
        while not server_descriptions:
            if server_timeout == 0 or now > end_time:
                raise ServerSelectionTimeoutError(
                    self._error_message(selector))
            self._ensure_opened()
            self._request_check_all()
            self._condition.wait(common.MIN_HEARTBEAT_INTERVAL)
            self._description.check_compatible()
            now = _time()
            server_descriptions = self._description.apply_selector(
                selector, address)
        return [self.get_server_by_address(sd.address)
                for sd in server_descriptions]
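From application code the timeout used here is normally configured per client. A minimal sketch (assuming a local deployment; the database and collection names are illustrative):

    from pymongo import MongoClient
    from pymongo.errors import ServerSelectionTimeoutError

    # serverSelectionTimeoutMS bounds how long select_servers() keeps retrying.
    client = MongoClient('mongodb://localhost:27017/',
                         serverSelectionTimeoutMS=500)
    try:
        client.test.things.find_one()   # triggers server selection
    except ServerSelectionTimeoutError as exc:
        print('no suitable server within 500ms:', exc)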
'Like select_servers, but choose a random server if several match.'
def select_server(self, selector, server_selection_timeout=None, address=None):
return random.choice(self.select_servers(selector, server_selection_timeout, address))
'Return a Server for "address", reconnecting if necessary. If the server\'s type is not known, request an immediate check of all servers. Time out after "server_selection_timeout" if the server cannot be reached. :Parameters: - `address`: A (host, port) pair. - `server_selection_timeout` (optional): maximum seconds to wait. If not provided, the default value common.SERVER_SELECTION_TIMEOUT is used. Calls self.open() if needed. Raises exc:`ServerSelectionTimeoutError` after `server_selection_timeout` if no matching servers are found.'
def select_server_by_address(self, address, server_selection_timeout=None):
return self.select_server(any_server_selector, server_selection_timeout, address)
'Process a new ServerDescription after an ismaster call completes.'
def on_change(self, server_description):
    with self._lock:
        if self._description.has_server(server_description.address):
            td_old = self._description
            if self._publish_server:
                old_server_description = td_old._server_descriptions[
                    server_description.address]
                self._events.put((
                    self._listeners.publish_server_description_changed,
                    (old_server_description, server_description,
                     server_description.address, self._topology_id)))
            self._description = updated_topology_description(
                self._description, server_description)
            self._update_servers()
            if self._publish_tp:
                self._events.put((
                    self._listeners.publish_topology_description_changed,
                    (td_old, self._description, self._topology_id)))
            self._condition.notify_all()
'Get a Server or None. Returns the current version of the server immediately, even if it\'s Unknown or absent from the topology. Only use this in unittests. In driver code, use select_server_by_address, since then you\'re assured a recent view of the server\'s type and wire protocol version.'
def get_server_by_address(self, address):
return self._servers.get(address)
'Return primary\'s address or None.'
def get_primary(self):
    with self._lock:
        topology_type = self._description.topology_type
        if topology_type != TOPOLOGY_TYPE.ReplicaSetWithPrimary:
            return None
        return writable_server_selector(self._new_selection())[0].address
'Return set of replica set member addresses.'
def _get_replica_set_members(self, selector):
    with self._lock:
        topology_type = self._description.topology_type
        if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary,
                                 TOPOLOGY_TYPE.ReplicaSetNoPrimary):
            return set()
        return set([sd.address
                    for sd in selector(self._new_selection())])
'Return set of secondary addresses.'
def get_secondaries(self):
return self._get_replica_set_members(secondary_server_selector)
'Return set of arbiter addresses.'
def get_arbiters(self):
return self._get_replica_set_members(arbiter_server_selector)
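These topology queries back the corresponding MongoClient properties. A small sketch, assuming a replica set named 'rs0' reachable locally:

    from pymongo import MongoClient

    client = MongoClient('mongodb://localhost:27017/?replicaSet=rs0')
    print(client.primary)      # (host, port) of the primary, or None
    print(client.secondaries)  # set of (host, port) pairs
    print(client.arbiters)     # set of (host, port) pairs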
'Wake all monitors, wait for at least one to check its server.'
def request_check_all(self, wait_time=5):
    with self._lock:
        self._request_check_all()
        self._condition.wait(wait_time)
'Clear our pool for a server and mark it Unknown. Do *not* request an immediate check.'
def reset_server(self, address):
with self._lock: self._reset_server(address)
'Clear our pool for a server, mark it Unknown, and check it soon.'
def reset_server_and_request_check(self, address):
    with self._lock:
        self._reset_server(address)
        self._request_check(address)
'Clear pools and terminate monitors. Topology reopens on demand.'
def close(self):
    with self._lock:
        for server in self._servers.values():
            server.close()
        self._description = self._description.reset()
        self._update_servers()
        self._opened = False
    if self._publish_tp:
        self._events.put((self._listeners.publish_topology_closed,
                          (self._topology_id,)))
    if self._publish_server or self._publish_tp:
        self.__events_executor.close()
'A Selection object, initially including all known servers. Hold the lock when calling this.'
def _new_selection(self):
return Selection.from_topology_description(self._description)
'Start monitors, or restart after a fork. Hold the lock when calling this.'
def _ensure_opened(self):
    if not self._opened:
        self._opened = True
        self._update_servers()
        if self._publish_tp or self._publish_server:
            self.__events_executor.open()
    else:
        for server in itervalues(self._servers):
            server.open()
'Clear our pool for a server and mark it Unknown. Hold the lock when calling this. Does *not* request an immediate check.'
def _reset_server(self, address):
    server = self._servers.get(address)
    if server:
        server.reset()
        self._description = self._description.reset_server(address)
        self._update_servers()
'Wake one monitor. Hold the lock when calling this.'
def _request_check(self, address):
    server = self._servers.get(address)
    if server:
        server.request_check()
'Wake all monitors. Hold the lock when calling this.'
def _request_check_all(self):
for server in self._servers.values(): server.request_check()
'Sync our Servers from TopologyDescription.server_descriptions. Hold the lock while calling this.'
def _update_servers(self):
    for address, sd in self._description.server_descriptions().items():
        if address not in self._servers:
            monitor = self._settings.monitor_class(
                server_description=sd,
                topology=self,
                pool=self._create_pool_for_monitor(address),
                topology_settings=self._settings)
            weak = None
            if self._publish_server:
                weak = weakref.ref(self._events)
            server = Server(
                server_description=sd,
                pool=self._create_pool_for_server(address),
                monitor=monitor,
                topology_id=self._topology_id,
                listeners=self._listeners,
                events=weak)
            self._servers[address] = server
            server.open()
        else:
            self._servers[address].description = sd
    for address, server in list(self._servers.items()):
        if not self._description.has_server(address):
            server.close()
            self._servers.pop(address)
'Format an error message if server selection fails. Hold the lock when calling this.'
def _error_message(self, selector):
    is_replica_set = self._description.topology_type in (
        TOPOLOGY_TYPE.ReplicaSetWithPrimary,
        TOPOLOGY_TYPE.ReplicaSetNoPrimary)
    if is_replica_set:
        server_plural = 'replica set members'
    elif self._description.topology_type == TOPOLOGY_TYPE.Sharded:
        server_plural = 'mongoses'
    else:
        server_plural = 'servers'
    if self._description.known_servers:
        # We've connected, but no servers match the selector.
        if selector is writable_server_selector:
            if is_replica_set:
                return 'No primary available for writes'
            else:
                return 'No %s available for writes' % server_plural
        else:
            return 'No %s match selector "%s"' % (server_plural, selector)
    else:
        addresses = list(self._description.server_descriptions())
        servers = list(self._description.server_descriptions().values())
        if not servers:
            if is_replica_set:
                return 'No %s available for replica set name "%s"' % (
                    server_plural, self._settings.replica_set_name)
            else:
                return 'No %s available' % server_plural
        # One or more servers, all Unknown. Are they unknown for one reason?
        error = servers[0].error
        same = all(server.error == error for server in servers[1:])
        if same:
            if error is None:
                return 'No %s found yet' % server_plural
            if is_replica_set and not set(addresses).intersection(
                    self._seed_addresses):
                return ('Could not reach any servers in %s. Replica set is'
                        ' configured with internal hostnames or IPs?'
                        % addresses)
            return str(error)
        else:
            return ','.join(str(server.error) for server in servers
                            if server.error)
'Initialize a new Run object.'
def __init__(self, op_type):
    self.op_type = op_type
    self.index_map = []
    self.ops = []
'Get the original index of an operation in this run. :Parameters: - `idx`: The Run index that maps to the original index.'
def index(self, idx):
return self.index_map[idx]
'Add an operation to this Run instance. :Parameters: - `original_index`: The original index of this operation within a larger bulk operation. - `operation`: The operation document.'
def add(self, original_index, operation):
    self.index_map.append(original_index)
    self.ops.append(operation)
'Initialize a _Bulk instance.'
def __init__(self, collection, ordered, bypass_document_validation):
    self.collection = collection.with_options(
        codec_options=collection.codec_options._replace(
            unicode_decode_error_handler='replace',
            document_class=dict))
    self.ordered = ordered
    self.ops = []
    self.name = '%s.%s' % (collection.database.name, collection.name)
    self.namespace = collection.database.name + '.$cmd'
    self.executed = False
    self.bypass_doc_val = bypass_document_validation
    self.uses_collation = False
'Add an insert document to the list of ops.'
def add_insert(self, document):
    validate_is_document_type('document', document)
    if not (isinstance(document, RawBSONDocument) or '_id' in document):
        document['_id'] = ObjectId()
    self.ops.append((_INSERT, document))
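One user-visible consequence of generating _id on the client, sketched below (local server and collection name assumed): the documents passed to insert_many are mutated in place, and the same ids come back in inserted_ids.

    from pymongo import MongoClient

    coll = MongoClient().test.insert_demo
    docs = [{'n': 1}, {'n': 2}]
    result = coll.insert_many(docs)
    assert [d['_id'] for d in docs] == result.inserted_ids  # ids added client side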
'Create an update document and add it to the list of ops.'
def add_update(self, selector, update, multi=False, upsert=False, collation=None):
    validate_ok_for_update(update)
    cmd = SON([('q', selector), ('u', update),
               ('multi', multi), ('upsert', upsert)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    self.ops.append((_UPDATE, cmd))
'Create a replace document and add it to the list of ops.'
def add_replace(self, selector, replacement, upsert=False, collation=None):
    validate_ok_for_replace(replacement)
    cmd = SON([('q', selector), ('u', replacement),
               ('multi', False), ('upsert', upsert)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    self.ops.append((_UPDATE, cmd))
'Create a delete document and add it to the list of ops.'
def add_delete(self, selector, limit, collation=None):
    cmd = SON([('q', selector), ('limit', limit)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    self.ops.append((_DELETE, cmd))
'Generate batches of operations, batched by type of operation, in the order **provided**.'
def gen_ordered(self):
    run = None
    for idx, (op_type, operation) in enumerate(self.ops):
        if run is None:
            run = _Run(op_type)
        elif run.op_type != op_type:
            yield run
            run = _Run(op_type)
        run.add(idx, operation)
    yield run
'Generate batches of operations, batched by type of operation, in arbitrary order.'
def gen_unordered(self):
    operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)]
    for idx, (op_type, operation) in enumerate(self.ops):
        operations[op_type].add(idx, operation)
    for run in operations:
        if run.ops:
            yield run
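To illustrate the difference between the two generators (a standalone sketch, not the driver's code): ordered batching splits on every change of operation type, while unordered batching yields at most one run per type.

    # Standalone illustration of the batching rule, using plain tuples in
    # place of _Run objects. The type codes mirror the list order above
    # (insert=0, update=1, delete=2).
    from itertools import groupby

    ops = [(0, 'i1'), (0, 'i2'), (1, 'u1'), (0, 'i3'), (2, 'd1')]

    # Ordered: consecutive ops of the same type form one batch.
    ordered_runs = [[op for _, op in grp]
                    for _, grp in groupby(ops, key=lambda t: t[0])]
    # -> [['i1', 'i2'], ['u1'], ['i3'], ['d1']]

    # Unordered: one batch per type, regardless of position.
    unordered_runs = {}
    for op_type, op in ops:
        unordered_runs.setdefault(op_type, []).append(op)
    # -> {0: ['i1', 'i2', 'i3'], 1: ['u1'], 2: ['d1']}
    print(ordered_runs, unordered_runs)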
'Execute using write commands.'
def execute_command(self, sock_info, generator, write_concern):
    full_result = {
        'writeErrors': [],
        'writeConcernErrors': [],
        'nInserted': 0,
        'nUpserted': 0,
        'nMatched': 0,
        'nModified': 0,
        'nRemoved': 0,
        'upserted': [],
    }
    op_id = _randint()
    db_name = self.collection.database.name
    listeners = self.collection.database.client._event_listeners
    for run in generator:
        cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                   ('ordered', self.ordered)])
        if write_concern.document:
            cmd['writeConcern'] = write_concern.document
        if self.bypass_doc_val and sock_info.max_wire_version >= 4:
            cmd['bypassDocumentValidation'] = True
        bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id, listeners)
        results = _do_batched_write_command(
            self.namespace, run.op_type, cmd, run.ops, True,
            self.collection.codec_options, bwc)
        _merge_command(run, full_result, results)
        if self.ordered and full_result['writeErrors']:
            break
    if full_result['writeErrors'] or full_result['writeConcernErrors']:
        if full_result['writeErrors']:
            full_result['writeErrors'].sort(key=lambda error: error['index'])
        raise BulkWriteError(full_result)
    return full_result
'Execute all operations, returning no results (w=0).'
def execute_no_results(self, sock_info, generator):
    if self.bypass_doc_val and sock_info.max_wire_version >= 4:
        raise OperationFailure('Cannot set bypass_document_validation with'
                               ' unacknowledged write concern')
    coll = self.collection
    # If ordered is True we have to send w=1 so we can abort on the
    # first error.
    write_concern = WriteConcern(w=int(self.ordered))
    op_id = _randint()
    for run in generator:
        try:
            if run.op_type == _INSERT:
                coll._insert(
                    sock_info, run.ops, self.ordered,
                    write_concern=write_concern, op_id=op_id,
                    bypass_doc_val=self.bypass_doc_val)
            elif run.op_type == _UPDATE:
                for operation in run.ops:
                    doc = operation['u']
                    check_keys = True
                    if doc and next(iter(doc)).startswith('$'):
                        check_keys = False
                    coll._update(
                        sock_info, operation['q'], doc, operation['upsert'],
                        check_keys, operation['multi'],
                        write_concern=write_concern, op_id=op_id,
                        ordered=self.ordered,
                        bypass_doc_val=self.bypass_doc_val)
            else:
                for operation in run.ops:
                    coll._delete(sock_info, operation['q'],
                                 not operation['limit'],
                                 write_concern, op_id, self.ordered)
        except OperationFailure:
            if self.ordered:
                break
'Execute using legacy wire protocol ops.'
def execute_legacy(self, sock_info, generator, write_concern):
    coll = self.collection
    full_result = {
        'writeErrors': [],
        'writeConcernErrors': [],
        'nInserted': 0,
        'nUpserted': 0,
        'nMatched': 0,
        'nRemoved': 0,
        'upserted': [],
    }
    op_id = _randint()
    stop = False
    for run in generator:
        for idx, operation in enumerate(run.ops):
            try:
                if run.op_type == _INSERT:
                    coll._insert(sock_info, operation, self.ordered,
                                 write_concern=write_concern, op_id=op_id)
                    result = {}
                elif run.op_type == _UPDATE:
                    doc = operation['u']
                    check_keys = True
                    if doc and next(iter(doc)).startswith('$'):
                        check_keys = False
                    result = coll._update(sock_info, operation['q'], doc,
                                          operation['upsert'], check_keys,
                                          operation['multi'],
                                          write_concern=write_concern,
                                          op_id=op_id, ordered=self.ordered)
                else:
                    result = coll._delete(sock_info, operation['q'],
                                          not operation['limit'],
                                          write_concern, op_id, self.ordered)
                _merge_legacy(run, full_result, result, idx)
            except DocumentTooLarge as exc:
                # Report an oversized document like a server-side write error.
                error = _make_error(run.index(idx), _BAD_VALUE,
                                    str(exc), operation)
                full_result['writeErrors'].append(error)
                if self.ordered:
                    stop = True
                    break
            except OperationFailure as exc:
                if not exc.details:
                    # Some error not related to the write operation
                    # (e.g. an authentication failure). Re-raise immediately.
                    raise
                _merge_legacy(run, full_result, exc.details, idx)
                if self.ordered and full_result['writeErrors']:
                    stop = True
                    break
        if stop:
            break
    if full_result['writeErrors'] or full_result['writeConcernErrors']:
        if full_result['writeErrors']:
            full_result['writeErrors'].sort(key=lambda error: error['index'])
        raise BulkWriteError(full_result)
    return full_result
'Execute operations.'
def execute(self, write_concern):
    if not self.ops:
        raise InvalidOperation('No operations to execute')
    if self.executed:
        raise InvalidOperation('Bulk operations can only be executed once.')
    self.executed = True
    write_concern = (WriteConcern(**write_concern) if write_concern
                     else self.collection.write_concern)
    if self.ordered:
        generator = self.gen_ordered()
    else:
        generator = self.gen_unordered()
    client = self.collection.database.client
    with client._socket_for_writes() as sock_info:
        if sock_info.max_wire_version < 5 and self.uses_collation:
            raise ConfigurationError(
                'Must be connected to MongoDB 3.4+ to use a collation.')
        if not write_concern.acknowledged:
            if self.uses_collation:
                raise ConfigurationError(
                    'Collation is unsupported for unacknowledged writes.')
            self.execute_no_results(sock_info, generator)
        elif sock_info.max_wire_version > 1:
            return self.execute_command(sock_info, generator, write_concern)
        else:
            return self.execute_legacy(sock_info, generator, write_concern)
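When execute_command or execute_legacy raises BulkWriteError, the merged full_result dict is carried on the exception. A short sketch (assuming a local server; the collection name is illustrative) of inspecting it through the public API:

    from pymongo import MongoClient, InsertOne
    from pymongo.errors import BulkWriteError

    coll = MongoClient().test.bulk_err_demo
    try:
        coll.bulk_write([
            InsertOne({'_id': 1}),
            InsertOne({'_id': 1}),   # duplicate key -> write error at index 1
        ], ordered=False)
    except BulkWriteError as exc:
        for err in exc.details['writeErrors']:
            print(err['index'], err['code'], err['errmsg'])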
'Update one document matching the selector. :Parameters: - `update` (dict): the update operations to apply'
def update_one(self, update):
self.__bulk.add_update(self.__selector, update, multi=False, upsert=True, collation=self.__collation)
'Update all documents matching the selector. :Parameters: - `update` (dict): the update operations to apply'
def update(self, update):
self.__bulk.add_update(self.__selector, update, multi=True, upsert=True, collation=self.__collation)
'Replace one entire document matching the selector criteria. :Parameters: - `replacement` (dict): the replacement document'
def replace_one(self, replacement):
self.__bulk.add_replace(self.__selector, replacement, upsert=True, collation=self.__collation)
'Update one document matching the selector criteria. :Parameters: - `update` (dict): the update operations to apply'
def update_one(self, update):
self.__bulk.add_update(self.__selector, update, multi=False, collation=self.__collation)
'Update all documents matching the selector criteria. :Parameters: - `update` (dict): the update operations to apply'
def update(self, update):
self.__bulk.add_update(self.__selector, update, multi=True, collation=self.__collation)
'Replace one entire document matching the selector criteria. :Parameters: - `replacement` (dict): the replacement document'
def replace_one(self, replacement):
self.__bulk.add_replace(self.__selector, replacement, collation=self.__collation)
'Remove a single document matching the selector criteria.'
def remove_one(self):
self.__bulk.add_delete(self.__selector, _DELETE_ONE, collation=self.__collation)
'Remove all documents matching the selector criteria.'
def remove(self):
self.__bulk.add_delete(self.__selector, _DELETE_ALL, collation=self.__collation)
'Specify that all chained update operations should be upserts. :Returns: - A :class:`BulkUpsertOperation` instance, used to add update operations to this bulk operation.'
def upsert(self):
return BulkUpsertOperation(self.__selector, self.__bulk, self.__collation)
'**DEPRECATED**: Initialize a new BulkOperationBuilder instance. :Parameters: - `collection`: A :class:`~pymongo.collection.Collection` instance. - `ordered` (optional): If ``True`` all operations will be executed serially, in the order provided, and the entire execution will abort on the first error. If ``False`` operations will be executed in arbitrary order (possibly in parallel on the server), reporting any errors that occurred after attempting all operations. Defaults to ``True``. - `bypass_document_validation`: (optional) If ``True``, allows the write to opt-out of document level validation. Default is ``False``. .. note:: `bypass_document_validation` requires server version **>= 3.2** .. versionchanged:: 3.5 Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write` instead. .. versionchanged:: 3.2 Added bypass_document_validation support'
def __init__(self, collection, ordered=True, bypass_document_validation=False):
self.__bulk = _Bulk(collection, ordered, bypass_document_validation)
'Specify selection criteria for bulk operations. :Parameters: - `selector` (dict): the selection criteria for update and remove operations. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. :Returns: - A :class:`BulkWriteOperation` instance, used to add update and remove operations to this bulk operation. .. versionchanged:: 3.4 Added the `collation` option.'
def find(self, selector, collation=None):
    validate_is_mapping('selector', selector)
    return BulkWriteOperation(selector, self.__bulk, collation)
'Insert a single document. :Parameters: - `document` (dict): the document to insert .. seealso:: :ref:`writes-and-ids`'
def insert(self, document):
self.__bulk.add_insert(document)
'Execute all provided operations. :Parameters: - write_concern (optional): the write concern for this bulk execution.'
def execute(self, write_concern=None):
    if write_concern is not None:
        validate_is_mapping('write_concern', write_concern)
    return self.__bulk.execute(write_concern)
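A usage sketch of this (deprecated) builder, assuming a local server and an illustrative collection name; initialize_ordered_bulk_op returns a BulkOperationBuilder wired to the collection:

    from pymongo import MongoClient

    coll = MongoClient().test.legacy_bulk_demo
    bulk = coll.initialize_ordered_bulk_op()      # deprecated; prefer bulk_write
    bulk.insert({'x': 1})
    bulk.find({'x': 1}).update({'$inc': {'x': 1}})
    bulk.find({'y': 1}).upsert().replace_one({'y': 2})
    bulk.find({'x': 2}).remove_one()
    result = bulk.execute()
    print(result['nInserted'], result['nModified'], result['nRemoved'])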
'The protocol version chosen when constructing the context. This attribute is read-only.'
@property
def protocol(self):
    return self._protocol
'Whether to try to verify other peers\' certificates and how to behave if verification fails. This attribute must be one of ssl.CERT_NONE, ssl.CERT_OPTIONAL or ssl.CERT_REQUIRED.'
def __get_verify_mode(self):
return self._verify_mode
'Setter for verify_mode.'
def __set_verify_mode(self, value):
self._verify_mode = value
'Load a private key and the corresponding certificate. The certfile string must be the path to a single file in PEM format containing the certificate as well as any number of CA certificates needed to establish the certificate\'s authenticity. The keyfile string, if present, must point to a file containing the private key. Otherwise the private key will be taken from certfile as well.'
def load_cert_chain(self, certfile, keyfile=None):
    self._certfile = certfile
    self._keyfile = keyfile
'Load a set of "certification authority" (CA) certificates used to validate other peers\' certificates when `~verify_mode` is other than ssl.CERT_NONE.'
def load_verify_locations(self, cafile=None, dummy=None):
self._cafile = cafile
'Wrap an existing Python socket sock and return an ssl.SSLSocket object.'
def wrap_socket(self, sock, server_side=False, do_handshake_on_connect=True, suppress_ragged_eofs=True, dummy=None):
return ssl.wrap_socket(sock, keyfile=self._keyfile, certfile=self._certfile, server_side=server_side, cert_reqs=self._verify_mode, ssl_version=self._protocol, ca_certs=self._cafile, do_handshake_on_connect=do_handshake_on_connect, suppress_ragged_eofs=suppress_ragged_eofs)
'Return this instance\'s socket to the connection pool.'
def close(self):
    if not self.__closed:
        self.__closed = True
        self.pool.return_socket(self.sock)
        self.sock, self.pool = None, None
'Create a new cursor. Should not be called directly by application developers - see :meth:`~pymongo.collection.Collection.find` instead. .. mongodoc:: cursors'
def __init__(self, collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, manipulate=True, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None):
    self.__id = None
    self.__exhaust = False
    self.__exhaust_mgr = None
    spec = filter
    if spec is None:
        spec = {}
    validate_is_mapping('filter', spec)
    if not isinstance(skip, int):
        raise TypeError('skip must be an instance of int')
    if not isinstance(limit, int):
        raise TypeError('limit must be an instance of int')
    validate_boolean('no_cursor_timeout', no_cursor_timeout)
    if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                           CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
        raise ValueError('not a valid value for cursor_type')
    validate_boolean('allow_partial_results', allow_partial_results)
    validate_boolean('oplog_replay', oplog_replay)
    if modifiers is not None:
        warnings.warn("the 'modifiers' parameter is deprecated",
                      DeprecationWarning, stacklevel=2)
        validate_is_mapping('modifiers', modifiers)
    if not isinstance(batch_size, integer_types):
        raise TypeError('batch_size must be an integer')
    if batch_size < 0:
        raise ValueError('batch_size must be >= 0')
    if projection is not None:
        if not projection:
            projection = {'_id': 1}
        projection = helpers._fields_list_to_dict(projection, 'projection')
    self.__collection = collection
    self.__spec = spec
    self.__projection = projection
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = batch_size
    self.__modifiers = (modifiers and modifiers.copy()) or {}
    self.__ordering = (sort and helpers._index_document(sort)) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__comment = comment
    self.__max_time_ms = max_time_ms
    self.__max_await_time_ms = None
    self.__max = max
    self.__min = min
    self.__manipulate = manipulate
    self.__collation = validate_collation_or_none(collation)
    self.__return_key = return_key
    self.__show_record_id = show_record_id
    self.__snapshot = snapshot
    self.__set_hint(hint)
    if cursor_type == CursorType.EXHAUST:
        if self.__collection.database.client.is_mongos:
            raise InvalidOperation('Exhaust cursors are not supported by mongos')
        if limit:
            raise InvalidOperation("Can't use limit and exhaust together.")
        self.__exhaust = True
    self.__empty = False
    self.__data = deque()
    self.__address = None
    self.__retrieved = 0
    self.__killed = False
    self.__codec_options = collection.codec_options
    self.__read_preference = collection.read_preference
    self.__read_concern = collection.read_concern
    self.__query_flags = cursor_type
    if self.__read_preference != ReadPreference.PRIMARY:
        self.__query_flags |= _QUERY_OPTIONS['slave_okay']
    if no_cursor_timeout:
        self.__query_flags |= _QUERY_OPTIONS['no_timeout']
    if allow_partial_results:
        self.__query_flags |= _QUERY_OPTIONS['partial']
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS['oplog_replay']
'The :class:`~pymongo.collection.Collection` that this :class:`Cursor` is iterating.'
@property
def collection(self):
    return self.__collection
'The number of documents retrieved so far.'
@property
def retrieved(self):
    return self.__retrieved
'Rewind this cursor to its unevaluated state. Reset this cursor if it has been partially or completely evaluated. Any options that are present on the cursor will remain in effect. Future iterating performed on this cursor will cause new queries to be sent to the server, even if the resultant data has already been retrieved by this cursor.'
def rewind(self):
    self.__data = deque()
    self.__id = None
    self.__address = None
    self.__retrieved = 0
    self.__killed = False
    return self
'Get a clone of this cursor. Returns a new Cursor instance with options matching those that have been set on the current instance. The clone will be completely unevaluated, even if the current instance has been partially or completely evaluated.'
def clone(self):
return self._clone(True)
'Internal clone helper.'
def _clone(self, deepcopy=True):
    clone = self._clone_base()
    values_to_clone = ('spec', 'projection', 'skip', 'limit', 'max_time_ms',
                       'max_await_time_ms', 'comment', 'max', 'min',
                       'ordering', 'explain', 'hint', 'batch_size',
                       'max_scan', 'manipulate', 'query_flags', 'modifiers',
                       'collation')
    data = dict((k, v) for k, v in iteritems(self.__dict__)
                if k.startswith('_Cursor__') and k[9:] in values_to_clone)
    if deepcopy:
        data = self._deepcopy(data)
    clone.__dict__.update(data)
    return clone
'Creates an empty Cursor object for information to be copied into.'
def _clone_base(self):
return Cursor(self.__collection)
'Closes this cursor.'
def __die(self, synchronous=False):
    if self.__id and not self.__killed:
        if self.__exhaust and self.__exhaust_mgr:
            self.__exhaust_mgr.sock.close()
        else:
            address = _CursorAddress(
                self.__address, self.__collection.full_name)
            if synchronous:
                self.__collection.database.client._close_cursor_now(
                    self.__id, address)
            else:
                self.__collection.database.client.close_cursor(
                    self.__id, address)
    if self.__exhaust and self.__exhaust_mgr:
        self.__exhaust_mgr.close()
    self.__killed = True
'Explicitly close / kill this cursor. Required for PyPy, Jython and other Python implementations that don\'t use reference counting garbage collection.'
def close(self):
self.__die(True)
'Get the spec to use for a query.'
def __query_spec(self):
    operators = self.__modifiers.copy()
    if self.__ordering:
        operators['$orderby'] = self.__ordering
    if self.__explain:
        operators['$explain'] = True
    if self.__hint:
        operators['$hint'] = self.__hint
    if self.__comment:
        operators['$comment'] = self.__comment
    if self.__max_scan:
        operators['$maxScan'] = self.__max_scan
    if self.__max_time_ms is not None:
        operators['$maxTimeMS'] = self.__max_time_ms
    if self.__max:
        operators['$max'] = self.__max
    if self.__min:
        operators['$min'] = self.__min
    if self.__return_key:
        operators['$returnKey'] = self.__return_key
    if self.__show_record_id:
        operators['$showDiskLoc'] = self.__show_record_id
    if self.__snapshot:
        operators['$snapshot'] = self.__snapshot
    if operators:
        # Make a shallow copy so we can cleanly rewrap the query.
        spec = self.__spec.copy()
        if '$query' not in spec:
            spec = SON([('$query', spec)])
        if not isinstance(spec, SON):
            spec = SON(spec)
        spec.update(operators)
        return spec
    elif ('query' in self.__spec and
          (len(self.__spec) == 1 or next(iter(self.__spec)) == 'query')):
        # Wrap with $query when "query" is itself the first key, so it is
        # not mistaken for the query operator.
        return SON({'$query': self.__spec})
    return self.__spec
'Check if it is okay to chain more options onto this cursor.'
def __check_okay_to_chain(self):
if (self.__retrieved or (self.__id is not None)): raise InvalidOperation('cannot set options after executing query')
'Set arbitrary query flags using a bitmask. To set the tailable flag: cursor.add_option(2)'
def add_option(self, mask):
    if not isinstance(mask, int):
        raise TypeError('mask must be an int')
    self.__check_okay_to_chain()
    if mask & _QUERY_OPTIONS['exhaust']:
        if self.__limit:
            raise InvalidOperation("Can't use limit and exhaust together.")
        if self.__collection.database.client.is_mongos:
            raise InvalidOperation('Exhaust cursors are not supported by mongos')
        self.__exhaust = True
    self.__query_flags |= mask
    return self
'Unset arbitrary query flags using a bitmask. To unset the tailable flag: cursor.remove_option(2)'
def remove_option(self, mask):
    if not isinstance(mask, int):
        raise TypeError('mask must be an int')
    self.__check_okay_to_chain()
    if mask & _QUERY_OPTIONS['exhaust']:
        self.__exhaust = False
    self.__query_flags &= ~mask
    return self
'Limits the number of results to be returned by this cursor. Raises :exc:`TypeError` if `limit` is not an integer. Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. The last `limit` applied to this cursor takes precedence. A limit of ``0`` is equivalent to no limit. :Parameters: - `limit`: the number of results to return .. mongodoc:: limit'
def limit(self, limit):
    if not isinstance(limit, integer_types):
        raise TypeError('limit must be an integer')
    if self.__exhaust:
        raise InvalidOperation("Can't use limit and exhaust together.")
    self.__check_okay_to_chain()
    self.__empty = False
    self.__limit = limit
    return self
'Limits the number of documents returned in one batch. Each batch requires a round trip to the server. It can be adjusted to optimize performance and limit data transfer. .. note:: batch_size cannot override MongoDB\'s internal limits on the amount of data it will return to the client in a single batch (i.e. if you set batch size to 1,000,000,000, MongoDB will currently only return 4-16MB of results per batch). Raises :exc:`TypeError` if `batch_size` is not an integer. Raises :exc:`ValueError` if `batch_size` is less than ``0``. Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. The last `batch_size` applied to this cursor takes precedence. :Parameters: - `batch_size`: The size of each batch of results requested.'
def batch_size(self, batch_size):
    if not isinstance(batch_size, integer_types):
        raise TypeError('batch_size must be an integer')
    if batch_size < 0:
        raise ValueError('batch_size must be >= 0')
    self.__check_okay_to_chain()
    self.__batch_size = batch_size
    return self
'Skips the first `skip` results of this cursor. Raises :exc:`TypeError` if `skip` is not an integer. Raises :exc:`ValueError` if `skip` is less than ``0``. Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. The last `skip` applied to this cursor takes precedence. :Parameters: - `skip`: the number of results to skip'
def skip(self, skip):
    if not isinstance(skip, integer_types):
        raise TypeError('skip must be an integer')
    if skip < 0:
        raise ValueError('skip must be >= 0')
    self.__check_okay_to_chain()
    self.__skip = skip
    return self
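Since each of these modifiers returns the cursor, they chain before iteration begins. A small sketch (the collection name is illustrative):

    from pymongo import MongoClient

    coll = MongoClient().test.events
    cursor = coll.find({'level': 'error'}).skip(20).limit(10).batch_size(5)
    for doc in cursor:          # the query is sent on first iteration
        print(doc['_id'])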
'Specifies a time limit for a query operation. If the specified time is exceeded, the operation will be aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` is ``None`` no limit is applied. Raises :exc:`TypeError` if `max_time_ms` is not an integer or ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. :Parameters: - `max_time_ms`: the time limit after which the operation is aborted'
def max_time_ms(self, max_time_ms):
    if not isinstance(max_time_ms, integer_types) and max_time_ms is not None:
        raise TypeError('max_time_ms must be an integer or None')
    self.__check_okay_to_chain()
    self.__max_time_ms = max_time_ms
    return self
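A usage sketch (hypothetical collection) of the server-side time limit; a query that exceeds it raises ExecutionTimeout rather than returning partial results:

    from pymongo import MongoClient
    from pymongo.errors import ExecutionTimeout

    coll = MongoClient().test.big_collection
    try:
        docs = list(coll.find({'unindexed_field': 42}).max_time_ms(100))
    except ExecutionTimeout:
        print('server aborted the query after 100ms')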
'Specifies a time limit for a getMore operation on a :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other types of cursor max_await_time_ms is ignored. Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. .. note:: `max_await_time_ms` requires server version **>= 3.2** :Parameters: - `max_await_time_ms`: the time limit after which the operation is aborted .. versionadded:: 3.2'
def max_await_time_ms(self, max_await_time_ms):
    if (not isinstance(max_await_time_ms, integer_types) and
            max_await_time_ms is not None):
        raise TypeError('max_await_time_ms must be an integer or None')
    self.__check_okay_to_chain()
    if self.__query_flags & CursorType.TAILABLE_AWAIT:
        self.__max_await_time_ms = max_await_time_ms
    return self
'Get a single document or a slice of documents from this cursor. Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has already been used. To get a single document use an integral index, e.g.:: >>> db.test.find()[50] An :class:`IndexError` will be raised if the index is negative or greater than the number of documents in this cursor. Any limit previously applied to this cursor will be ignored. To get a slice of documents use a slice index, e.g.:: >>> db.test.find()[20:25] This will return this cursor with a limit of ``5`` and skip of ``20`` applied. Using a slice index will override any prior limits or skips applied to this cursor (including those applied through previous calls to this method). Raises :class:`IndexError` when the slice has a step, a negative start value, or a stop value less than or equal to the start value. :Parameters: - `index`: An integer or slice index to be applied to this cursor'
def __getitem__(self, index):
    self.__check_okay_to_chain()
    self.__empty = False
    if isinstance(index, slice):
        if index.step is not None:
            raise IndexError('Cursor instances do not support slice steps')
        skip = 0
        if index.start is not None:
            if index.start < 0:
                raise IndexError('Cursor instances do not support '
                                 'negative indices')
            skip = index.start
        if index.stop is not None:
            limit = index.stop - skip
            if limit < 0:
                raise IndexError('stop index must be greater than start '
                                 'index for slice %r' % index)
            if limit == 0:
                self.__empty = True
        else:
            limit = 0
        self.__skip = skip
        self.__limit = limit
        return self
    if isinstance(index, integer_types):
        if index < 0:
            raise IndexError('Cursor instances do not support '
                             'negative indices')
        clone = self.clone()
        clone.skip(index + self.__skip)
        clone.limit(-1)  # -1 is a hard limit of one document.
        for doc in clone:
            return doc
        raise IndexError('no such item for Cursor instance')
    raise TypeError('index %r cannot be applied to Cursor instances' % index)
'Limit the number of documents to scan when performing the query. Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has already been used. Only the last :meth:`max_scan` applied to this cursor has any effect. :Parameters: - `max_scan`: the maximum number of documents to scan'
def max_scan(self, max_scan):
    self.__check_okay_to_chain()
    self.__max_scan = max_scan
    return self
'Adds the `max` operator that specifies the upper bound for a specific index. :Parameters: - `spec`: a list of field, limit pairs specifying the exclusive upper bound for all keys of a specific index in order. .. versionadded:: 2.7'
def max(self, spec):
    if not isinstance(spec, (list, tuple)):
        raise TypeError('spec must be an instance of list or tuple')
    self.__check_okay_to_chain()
    self.__max = SON(spec)
    return self
'Adds the `min` operator that specifies the lower bound for a specific index. :Parameters: - `spec`: a list of field, limit pairs specifying the inclusive lower bound for all keys of a specific index in order. .. versionadded:: 2.7'
def min(self, spec):
    if not isinstance(spec, (list, tuple)):
        raise TypeError('spec must be an instance of list or tuple')
    self.__check_okay_to_chain()
    self.__min = SON(spec)
    return self
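These translate into the $min/$max query modifiers. A short sketch (index and collection names are illustrative; recent server versions also require the matching hint):

    from pymongo import MongoClient, ASCENDING

    coll = MongoClient().test.scores
    coll.create_index([('score', ASCENDING)])
    # Scan only index keys in [50, 90) on the score index.
    cursor = (coll.find()
                  .min([('score', 50)])
                  .max([('score', 90)])
                  .hint([('score', ASCENDING)]))
    print(list(cursor))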