desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
def __init__(self, document):
    """Create an InsertOne instance.

    For use with :meth:`~pymongo.collection.Collection.bulk_write`.

    :Parameters:
      - `document`: The document to insert. If the document is missing an
        _id field one will be added.
    """
    # No validation here; the bulk machinery validates on add.
    self._doc = document
|
def _add_to_bulk(self, bulkobj):
    """Add this operation to the _Bulk instance `bulkobj`."""
    # Delegates straight to the bulk object's insert queue.
    bulkobj.add_insert(self._doc)
|
def __init__(self, filter, collation=None):
    """Create a DeleteOne instance.

    For use with :meth:`~pymongo.collection.Collection.bulk_write`.

    :Parameters:
      - `filter`: A query that matches the document to delete.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. Only supported on
        MongoDB 3.4 and above.

    .. versionchanged:: 3.5
       Added the `collation` option.
    """
    if filter is not None:
        validate_is_mapping('filter', filter)
    self._filter = filter
    self._collation = collation
|
def _add_to_bulk(self, bulkobj):
    """Add this operation to the _Bulk instance `bulkobj`."""
    # limit=1 makes this a single-document delete.
    bulkobj.add_delete(self._filter, 1, collation=self._collation)
|
def __init__(self, filter, collation=None):
    """Create a DeleteMany instance.

    For use with :meth:`~pymongo.collection.Collection.bulk_write`.

    :Parameters:
      - `filter`: A query that matches the documents to delete.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. Only supported on
        MongoDB 3.4 and above.

    .. versionchanged:: 3.5
       Added the `collation` option.
    """
    if filter is not None:
        validate_is_mapping('filter', filter)
    self._filter = filter
    self._collation = collation
|
def _add_to_bulk(self, bulkobj):
    """Add this operation to the _Bulk instance `bulkobj`."""
    # limit=0 means "no limit": delete every matching document.
    bulkobj.add_delete(self._filter, 0, collation=self._collation)
|
def __init__(self, filter, replacement, upsert=False, collation=None):
    """Create a ReplaceOne instance.

    For use with :meth:`~pymongo.collection.Collection.bulk_write`.

    :Parameters:
      - `filter`: A query that matches the document to replace.
      - `replacement`: The new document.
      - `upsert` (optional): If ``True``, perform an insert if no documents
        match the filter.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. Only supported on
        MongoDB 3.4 and above.

    .. versionchanged:: 3.5
       Added the `collation` option.
    """
    # Validation lives in the shared base-class initializer.
    super(ReplaceOne, self).__init__(filter, replacement, upsert, collation)
|
def _add_to_bulk(self, bulkobj):
    """Add this operation to the _Bulk instance `bulkobj`."""
    bulkobj.add_replace(self._filter, self._doc, self._upsert,
                        collation=self._collation)
|
def __init__(self, filter, update, upsert=False, collation=None):
    """Represents an update_one operation.

    For use with :meth:`~pymongo.collection.Collection.bulk_write`.

    :Parameters:
      - `filter`: A query that matches the document to update.
      - `update`: The modifications to apply.
      - `upsert` (optional): If ``True``, perform an insert if no documents
        match the filter.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. Only supported on
        MongoDB 3.4 and above.

    .. versionchanged:: 3.5
       Added the `collation` option.
    """
    # Validation lives in the shared base-class initializer.
    super(UpdateOne, self).__init__(filter, update, upsert, collation)
|
def _add_to_bulk(self, bulkobj):
    """Add this operation to the _Bulk instance `bulkobj`."""
    # multi=False: only the first matching document is updated.
    bulkobj.add_update(self._filter, self._doc, False, self._upsert,
                       collation=self._collation)
|
def __init__(self, filter, update, upsert=False, collation=None):
    """Create an UpdateMany instance.

    For use with :meth:`~pymongo.collection.Collection.bulk_write`.

    :Parameters:
      - `filter`: A query that matches the documents to update.
      - `update`: The modifications to apply.
      - `upsert` (optional): If ``True``, perform an insert if no documents
        match the filter.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. Only supported on
        MongoDB 3.4 and above.

    .. versionchanged:: 3.5
       Added the `collation` option.
    """
    # Validation lives in the shared base-class initializer.
    super(UpdateMany, self).__init__(filter, update, upsert, collation)
|
def _add_to_bulk(self, bulkobj):
    """Add this operation to the _Bulk instance `bulkobj`."""
    # multi=True: every matching document is updated.
    bulkobj.add_update(self._filter, self._doc, True, self._upsert,
                       collation=self._collation)
|
def __init__(self, keys, **kwargs):
    """Create an Index instance.

    For use with :meth:`~pymongo.collection.Collection.create_indexes`.

    Takes either a single key or a list of (key, direction) pairs. Keys
    must be strings; directions one of :data:`~pymongo.ASCENDING`,
    :data:`~pymongo.DESCENDING`, :data:`~pymongo.GEO2D`,
    :data:`~pymongo.GEOHAYSTACK`, :data:`~pymongo.GEOSPHERE`,
    :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`.

    Valid options include, but are not limited to: `name`, `unique`,
    `background`, `sparse`, `bucketSize`, `min`, `max`,
    `expireAfterSeconds`, `partialFilterExpression` (server >= 3.2), and
    `collation` (server >= 3.4). See the MongoDB documentation for the
    full list of supported options by server version.

    :Parameters:
      - `keys`: a single key or a list of (key, direction) pairs
        specifying the index to create
      - `**kwargs` (optional): any additional index creation options
        passed as keyword arguments

    .. versionchanged:: 3.2
       Added partialFilterExpression to support partial indexes.
    """
    keys = _index_list(keys)
    # Generate a name from the key pattern only when none was supplied.
    if ('name' not in kwargs):
        kwargs['name'] = _gen_index_name(keys)
    kwargs['key'] = _index_document(keys)
    # collation is validated and stored separately, not left in kwargs.
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    # The remaining kwargs become the createIndexes document verbatim.
    self.__document = kwargs
    if (collation is not None):
        self.__document['collation'] = collation
|
@property
def document(self):
    """An index document suitable for passing to the createIndexes
    command."""
    return self.__document
|
@property
def name(self):
    """The name of this read preference (the class name)."""
    # type(self) is equivalent to self.__class__ for new-style classes.
    return type(self).__name__
|
@property
def mongos_mode(self):
    """The mongos mode of this read preference."""
    return self.__mongos_mode
|
@property
def document(self):
    """Read preference as a document."""
    result = {'mode': self.__mongos_mode}
    tags = self.__tag_sets
    # Omit 'tags' when unset or when it is the trivial "match anything" set.
    if tags not in (None, [{}]):
        result['tags'] = tags
    # -1 is the sentinel for "no maxStalenessSeconds".
    if self.__max_staleness != -1:
        result['maxStalenessSeconds'] = self.__max_staleness
    return result
|
@property
def mode(self):
    """The mode of this read preference instance."""
    return self.__mode
|
@property
def tag_sets(self):
    """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to
    read only from members whose ``dc`` tag has the value ``"ny"``.
    To specify a priority-order for tag sets, provide a list of
    tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag
    set, ``{}``, means "read from any member that matches the mode,
    ignoring tags."

    .. seealso:: `Data-Center Awareness
       <http://www.mongodb.org/display/DOCS/Data+Center+Awareness>`_
    """
    # Return a copy so callers cannot mutate internal state; an unset
    # value is presented as the match-anything tag set.
    if self.__tag_sets:
        return list(self.__tag_sets)
    return [{}]
|
@property
def max_staleness(self):
    """The maximum estimated length of time (in seconds) a replica set
    secondary can fall behind the primary in replication before it will
    no longer be selected for operations, or -1 for no maximum."""
    return self.__max_staleness
|
@property
def min_wire_version(self):
    """The wire protocol version the server must support.

    maxStalenessSeconds requires MongoDB 3.4 / maxWireVersion 5; all
    servers' maxWireVersion must be at least this value or the driver
    raises :exc:`~pymongo.errors.ConfigurationError`.
    """
    # -1 means maxStalenessSeconds is unset, so no version constraint.
    if self.__max_staleness == -1:
        return 0
    return 5
|
def __getstate__(self):
    """Return value of object for pickling.

    Needed explicitly because __slots__() defined.
    """
    return {'mode': self.__mode, 'tag_sets': self.__tag_sets, 'max_staleness': self.__max_staleness}
|
def __setstate__(self, value):
    """Restore from pickling.

    Re-validates tag_sets and max_staleness rather than trusting the
    pickled payload.
    """
    self.__mode = value['mode']
    # mongos mode string is derived, not pickled.
    self.__mongos_mode = _MONGOS_MODES[self.__mode]
    self.__tag_sets = _validate_tag_sets(value['tag_sets'])
    self.__max_staleness = _validate_max_staleness(value['max_staleness'])
|
def __call__(self, selection):
    """Apply this read preference to a Selection.

    Primary mode: only the primary is ever eligible.
    """
    return selection.primary_selection
|
def __call__(self, selection):
    """Apply this read preference to Selection.

    Prefer the primary when one is known; otherwise fall back to
    tag/staleness-filtered secondaries.
    """
    if selection.primary:
        return selection.primary_selection
    else:
        return secondary_with_tags_server_selector(self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection))
|
def __call__(self, selection):
    """Apply this read preference to Selection.

    Secondary mode: only tag/staleness-filtered secondaries are eligible.
    """
    return secondary_with_tags_server_selector(self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection))
|
def __call__(self, selection):
    """Apply this read preference to Selection.

    Prefer tag/staleness-filtered secondaries; fall back to the primary
    when none qualify.
    """
    candidates = secondary_with_tags_server_selector(
        self.tag_sets,
        max_staleness_selectors.select(self.max_staleness, selection))
    if not candidates:
        return selection.primary_selection
    return candidates
|
def __call__(self, selection):
    """Apply this read preference to Selection.

    Nearest mode: any member (primary or secondary) matching the tags
    and staleness constraints is eligible.
    """
    return member_with_tags_server_selector(self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection))
|
def get(self):
    """Get the calculated average, or None if no samples yet."""
    return self.average
|
def will_copy(self):
    """Will this SON manipulator make a copy of the incoming document?

    Derived classes that do need to make a copy should override this
    method, returning True instead of False. All non-copying manipulators
    will be applied first (so that the user's document will be updated
    appropriately), followed by copying manipulators.
    """
    return False
|
def transform_incoming(self, son, collection):
    """Manipulate an incoming SON object.

    :Parameters:
      - `son`: the SON object to be inserted into the database
      - `collection`: the collection the object is being inserted into
    """
    # Copying manipulators get a SON copy; others pass the object through.
    return SON(son) if self.will_copy() else son
|
def transform_outgoing(self, son, collection):
    """Manipulate an outgoing SON object.

    :Parameters:
      - `son`: the SON object being retrieved from the database
      - `collection`: the collection this object was stored in
    """
    # Copying manipulators get a SON copy; others pass the object through.
    return SON(son) if self.will_copy() else son
|
def transform_incoming(self, son, collection):
    """Add an _id field if it is missing."""
    if '_id' not in son:
        son['_id'] = ObjectId()
    return son
|
def will_copy(self):
    """We need to copy to be sure that we are dealing with SON, not a dict."""
    return True
|
def transform_incoming(self, son, collection):
    """Move _id to the front if it's there."""
    if '_id' not in son:
        return son
    # Build a new SON starting with _id, then merge the rest in order.
    reordered = SON({'_id': son['_id']})
    reordered.update(son)
    return reordered
|
def transform_incoming(self, son, collection):
    """Add the _ns field to the incoming object."""
    # Mutates the document in place with the collection's name.
    son['_ns'] = collection.name
    return son
|
def will_copy(self):
    """We need to copy so the user's document doesn't get transformed refs."""
    return True
|
def transform_incoming(self, son, collection):
    """Replace embedded documents with DBRefs.

    Any mapping containing both '_id' and '_ns' is replaced by a
    :class:`DBRef`; lists are transformed element-wise; everything else
    passes through unchanged. Returns a transformed SON copy of `son`.
    """
    # collections.MutableMapping was removed in Python 3.10; the ABC
    # lives in collections.abc (available since Python 3.3).
    def transform_value(value):
        if isinstance(value, collections.abc.MutableMapping):
            if ('_id' in value) and ('_ns' in value):
                return DBRef(value['_ns'], transform_value(value['_id']))
            else:
                return transform_dict(SON(value))
        elif isinstance(value, list):
            return [transform_value(v) for v in value]
        return value

    def transform_dict(object):
        for (key, value) in object.items():
            object[key] = transform_value(value)
        return object

    return transform_dict(SON(son))
|
def transform_outgoing(self, son, collection):
    """Replace DBRefs with embedded documents.

    Each :class:`DBRef` is dereferenced via ``self.database``; lists are
    transformed element-wise; nested mappings are recursed into. Returns
    a transformed SON copy of `son`.
    """
    # collections.MutableMapping was removed in Python 3.10; the ABC
    # lives in collections.abc (available since Python 3.3).
    def transform_value(value):
        if isinstance(value, DBRef):
            return self.database.dereference(value)
        elif isinstance(value, list):
            return [transform_value(v) for v in value]
        elif isinstance(value, collections.abc.MutableMapping):
            return transform_dict(SON(value))
        return value

    def transform_dict(object):
        for (key, value) in object.items():
            object[key] = transform_value(value)
        return object

    return transform_dict(SON(son))
|
def __init__(self, client):
    """Instantiate the manager.

    :Parameters:
      - `client`: a MongoClient
    """
    warnings.warn('Cursor managers are deprecated.',
                  DeprecationWarning, stacklevel=2)
    # Hold only a weak reference so the manager never keeps the client alive.
    self.__client = weakref.ref(client)
|
def close(self, cursor_id, address):
    """Kill a cursor.

    Raises TypeError if cursor_id is not an instance of (int, long).

    :Parameters:
      - `cursor_id`: cursor id to close
      - `address`: the cursor's server's (host, port) pair

    .. versionchanged:: 3.0
       Now requires an `address` argument.
    """
    if (not isinstance(cursor_id, integer_types)):
        raise TypeError('cursor_id must be an integer')
    # NOTE(review): __client is a weakref; if the client has been
    # garbage-collected, __client() returns None and this raises
    # AttributeError — presumably acceptable here; confirm against callers.
    self.__client().kill_cursors([cursor_id], address)
|
def started(self, event):
    """Abstract method to handle a `CommandStartedEvent`.

    :Parameters:
      - `event`: An instance of :class:`CommandStartedEvent`.
    """
    raise NotImplementedError
|
def succeeded(self, event):
    """Abstract method to handle a `CommandSucceededEvent`.

    :Parameters:
      - `event`: An instance of :class:`CommandSucceededEvent`.
    """
    raise NotImplementedError
|
def failed(self, event):
    """Abstract method to handle a `CommandFailedEvent`.

    :Parameters:
      - `event`: An instance of :class:`CommandFailedEvent`.
    """
    raise NotImplementedError
|
def started(self, event):
    """Abstract method to handle a `ServerHeartbeatStartedEvent`.

    :Parameters:
      - `event`: An instance of :class:`ServerHeartbeatStartedEvent`.
    """
    raise NotImplementedError
|
def succeeded(self, event):
    """Abstract method to handle a `ServerHeartbeatSucceededEvent`.

    :Parameters:
      - `event`: An instance of :class:`ServerHeartbeatSucceededEvent`.
    """
    raise NotImplementedError
|
def failed(self, event):
    """Abstract method to handle a `ServerHeartbeatFailedEvent`.

    :Parameters:
      - `event`: An instance of :class:`ServerHeartbeatFailedEvent`.
    """
    raise NotImplementedError
|
def opened(self, event):
    """Abstract method to handle a `TopologyOpenedEvent`.

    :Parameters:
      - `event`: An instance of :class:`TopologyOpenedEvent`.
    """
    raise NotImplementedError
|
def description_changed(self, event):
    """Abstract method to handle a `TopologyDescriptionChangedEvent`.

    :Parameters:
      - `event`: An instance of :class:`TopologyDescriptionChangedEvent`.
    """
    raise NotImplementedError
|
def closed(self, event):
    """Abstract method to handle a `TopologyClosedEvent`.

    :Parameters:
      - `event`: An instance of :class:`TopologyClosedEvent`.
    """
    raise NotImplementedError
|
def opened(self, event):
    """Abstract method to handle a `ServerOpeningEvent`.

    :Parameters:
      - `event`: An instance of :class:`ServerOpeningEvent`.
    """
    raise NotImplementedError
|
def description_changed(self, event):
    """Abstract method to handle a `ServerDescriptionChangedEvent`.

    :Parameters:
      - `event`: An instance of :class:`ServerDescriptionChangedEvent`.
    """
    raise NotImplementedError
|
def closed(self, event):
    """Abstract method to handle a `ServerClosedEvent`.

    :Parameters:
      - `event`: An instance of :class:`ServerClosedEvent`.
    """
    raise NotImplementedError
|
@property
def command_name(self):
    """The command name."""
    return self.__cmd_name
|
@property
def request_id(self):
    """The request id for this operation."""
    return self.__rqst_id
|
@property
def connection_id(self):
    """The address (host, port) of the server this command was sent to."""
    return self.__conn_id
|
@property
def operation_id(self):
    """An id for this series of events or None."""
    return self.__op_id
|
@property
def command(self):
    """The command document."""
    return self.__cmd
|
@property
def database_name(self):
    """The name of the database this command was run against."""
    return self.__db
|
@property
def duration_micros(self):
    """The duration of this operation in microseconds."""
    return self.__duration_micros
|
@property
def reply(self):
    """The server reply document for this operation."""
    return self.__reply
|
@property
def duration_micros(self):
    """The duration of this operation in microseconds."""
    return self.__duration_micros
|
@property
def failure(self):
    """The server failure document for this operation."""
    return self.__failure
|
@property
def server_address(self):
    """The address (host/port pair) of the server."""
    return self.__server_address
|
@property
def topology_id(self):
    """A unique identifier for the topology this server is a part of."""
    return self.__topology_id
|
@property
def previous_description(self):
    """The previous
    :class:`~pymongo.server_description.ServerDescription`."""
    return self.__previous_description
|
@property
def new_description(self):
    """The new
    :class:`~pymongo.server_description.ServerDescription`."""
    return self.__new_description
|
@property
def topology_id(self):
    """A unique identifier for the topology this server is a part of."""
    return self.__topology_id
|
@property
def previous_description(self):
    """The previous
    :class:`~pymongo.topology_description.TopologyDescription`."""
    return self.__previous_description
|
@property
def new_description(self):
    """The new
    :class:`~pymongo.topology_description.TopologyDescription`."""
    return self.__new_description
|
@property
def connection_id(self):
    """The address (host, port) of the server this heartbeat was sent
    to."""
    return self.__connection_id
|
@property
def duration(self):
    """The duration of this heartbeat in microseconds."""
    return self.__duration
|
@property
def reply(self):
    """An instance of :class:`~pymongo.ismaster.IsMaster`."""
    return self.__reply
|
@property
def duration(self):
    """The duration of this heartbeat in microseconds."""
    return self.__duration
|
@property
def reply(self):
    """A subclass of :exc:`Exception` (the heartbeat failure)."""
    return self.__reply
|
@property
def enabled_for_commands(self):
    """Are any CommandListener instances registered?"""
    return self.__enabled_for_commands
|
@property
def enabled_for_server(self):
    """Are any ServerListener instances registered?"""
    return self.__enabled_for_server
|
@property
def enabled_for_server_heartbeat(self):
    """Are any ServerHeartbeatListener instances registered?"""
    return self.__enabled_for_server_heartbeat
|
@property
def enabled_for_topology(self):
    """Are any TopologyListener instances registered?"""
    return self.__enabled_for_topology
|
def event_listeners(self):
    """List of registered event listeners.

    Returns shallow copies grouped as (command, server heartbeat,
    server, topology) listener lists.
    """
    return (list(self.__command_listeners),
            list(self.__server_heartbeat_listeners),
            list(self.__server_listeners),
            list(self.__topology_listeners))
|
def publish_command_start(self, command, database_name, request_id, connection_id, op_id=None):
    """Publish a CommandStartedEvent to all command listeners.

    :Parameters:
      - `command`: The command document.
      - `database_name`: The name of the database this command was run
        against.
      - `request_id`: The request id for this operation.
      - `connection_id`: The address (host, port) of the server this
        command was sent to.
      - `op_id`: The (optional) operation id for this operation.
    """
    # Default the operation id to the request id.
    if (op_id is None):
        op_id = request_id
    event = CommandStartedEvent(command, database_name, request_id, connection_id, op_id)
    for subscriber in self.__command_listeners:
        try:
            subscriber.started(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def publish_command_success(self, duration, reply, command_name, request_id, connection_id, op_id=None):
    """Publish a CommandSucceededEvent to all command listeners.

    :Parameters:
      - `duration`: The command duration as a datetime.timedelta.
      - `reply`: The server reply document.
      - `command_name`: The command name.
      - `request_id`: The request id for this operation.
      - `connection_id`: The address (host, port) of the server this
        command was sent to.
      - `op_id`: The (optional) operation id for this operation.
    """
    # Default the operation id to the request id.
    if (op_id is None):
        op_id = request_id
    event = CommandSucceededEvent(duration, reply, command_name, request_id, connection_id, op_id)
    for subscriber in self.__command_listeners:
        try:
            subscriber.succeeded(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def publish_command_failure(self, duration, failure, command_name, request_id, connection_id, op_id=None):
    """Publish a CommandFailedEvent to all command listeners.

    :Parameters:
      - `duration`: The command duration as a datetime.timedelta.
      - `failure`: The server reply document or failure description
        document.
      - `command_name`: The command name.
      - `request_id`: The request id for this operation.
      - `connection_id`: The address (host, port) of the server this
        command was sent to.
      - `op_id`: The (optional) operation id for this operation.
    """
    # Default the operation id to the request id.
    if (op_id is None):
        op_id = request_id
    event = CommandFailedEvent(duration, failure, command_name, request_id, connection_id, op_id)
    for subscriber in self.__command_listeners:
        try:
            subscriber.failed(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def publish_server_heartbeat_started(self, connection_id):
    """Publish a ServerHeartbeatStartedEvent to all server heartbeat
    listeners.

    :Parameters:
      - `connection_id`: The address (host/port pair) of the connection.
    """
    event = ServerHeartbeatStartedEvent(connection_id)
    for subscriber in self.__server_heartbeat_listeners:
        try:
            subscriber.started(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def publish_server_heartbeat_succeeded(self, connection_id, duration, reply):
    """Publish a ServerHeartbeatSucceededEvent to all server heartbeat
    listeners.

    :Parameters:
      - `connection_id`: The address (host/port pair) of the connection.
      - `duration`: The execution time of the event in the highest possible
        resolution for the platform.
      - `reply`: The command reply.
    """
    event = ServerHeartbeatSucceededEvent(duration, reply, connection_id)
    for subscriber in self.__server_heartbeat_listeners:
        try:
            subscriber.succeeded(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def publish_server_heartbeat_failed(self, connection_id, duration, reply):
    """Publish a ServerHeartbeatFailedEvent to all server heartbeat
    listeners.

    :Parameters:
      - `connection_id`: The address (host/port pair) of the connection.
      - `duration`: The execution time of the event in the highest possible
        resolution for the platform.
      - `reply`: The failure; per ServerHeartbeatFailedEvent.reply this is
        a subclass of :exc:`Exception`.
    """
    event = ServerHeartbeatFailedEvent(duration, reply, connection_id)
    for subscriber in self.__server_heartbeat_listeners:
        try:
            subscriber.failed(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def publish_server_opened(self, server_address, topology_id):
    """Publish a ServerOpeningEvent to all server listeners.

    :Parameters:
      - `server_address`: The address (host/port pair) of the server.
      - `topology_id`: A unique identifier for the topology this server
        is a part of.
    """
    event = ServerOpeningEvent(server_address, topology_id)
    for subscriber in self.__server_listeners:
        try:
            subscriber.opened(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def publish_server_closed(self, server_address, topology_id):
    """Publish a ServerClosedEvent to all server listeners.

    :Parameters:
      - `server_address`: The address (host/port pair) of the server.
      - `topology_id`: A unique identifier for the topology this server
        is a part of.
    """
    event = ServerClosedEvent(server_address, topology_id)
    for subscriber in self.__server_listeners:
        try:
            subscriber.closed(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def publish_server_description_changed(self, previous_description, new_description, server_address, topology_id):
    """Publish a ServerDescriptionChangedEvent to all server listeners.

    :Parameters:
      - `previous_description`: The previous server description.
      - `new_description`: The new server description.
      - `server_address`: The address (host/port pair) of the server.
      - `topology_id`: A unique identifier for the topology this server
        is a part of.
    """
    event = ServerDescriptionChangedEvent(previous_description, new_description, server_address, topology_id)
    for subscriber in self.__server_listeners:
        try:
            subscriber.description_changed(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def publish_topology_opened(self, topology_id):
    """Publish a TopologyOpenedEvent to all topology listeners.

    :Parameters:
      - `topology_id`: A unique identifier for the topology this server
        is a part of.
    """
    event = TopologyOpenedEvent(topology_id)
    for subscriber in self.__topology_listeners:
        try:
            subscriber.opened(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def publish_topology_closed(self, topology_id):
    """Publish a TopologyClosedEvent to all topology listeners.

    :Parameters:
      - `topology_id`: A unique identifier for the topology this server
        is a part of.
    """
    event = TopologyClosedEvent(topology_id)
    for subscriber in self.__topology_listeners:
        try:
            subscriber.closed(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def publish_topology_description_changed(self, previous_description, new_description, topology_id):
    """Publish a TopologyDescriptionChangedEvent to all topology listeners.

    :Parameters:
      - `previous_description`: The previous topology description.
      - `new_description`: The new topology description.
      - `topology_id`: A unique identifier for the topology this server
        is a part of.
    """
    event = TopologyDescriptionChangedEvent(previous_description, new_description, topology_id)
    for subscriber in self.__topology_listeners:
        try:
            subscriber.description_changed(event)
        except Exception:
            # A buggy listener must not break the driver; log and continue.
            _handle_exception()
|
def __init__(self, server_description, pool, monitor, topology_id=None,
             listeners=None, events=None):
    """Represent one MongoDB server."""
    self._description = server_description
    self._pool = pool
    self._monitor = monitor
    self._topology_id = topology_id
    self._listener = listeners
    # Publish server events only when listeners exist and are enabled.
    self._publish = listeners is not None and listeners.enabled_for_server
    self._events = events() if self._publish else None
|
def open(self):
    """Start monitoring, or restart after a fork.

    Multiple calls have no effect.
    """
    self._monitor.open()
|
def reset(self):
    """Clear the connection pool."""
    # Use the _pool attribute directly, consistent with __init__ and
    # close() in this class (the original read self.pool).
    self._pool.reset()
|
def close(self):
    """Clear the connection pool and stop the monitor.

    Reconnect with open().
    """
    if self._publish:
        # Queue a ServerClosedEvent for asynchronous publication.
        self._events.put((self._listener.publish_server_closed, (self._description.address, self._topology_id)))
    self._monitor.close()
    self._pool.reset()
|
def request_check(self):
    """Check the server's state soon."""
    self._monitor.request_check()
|
def send_message(self, message, all_credentials):
    """Send an unacknowledged message to MongoDB.

    Can raise ConnectionFailure.

    :Parameters:
      - `message`: (request_id, data).
      - `all_credentials`: dict, maps auth source to MongoCredential.
    """
    # request_id is unused for unacknowledged writes; no reply is read.
    (_, data, max_doc_size) = self._split_message(message)
    with self.get_socket(all_credentials) as sock_info:
        sock_info.send_message(data, max_doc_size)
|
def send_message_with_response(self, operation, set_slave_okay, all_credentials, listeners, exhaust=False):
    """Send a message to MongoDB and return a Response object.

    Can raise ConnectionFailure.

    :Parameters:
      - `operation`: A _Query or _GetMore object.
      - `set_slave_okay`: Pass to operation.get_message.
      - `all_credentials`: dict, maps auth source to MongoCredential.
      - `listeners`: Instance of _EventListeners or None.
      - `exhaust` (optional): If True, the socket used stays checked out.
        It is returned along with its Pool in the Response.
    """
    with self.get_socket(all_credentials, exhaust) as sock_info:
        duration = None
        publish = listeners.enabled_for_commands
        if publish:
            start = datetime.now()
        # The find command (wire version >= 4) can't be used with
        # exhaust cursors; legacy OP_QUERY is used in that case.
        use_find_cmd = False
        if (sock_info.max_wire_version >= 4):
            if (not exhaust):
                use_find_cmd = True
        elif (isinstance(operation, _Query) and (not operation.read_concern.ok_for_legacy)):
            raise ConfigurationError(('read concern level of %s is not valid with a max wire version of %d.' % (operation.read_concern.level, sock_info.max_wire_version)))
        # Collation requires wire version >= 5 (MongoDB 3.4).
        if (isinstance(operation, _Query) and (sock_info.max_wire_version < 5) and (operation.collation is not None)):
            raise ConfigurationError(('Specifying a collation is unsupported with a max wire version of %d.' % (sock_info.max_wire_version,)))
        message = operation.get_message(set_slave_okay, sock_info.is_mongos, use_find_cmd)
        (request_id, data, max_doc_size) = self._split_message(message)
        if publish:
            # Encoding time is tracked separately and folded into the
            # total duration reported to command listeners.
            encoding_duration = (datetime.now() - start)
            (cmd, dbn) = operation.as_command()
            listeners.publish_command_start(cmd, dbn, request_id, sock_info.address)
            start = datetime.now()
        try:
            sock_info.send_message(data, max_doc_size)
            response_data = sock_info.receive_message(1, request_id)
        except Exception as exc:
            if publish:
                duration = ((datetime.now() - start) + encoding_duration)
                failure = _convert_exception(exc)
                # next(iter(cmd)) is the command name (first key).
                listeners.publish_command_failure(duration, failure, next(iter(cmd)), request_id, sock_info.address)
            raise
        if publish:
            duration = ((datetime.now() - start) + encoding_duration)
        if exhaust:
            # Exhaust cursors keep the socket checked out; hand it back
            # with its pool so the caller can return it later.
            return ExhaustResponse(data=response_data, address=self._description.address, socket_info=sock_info, pool=self._pool, duration=duration, request_id=request_id, from_command=use_find_cmd)
        else:
            return Response(data=response_data, address=self._description.address, duration=duration, request_id=request_id, from_command=use_find_cmd)
|
def _split_message(self, message):
    """Return request_id, data, max_doc_size.

    :Parameters:
      - `message`: (request_id, data, max_doc_size) or (request_id, data)
    """
    # Two-tuple messages carry no size hint; report 0 for max_doc_size.
    if len(message) == 3:
        return message
    request_id, data = message
    return (request_id, data, 0)
|
Subsets and Splits