desc | decl | bodies
---|---|---
stringlengths 3–26.7k | stringlengths 11–7.89k | stringlengths 8–553k
'Uploads a user file to a GridFS bucket with a custom file id.
Reads the contents of the user file from `source` and uploads
it to the file `filename`. Source can be a string or file-like object.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
file_id = fs.upload_from_stream_with_id(
ObjectId(),
"test_file",
"data I want to store!",
chunk_size_bytes=4,
metadata={"contentType": "text/plain"})
Raises :exc:`~gridfs.errors.NoFile` if no such version of
that file exists.
Raises :exc:`~ValueError` if `filename` is not a string.
:Parameters:
- `file_id`: The id to use for this file. The id must not have
already been used for another file.
- `filename`: The name of the file to upload.
- `source`: The source stream of the content to be uploaded. Must be
a file-like object that implements :meth:`read` or a string.
- `chunk_size_bytes` (optional): The number of bytes per chunk of this
file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`.
- `metadata` (optional): User data for the 'metadata' field of the
files collection document. If not provided the metadata field will
be omitted from the files collection document.'
| def upload_from_stream_with_id(self, file_id, filename, source, chunk_size_bytes=None, metadata=None):
| with self.open_upload_stream_with_id(file_id, filename, chunk_size_bytes, metadata) as gin:
      gin.write(source)
|
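The docstring above already sketches the call; as a fuller, self-contained sketch (assuming a MongoDB server on localhost and the `pymongo`/`gridfs` packages; the database name is illustrative)::

    from io import BytesIO

    from bson import ObjectId
    from pymongo import MongoClient
    from gridfs import GridFSBucket

    db = MongoClient().gridfs_example            # hypothetical database name
    bucket = GridFSBucket(db)

    # The caller-chosen _id must not already be in use for another file.
    file_id = ObjectId()
    bucket.upload_from_stream_with_id(
        file_id,
        "test_file",
        BytesIO(b"data I want to store!"),
        metadata={"contentType": "text/plain"})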
'Opens a Stream from which the application can read the contents of
the stored file specified by file_id.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# get _id of file to read.
file_id = fs.upload_from_stream("test_file", "data I want to store!")
grid_out = fs.open_download_stream(file_id)
contents = grid_out.read()
Returns an instance of :class:`~gridfs.grid_file.GridOut`.
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
:Parameters:
- `file_id`: The _id of the file to be downloaded.'
| def open_download_stream(self, file_id):
| gout = GridOut(self._collection, file_id)
gout._ensure_file()
return gout
|
'Downloads the contents of the stored file specified by file_id and
writes the contents to `destination`.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# Get _id of file to read
file_id = fs.upload_from_stream("test_file", "data I want to store!")
# Get file to write to
file = open('myfile', 'wb+')
fs.download_to_stream(file_id, file)
file.seek(0)
contents = file.read()
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
:Parameters:
- `file_id`: The _id of the file to be downloaded.
- `destination`: a file-like object implementing :meth:`write`.'
| def download_to_stream(self, file_id, destination):
| gout = self.open_download_stream(file_id)
  for chunk in gout:
      destination.write(chunk)
|
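A short round-trip sketch for the method above, reusing `bucket` and `file_id` from the earlier upload example (that reuse is an assumption of this sketch)::

    from io import BytesIO

    out = BytesIO()
    bucket.download_to_stream(file_id, out)      # writes every chunk of the file to `out`
    assert out.getvalue() == b"data I want to store!"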
'Given a file_id, delete this stored file's files collection document
and associated chunks from a GridFS bucket.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# Get _id of file to delete
file_id = fs.upload_from_stream("test_file", "data I want to store!")
fs.delete(file_id)
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
:Parameters:
- `file_id`: The _id of the file to be deleted.'
| def delete(self, file_id):
| res = self._files.delete_one({'_id': file_id})
  self._chunks.delete_many({'files_id': file_id})
  if not res.deleted_count:
      raise NoFile('no file could be deleted because none matched %s' % file_id)
|
'Find and return the files collection documents that match ``filter``
Returns a cursor that iterates across files matching
arbitrary queries on the files collection. Can be combined
with other modifiers for additional control.
For example::
for grid_data in fs.find({"filename": "lisa.txt"},
no_cursor_timeout=True):
data = grid_data.read()
would iterate through all versions of "lisa.txt" stored in GridFS.
Note that setting no_cursor_timeout to True may be important to
prevent the cursor from timing out during long multi-file processing
work.
As another example, the call::
most_recent_three = fs.find().sort("uploadDate", -1).limit(3)
would return a cursor to the three most recently uploaded files
in GridFS.
Follows a similar interface to
:meth:`~pymongo.collection.Collection.find`
in :class:`~pymongo.collection.Collection`.
:Parameters:
- `filter`: Search query.
- `batch_size` (optional): The number of documents to return per
batch.
- `limit` (optional): The maximum number of documents to return.
- `no_cursor_timeout` (optional): The server normally times out idle
cursors after an inactivity period (10 minutes) to prevent excess
memory use. Set this option to True to prevent that.
- `skip` (optional): The number of documents to skip before
returning.
- `sort` (optional): The order by which to sort results. Defaults to
None.'
| def find(self, *args, **kwargs):
| return GridOutCursor(self._collection, *args, **kwargs)
|
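A small sketch of the cursor interface described above; the filename is illustrative and `bucket` is assumed from the earlier examples::

    # The three most recent revisions of "lisa.txt", newest first.
    cursor = bucket.find({"filename": "lisa.txt"}).sort("uploadDate", -1).limit(3)
    for grid_out in cursor:
        print(grid_out._id, grid_out.length, grid_out.upload_date)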
'Opens a Stream from which the application can read the contents of
`filename` and optional `revision`.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
grid_out = fs.open_download_stream_by_name("test_file")
contents = grid_out.read()
Returns an instance of :class:`~gridfs.grid_file.GridOut`.
Raises :exc:`~gridfs.errors.NoFile` if no such version of
that file exists.
Raises :exc:`~ValueError` if `filename` is not a string.
:Parameters:
- `filename`: The name of the file to read from.
- `revision` (optional): Which revision (documents with the same
filename and different uploadDate) of the file to retrieve.
Defaults to -1 (the most recent revision).
:Note: Revision numbers are defined as follows:
- 0 = the original stored file
- 1 = the first revision
- 2 = the second revision
- etc...
- -2 = the second most recent revision
- -1 = the most recent revision'
| def open_download_stream_by_name(self, filename, revision=-1):
| validate_string('filename', filename)
  query = {'filename': filename}
  cursor = self._files.find(query)
  if revision < 0:
      skip = abs(revision) - 1
      cursor.limit(-1).skip(skip).sort('uploadDate', DESCENDING)
  else:
      cursor.limit(-1).skip(revision).sort('uploadDate', ASCENDING)
  try:
      grid_file = next(cursor)
      return GridOut(self._collection, file_document=grid_file)
  except StopIteration:
      raise NoFile('no version %d for filename %r' % (revision, filename))
|
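A sketch of the revision semantics listed above, assuming "test_file" has been uploaded more than once with the `bucket` from the earlier examples::

    original = bucket.open_download_stream_by_name("test_file", revision=0)
    latest = bucket.open_download_stream_by_name("test_file", revision=-1)
    print(original.upload_date, latest.upload_date)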
'Write the contents of `filename` (with optional `revision`) to
`destination`.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# Get file to write to
file = open('myfile', 'wb')
fs.download_to_stream_by_name("test_file", file)
Raises :exc:`~gridfs.errors.NoFile` if no such version of
that file exists.
Raises :exc:`~ValueError` if `filename` is not a string.
:Parameters:
- `filename`: The name of the file to read from.
- `destination`: A file-like object that implements :meth:`write`.
- `revision` (optional): Which revision (documents with the same
filename and different uploadDate) of the file to retrieve.
Defaults to -1 (the most recent revision).
:Note: Revision numbers are defined as follows:
- 0 = the original stored file
- 1 = the first revision
- 2 = the second revision
- etc...
- -2 = the second most recent revision
- -1 = the most recent revision'
| def download_to_stream_by_name(self, filename, destination, revision=-1):
| gout = self.open_download_stream_by_name(filename, revision)
  for chunk in gout:
      destination.write(chunk)
|
'Renames the stored file with the specified file_id.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# Get _id of file to rename
file_id = fs.upload_from_stream("test_file", "data I want to store!")
fs.rename(file_id, "new_test_name")
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
:Parameters:
- `file_id`: The _id of the file to be renamed.
- `new_filename`: The new name of the file.'
| def rename(self, file_id, new_filename):
| result = self._files.update_one({'_id': file_id}, {'$set': {'filename': new_filename}})
  if not result.matched_count:
      raise NoFile('no files could be renamed %r because none matched file_id %i' % (new_filename, file_id))
|
'Write a file to GridFS
Application developers should generally not need to
instantiate this class directly - instead see the methods
provided by :class:`~gridfs.GridFS`.
Raises :class:`TypeError` if `root_collection` is not an
instance of :class:`~pymongo.collection.Collection`.
Any of the file level options specified in the `GridFS Spec
<http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as
keyword arguments. Any additional keyword arguments will be
set as additional fields on the file document. Valid keyword
arguments include:
- ``"_id"``: unique ID for this file (default:
:class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
not have already been used for another file
- ``"filename"``: human name for the file
- ``"contentType"`` or ``"content_type"``: valid mime-type
for the file
- ``"chunkSize"`` or ``"chunk_size"``: size of each of the
chunks, in bytes (default: 255 kb)
- ``"encoding"``: encoding used for this file. In Python 2,
any :class:`unicode` that is written to the file will be
converted to a :class:`str`. In Python 3, any :class:`str`
that is written to the file will be converted to
:class:`bytes`.
:Parameters:
- `root_collection`: root collection to write to
- `**kwargs` (optional): file level options (see above)
.. versionchanged:: 3.0
`root_collection` must use an acknowledged
:attr:`~pymongo.collection.Collection.write_concern`'
| def __init__(self, root_collection, **kwargs):
| if not isinstance(root_collection, Collection):
      raise TypeError('root_collection must be an instance of Collection')
  if not root_collection.write_concern.acknowledged:
      raise ConfigurationError('root_collection must use acknowledged write_concern')
  if 'content_type' in kwargs:
      kwargs['contentType'] = kwargs.pop('content_type')
  if 'chunk_size' in kwargs:
      kwargs['chunkSize'] = kwargs.pop('chunk_size')
  coll = root_collection.with_options(read_preference=ReadPreference.PRIMARY)
  kwargs['md5'] = md5()
  kwargs['_id'] = kwargs.get('_id', ObjectId())
  kwargs['chunkSize'] = kwargs.get('chunkSize', DEFAULT_CHUNK_SIZE)
  object.__setattr__(self, '_coll', coll)
  object.__setattr__(self, '_chunks', coll.chunks)
  object.__setattr__(self, '_file', kwargs)
  object.__setattr__(self, '_buffer', StringIO())
  object.__setattr__(self, '_position', 0)
  object.__setattr__(self, '_chunk_number', 0)
  object.__setattr__(self, '_closed', False)
  object.__setattr__(self, '_ensured_index', False)
|
'Remove all chunks/files that may have been uploaded and close.'
| def abort(self):
| self._coll.chunks.delete_many({'files_id': self._file['_id']})
self._coll.files.delete_one({'_id': self._file['_id']})
object.__setattr__(self, '_closed', True)
|
'Is this file closed?'
| @property
def closed(self):
| return self._closed
|
'Flush `data` to a chunk.'
| def __flush_data(self, data):
| self.__ensure_indexes()
  self._file['md5'].update(data)
  if not data:
      return
  assert len(data) <= self.chunk_size
  chunk = {'files_id': self._file['_id'], 'n': self._chunk_number, 'data': Binary(data)}
  try:
      self._chunks.insert_one(chunk)
  except DuplicateKeyError:
      self._raise_file_exists(self._file['_id'])
  self._chunk_number += 1
  self._position += len(data)
|
'Flush the buffer contents out to a chunk.'
| def __flush_buffer(self):
| self.__flush_data(self._buffer.getvalue())
self._buffer.close()
self._buffer = StringIO()
|
'Flush the file to the database.'
| def __flush(self):
| try:
      self.__flush_buffer()
      self._file['md5'] = self._file['md5'].hexdigest()
      self._file['length'] = self._position
      self._file['uploadDate'] = datetime.datetime.utcnow()
      return self._coll.files.insert_one(self._file)
  except DuplicateKeyError:
      self._raise_file_exists(self._id)
|
'Raise a FileExists exception for the given file_id.'
| def _raise_file_exists(self, file_id):
| raise FileExists(('file with _id %r already exists' % file_id))
|
'Flush the file and close it.
A closed file cannot be written any more. Calling
:meth:`close` more than once is allowed.'
| def close(self):
| if not self._closed:
      self.__flush()
      object.__setattr__(self, '_closed', True)
|
'Write data to the file. There is no return value.
`data` can be either a string of bytes or a file-like object
(implementing :meth:`read`). If the file has an
:attr:`encoding` attribute, `data` can also be a
:class:`unicode` (:class:`str` in python 3) instance, which
will be encoded as :attr:`encoding` before being written.
Due to buffering, the data may not actually be written to the
database until the :meth:`close` method is called. Raises
:class:`ValueError` if this file is already closed. Raises
:class:`TypeError` if `data` is not an instance of
:class:`str` (:class:`bytes` in python 3), a file-like object,
or an instance of :class:`unicode` (:class:`str` in python 3).
Unicode data is only allowed if the file has an :attr:`encoding`
attribute.
:Parameters:
- `data`: string of bytes or file-like object to be written
to the file'
| def write(self, data):
| if self._closed:
      raise ValueError('cannot write to a closed file')
  try:
      read = data.read
  except AttributeError:
      if not isinstance(data, (text_type, bytes)):
          raise TypeError('can only write strings or file-like objects')
      if isinstance(data, text_type):
          try:
              data = data.encode(self.encoding)
          except AttributeError:
              raise TypeError('must specify an encoding for file in order to write %s' % (text_type.__name__,))
      read = StringIO(data).read
  if self._buffer.tell() > 0:
      space = self.chunk_size - self._buffer.tell()
      if space:
          try:
              to_write = read(space)
          except:
              self.abort()
              raise
          self._buffer.write(to_write)
          if len(to_write) < space:
              return
      self.__flush_buffer()
  to_write = read(self.chunk_size)
  while to_write and len(to_write) == self.chunk_size:
      self.__flush_data(to_write)
      to_write = read(self.chunk_size)
  self._buffer.write(to_write)
|
'Write a sequence of strings to the file.
Does not add separators.'
| def writelines(self, sequence):
| for line in sequence:
      self.write(line)
|
'Support for the context manager protocol.'
| def __enter__(self):
| return self
|
'Support for the context manager protocol.
Close the file and allow exceptions to propagate.'
| def __exit__(self, exc_type, exc_val, exc_tb):
| self.close()
return False
|
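Because `GridIn` implements the context-manager protocol shown above, an upload can be flushed automatically when the block exits; a sketch using the legacy `gridfs.GridFS` front end, with `db` assumed from the earlier examples::

    from gridfs import GridFS

    fs = GridFS(db)
    with fs.new_file(filename="hello.txt", contentType="text/plain") as grid_in:
        grid_in.write(b"hello ")
        grid_in.writelines([b"grid", b"fs"])      # buffered until close()
    # __exit__ called close(), which flushed the chunks and the files document.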
'Read a file from GridFS
Application developers should generally not need to
instantiate this class directly - instead see the methods
provided by :class:`~gridfs.GridFS`.
Either `file_id` or `file_document` must be specified,
`file_document` will be given priority if present. Raises
:class:`TypeError` if `root_collection` is not an instance of
:class:`~pymongo.collection.Collection`.
:Parameters:
- `root_collection`: root collection to read from
- `file_id` (optional): value of ``"_id"`` for the file to read
- `file_document` (optional): file document from
`root_collection.files`
.. versionchanged:: 3.0
Creating a GridOut does not immediately retrieve the file metadata
from the server. Metadata is fetched when first needed.'
| def __init__(self, root_collection, file_id=None, file_document=None):
| if not isinstance(root_collection, Collection):
      raise TypeError('root_collection must be an instance of Collection')
  self.__chunks = root_collection.chunks
  self.__files = root_collection.files
  self.__file_id = file_id
  self.__buffer = EMPTY
  self.__position = 0
  self._file = file_document
|
'Reads a chunk at a time. If the current position is within a
chunk the remainder of the chunk is returned.'
| def readchunk(self):
| received = len(self.__buffer)
  chunk_data = EMPTY
  chunk_size = int(self.chunk_size)
  if received > 0:
      chunk_data = self.__buffer
  elif self.__position < int(self.length):
      chunk_number = int((received + self.__position) / chunk_size)
      chunk = self.__chunks.find_one({'files_id': self._id, 'n': chunk_number})
      if not chunk:
          raise CorruptGridFile('no chunk #%d' % chunk_number)
      chunk_data = chunk['data'][self.__position % chunk_size:]
      if not chunk_data:
          raise CorruptGridFile('truncated chunk')
  self.__position += len(chunk_data)
  self.__buffer = EMPTY
  return chunk_data
|
'Read at most `size` bytes from the file (less if there
isn't enough data).
The bytes are returned as an instance of :class:`str` (:class:`bytes`
in python 3). If `size` is negative or omitted all data is read.
:Parameters:
- `size` (optional): the number of bytes to read'
| def read(self, size=-1):
| self._ensure_file()
  if size == 0:
      return EMPTY
  remainder = int(self.length) - self.__position
  if size < 0 or size > remainder:
      size = remainder
  received = 0
  data = StringIO()
  while received < size:
      chunk_data = self.readchunk()
      received += len(chunk_data)
      data.write(chunk_data)
  max_chunk_n = math.ceil(self.length / float(self.chunk_size))
  chunk = self.__chunks.find_one({'files_id': self._id, 'n': {'$gte': max_chunk_n}})
  if chunk is not None and len(chunk['data']):
      raise CorruptGridFile('Extra chunk found: expected %i chunks but found chunk with n=%i' % (max_chunk_n, chunk['n']))
  self.__position -= (received - size)
  data.seek(size)
  self.__buffer = data.read()
  data.seek(0)
  return data.read(size)
|
'Read one line or up to `size` bytes from the file.
:Parameters:
- `size` (optional): the maximum number of bytes to read'
| def readline(self, size=-1):
| if size == 0:
      return ''
  remainder = int(self.length) - self.__position
  if size < 0 or size > remainder:
      size = remainder
  received = 0
  data = StringIO()
  while received < size:
      chunk_data = self.readchunk()
      pos = chunk_data.find(NEWLN, 0, size)
      if pos != -1:
          size = received + pos + 1
      received += len(chunk_data)
      data.write(chunk_data)
      if pos != -1:
          break
  self.__position -= (received - size)
  data.seek(size)
  self.__buffer = data.read()
  data.seek(0)
  return data.read(size)
|
'Return the current position of this file.'
| def tell(self):
| return self.__position
|
'Set the current position of this file.
:Parameters:
- `pos`: the position (or offset if using relative
positioning) to seek to
- `whence` (optional): where to seek
from. :attr:`os.SEEK_SET` (``0``) for absolute file
positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative
to the current position, :attr:`os.SEEK_END` (``2``) to
seek relative to the file's end.'
| def seek(self, pos, whence=_SEEK_SET):
| if whence == _SEEK_SET:
      new_pos = pos
  elif whence == _SEEK_CUR:
      new_pos = self.__position + pos
  elif whence == _SEEK_END:
      new_pos = int(self.length) + pos
  else:
      raise IOError(22, 'Invalid value for `whence`')
  if new_pos < 0:
      raise IOError(22, 'Invalid value for `pos` - must be positive')
  self.__position = new_pos
  self.__buffer = EMPTY
|
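A sketch of random access with `seek`/`tell` on a `GridOut`, reusing `bucket` and `file_id` from the upload example (an assumption of this sketch)::

    import os

    grid_out = bucket.open_download_stream(file_id)
    grid_out.seek(5)                    # absolute positioning (os.SEEK_SET)
    print(grid_out.read(4))             # b"I wa"
    grid_out.seek(-6, os.SEEK_END)      # relative to the end of the file
    print(grid_out.read())              # b"store!"
    print(grid_out.tell())              # 21, the length of the stored file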
'Return an iterator over all of this file's data.
The iterator will return chunk-sized instances of
:class:`str` (:class:`bytes` in python 3). This can be
useful when serving files using a webserver that handles
such an iterator efficiently.'
| def __iter__(self):
| return GridOutIterator(self, self.__chunks)
|
'Make GridOut more generically file-like.'
| def close(self):
| pass
|
'Makes it possible to use :class:`GridOut` files
with the context manager protocol.'
| def __enter__(self):
| return self
|
'Makes it possible to use :class:`GridOut` files
with the context manager protocol.'
| def __exit__(self, exc_type, exc_val, exc_tb):
| return False
|
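Since `GridOut` is iterable in chunk-sized pieces and supports the context-manager protocol, a stored file can be streamed without loading it into memory at once; a sketch with an illustrative destination path::

    with bucket.open_download_stream_by_name("test_file") as grid_out:
        with open("copy_of_test_file", "wb") as dest:
            for chunk in grid_out:          # one chunk-sized bytes object at a time
                dest.write(chunk)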
'Create a new cursor, similar to the normal
:class:`~pymongo.cursor.Cursor`.
Should not be called directly by application developers - see
the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead.
.. versionadded:: 2.7
.. mongodoc:: cursors'
| def __init__(self, collection, filter=None, skip=0, limit=0, no_cursor_timeout=False, sort=None, batch_size=0):
| self.__root_collection = collection
super(GridOutCursor, self).__init__(collection.files, filter, skip=skip, limit=limit, no_cursor_timeout=no_cursor_timeout, sort=sort, batch_size=batch_size)
|
'Get next GridOut object from cursor.'
| def next(self):
| next_file = super(GridOutCursor, self).next()
return GridOut(self.__root_collection, file_document=next_file)
|
'Creates an empty GridOutCursor for information to be copied into.'
| def _clone_base(self):
| return GridOutCursor(self.__root_collection)
|
'Create a new command cursor.'
| def __init__(self, collection, cursor_info, address, retrieved=0):
| self.__collection = collection
  self.__id = cursor_info['id']
  self.__address = address
  self.__data = deque(cursor_info['firstBatch'])
  self.__retrieved = retrieved
  self.__batch_size = 0
  self.__killed = (self.__id == 0)
  if 'ns' in cursor_info:
      self.__ns = cursor_info['ns']
  else:
      self.__ns = collection.full_name
|
'Closes this cursor.'
| def __die(self):
| if self.__id and not self.__killed:
      self.__collection.database.client.close_cursor(self.__id, _CursorAddress(self.__address, self.__ns))
  self.__killed = True
|
'Explicitly close / kill this cursor. Required for PyPy, Jython and
other Python implementations that don't use reference counting
garbage collection.'
| def close(self):
| self.__die()
|
'Limits the number of documents returned in one batch. Each batch
requires a round trip to the server. It can be adjusted to optimize
performance and limit data transfer.
.. note:: batch_size can not override MongoDB's internal limits on the
amount of data it will return to the client in a single batch (i.e
if you set batch size to 1,000,000,000, MongoDB will currently only
return 4-16MB of results per batch).
Raises :exc:`TypeError` if `batch_size` is not an integer.
Raises :exc:`ValueError` if `batch_size` is less than ``0``.
:Parameters:
- `batch_size`: The size of each batch of results requested.'
| def batch_size(self, batch_size):
| if not isinstance(batch_size, integer_types):
      raise TypeError('batch_size must be an integer')
  if batch_size < 0:
      raise ValueError('batch_size must be >= 0')
  self.__batch_size = batch_size == 1 and 2 or batch_size
  return self
|
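A sketch of tuning `batch_size` on a command cursor such as the one returned by `aggregate`; the collection and pipeline are illustrative::

    cursor = db.things.aggregate([{"$match": {"status": "active"}}])
    cursor.batch_size(100)      # at most 100 documents per getMore round trip
    for doc in cursor:
        print(doc)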
'Send a getmore message and handle the response.'
| def __send_message(self, operation):
| client = self.__collection.database.client
  listeners = client._event_listeners
  publish = listeners.enabled_for_commands
  try:
      response = client._send_message_with_response(operation, address=self.__address)
  except AutoReconnect:
      self.__killed = True
      raise
  cmd_duration = response.duration
  rqst_id = response.request_id
  from_command = response.from_command
  if publish:
      start = datetime.datetime.now()
  try:
      doc = helpers._unpack_response(response.data, self.__id, self.__collection.codec_options)
      if from_command:
          helpers._check_command_response(doc['data'][0])
  except OperationFailure as exc:
      self.__killed = True
      if publish:
          duration = (datetime.datetime.now() - start) + cmd_duration
          listeners.publish_command_failure(duration, exc.details, 'getMore', rqst_id, self.__address)
      raise
  except NotMasterError as exc:
      self.__killed = True
      if publish:
          duration = (datetime.datetime.now() - start) + cmd_duration
          listeners.publish_command_failure(duration, exc.details, 'getMore', rqst_id, self.__address)
      client._reset_server_and_request_check(self.address)
      raise
  except Exception as exc:
      if publish:
          duration = (datetime.datetime.now() - start) + cmd_duration
          listeners.publish_command_failure(duration, _convert_exception(exc), 'getMore', rqst_id, self.__address)
      raise
  if from_command:
      cursor = doc['data'][0]['cursor']
      documents = cursor['nextBatch']
      self.__id = cursor['id']
      self.__retrieved += len(documents)
  else:
      documents = doc['data']
      self.__id = doc['cursor_id']
      self.__retrieved += doc['number_returned']
  if publish:
      duration = (datetime.datetime.now() - start) + cmd_duration
      res = {'cursor': {'id': self.__id, 'ns': self.__collection.full_name, 'nextBatch': documents}, 'ok': 1}
      listeners.publish_command_success(duration, res, 'getMore', rqst_id, self.__address)
  if self.__id == 0:
      self.__killed = True
  self.__data = deque(documents)
|
'Refreshes the cursor with more data from the server.
Returns the length of self.__data after refresh. Will exit early if
self.__data is already non-empty. Raises OperationFailure when the
cursor cannot be refreshed due to an error on the query.'
| def _refresh(self):
| if len(self.__data) or self.__killed:
      return len(self.__data)
  if self.__id:
      (dbname, collname) = self.__ns.split('.', 1)
      self.__send_message(_GetMore(dbname, collname, self.__batch_size, self.__id, self.__collection.codec_options))
  else:
      self.__killed = True
  return len(self.__data)
|
'Does this cursor have the potential to return more data?
Even if :attr:`alive` is ``True``, :meth:`next` can raise
:exc:`StopIteration`. Best to use a for loop::
for doc in collection.aggregate(pipeline):
print(doc)
.. note:: :attr:`alive` can be True while iterating a cursor from
a failed server. In this case :attr:`alive` will return False after
:meth:`next` fails to retrieve the next batch of results from the
server.'
| @property
def alive(self):
| return bool((len(self.__data) or (not self.__killed)))
|
'Returns the id of the cursor.'
| @property
def cursor_id(self):
| return self.__id
|
'The (host, port) of the server used, or None.
.. versionadded:: 3.0'
| @property
def address(self):
| return self.__address
|
'Advance the cursor.'
| def next(self):
| if len(self.__data) or self._refresh():
      coll = self.__collection
      return coll.database._fix_outgoing(self.__data.popleft(), coll)
  else:
      raise StopIteration
|
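The `alive`/`next` contract documented above can also be exercised by hand; a small sketch, with the collection assumed from the previous example::

    cursor = db.things.aggregate([])            # empty pipeline: every document
    while cursor.alive:
        try:
            doc = next(cursor)                  # may raise StopIteration even when alive was True
        except StopIteration:
            break
        print(doc)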
'"Run a target function periodically on a background thread.
If the target\'s return value is false, the executor stops.
:Parameters:
- `interval`: Seconds between calls to `target`.
- `min_interval`: Minimum seconds between calls if `wake` is
called very often.
- `target`: A function.
- `name`: A name to give the underlying thread.'
| def __init__(self, interval, min_interval, target, name=None):
| self._event = False
self._interval = interval
self._min_interval = min_interval
self._target = target
self._stopped = False
self._thread = None
self._name = name
self._thread_will_exit = False
self._lock = threading.Lock()
|
'Start. Multiple calls have no effect.
Not safe to call from multiple threads at once.'
| def open(self):
| with self._lock:
      if self._thread_will_exit:
          try:
              self._thread.join()
          except ReferenceError:
              pass
          self._thread_will_exit = False
      self._stopped = False
  started = False
  try:
      started = self._thread and self._thread.is_alive()
  except ReferenceError:
      pass
  if not started:
      thread = threading.Thread(target=self._run, name=self._name)
      thread.daemon = True
      self._thread = weakref.proxy(thread)
      _register_executor(self)
      thread.start()
|
'Stop. To restart, call open().
The dummy parameter allows an executor's close method to be a weakref
callback; see monitor.py.'
| def close(self, dummy=None):
| self._stopped = True
|
'Execute the target function soon.'
| def wake(self):
| self._event = True
|
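`PeriodicExecutor` is an internal PyMongo helper; the sketch below only illustrates the contract documented above (a falsy return value from the target stops the executor) and is not part of the public API::

    import time
    from pymongo.periodic_executor import PeriodicExecutor   # internal module

    ticks = []

    def tick():
        ticks.append(time.time())
        return len(ticks) < 5           # returning a falsy value stops the executor

    executor = PeriodicExecutor(interval=0.1, min_interval=0.05,
                                target=tick, name="demo-executor")
    executor.open()                     # starts the daemon thread
    time.sleep(1)
    executor.close()                    # stop; a later open() would restart it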
'Representation of a deployment of MongoDB servers.
:Parameters:
- `topology_type`: initial type
- `server_descriptions`: dict of (address, ServerDescription) for
all seeds
- `replica_set_name`: replica set name or None
- `max_set_version`: greatest setVersion seen from a primary, or None
- `max_election_id`: greatest electionId seen from a primary, or None
- `topology_settings`: a TopologySettings'
| def __init__(self, topology_type, server_descriptions, replica_set_name, max_set_version, max_election_id, topology_settings):
| self._topology_type = topology_type
  self._replica_set_name = replica_set_name
  self._server_descriptions = server_descriptions
  self._max_set_version = max_set_version
  self._max_election_id = max_election_id
  self._topology_settings = topology_settings
  self._incompatible_err = None
  for s in self._server_descriptions.values():
      server_too_new = (s.min_wire_version is not None and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION)
      server_too_old = (s.max_wire_version is not None and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION)
      if server_too_new or server_too_old:
          self._incompatible_err = ('Server at %s:%d uses wire protocol versions %d through %d, but PyMongo only supports %d through %d' % (s.address[0], s.address[1], s.min_wire_version, s.max_wire_version, common.MIN_SUPPORTED_WIRE_VERSION, common.MAX_SUPPORTED_WIRE_VERSION))
          break
|
'Raise ConfigurationError if any server is incompatible.
A server is incompatible if its wire protocol version range does not
overlap with PyMongo's.'
| def check_compatible(self):
| if self._incompatible_err:
      raise ConfigurationError(self._incompatible_err)
|
'A copy of this description, with one server marked Unknown.'
| def reset_server(self, address):
| return updated_topology_description(self, ServerDescription(address))
|
'A copy of this description, with all servers marked Unknown.'
| def reset(self):
| if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
      topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary
  else:
      topology_type = self._topology_type
  sds = dict((address, ServerDescription(address)) for address in self._server_descriptions)
  return TopologyDescription(topology_type, sds, self._replica_set_name, self._max_set_version, self._max_election_id, self._topology_settings)
|
'Dict of (address,
:class:`~pymongo.server_description.ServerDescription`).'
| def server_descriptions(self):
| return self._server_descriptions.copy()
|
'The type of this topology.'
| @property
def topology_type(self):
| return self._topology_type
|
'The topology type as a human readable string.
.. versionadded:: 3.4'
| @property
def topology_type_name(self):
| return TOPOLOGY_TYPE._fields[self._topology_type]
|
'The replica set name.'
| @property
def replica_set_name(self):
| return self._replica_set_name
|
'Greatest setVersion seen from a primary, or None.'
| @property
def max_set_version(self):
| return self._max_set_version
|
'Greatest electionId seen from a primary, or None.'
| @property
def max_election_id(self):
| return self._max_election_id
|
'List of Servers of types besides Unknown.'
| @property
def known_servers(self):
| return [s for s in self._server_descriptions.values() if s.is_server_type_known]
|
'Minimum of all servers' max wire versions, or None.'
| @property
def common_wire_version(self):
| servers = self.known_servers
  if servers:
      return min(s.max_wire_version for s in self.known_servers)
  return None
|
'Does this topology have any readable servers available matching the
given read preference?
:Parameters:
- `read_preference`: an instance of a read preference from
:mod:`~pymongo.read_preferences`. Defaults to
:attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`.
.. note:: When connected directly to a single server this method
always returns ``True``.
.. versionadded:: 3.4'
| def has_readable_server(self, read_preference=ReadPreference.PRIMARY):
| common.validate_read_preference('read_preference', read_preference)
return any(self.apply_selector(read_preference, None))
|
'Does this topology have a writable server available?
.. note:: When connected directly to a single server this method
always returns ``True``.
.. versionadded:: 3.4'
| def has_writable_server(self):
| return self.has_readable_server(ReadPreference.PRIMARY)
|
'The document representation of this collation.
.. note::
:class:`Collation` is immutable. Mutating the value of
:attr:`document` does not mutate this :class:`Collation`.'
| @property
def document(self):
| return self.__document.copy()
|
'The address (host, port) of this server.'
| @property
def address(self):
| return self._address
|
'The type of this server.'
| @property
def server_type(self):
| return self._server_type
|
'The server type as a human readable string.
.. versionadded:: 3.4'
| @property
def server_type_name(self):
| return SERVER_TYPE._fields[self._server_type]
|
'List of hosts, passives, and arbiters known to this server.'
| @property
def all_hosts(self):
| return self._all_hosts
|
'Replica set name or None.'
| @property
def replica_set_name(self):
| return self._replica_set_name
|
'This server's opinion about who the primary is, or None.'
| @property
def primary(self):
| return self._primary
|
'The current average latency or None.'
| @property
def round_trip_time(self):
| if self._address in self._host_to_round_trip_time:
      return self._host_to_round_trip_time[self._address]
  return self._round_trip_time
|
'The last error attempting to connect to the server, or None.'
| @property
def error(self):
| return self._error
|
'Represent a response from the server.
:Parameters:
- `data`: Raw BSON bytes.
- `address`: (host, port) of the source server.
- `request_id`: The request id of this operation.
- `duration`: The duration of the operation.
- `from_command`: if the response is the result of a db command.'
| def __init__(self, data, address, request_id, duration, from_command):
| self._data = data
self._address = address
self._request_id = request_id
self._duration = duration
self._from_command = from_command
|
'Server response's raw BSON bytes.'
| @property
def data(self):
| return self._data
|
'(host, port) of the source server.'
| @property
def address(self):
| return self._address
|
'The request id of this operation.'
| @property
def request_id(self):
| return self._request_id
|
'The duration of the operation.'
| @property
def duration(self):
| return self._duration
|
'If the response is a result from a db command.'
| @property
def from_command(self):
| return self._from_command
|
'Represent a response to an exhaust cursor's initial query.
:Parameters:
- `data`: Raw BSON bytes.
- `address`: (host, port) of the source server.
- `socket_info`: The SocketInfo used for the initial query.
- `pool`: The Pool from which the SocketInfo came.
- `request_id`: The request id of this operation.
- `duration`: The duration of the operation.
- `from_command`: If the response is the result of a db command.'
| def __init__(self, data, address, socket_info, pool, request_id, duration, from_command):
| super(ExhaustResponse, self).__init__(data, address, request_id, duration, from_command)
self._socket_info = socket_info
self._pool = pool
|
'The SocketInfo used for the initial query.
The server will send batches on this socket, without waiting for
getMores from the client, until the result set is exhausted or there
is an error.'
| @property
def socket_info(self):
| return self._socket_info
|
'The Pool from which the SocketInfo came.'
| @property
def pool(self):
| return self._pool
|
'The original options used to create this ClientOptions.'
| @property
def _options(self):
| return self.__options
|
'Whether to begin discovering a MongoDB topology automatically.'
| @property
def connect(self):
| return self.__connect
|
'A :class:`~bson.codec_options.CodecOptions` instance.'
| @property
def codec_options(self):
| return self.__codec_options
|
'A :class:`~pymongo.auth.MongoCredentials` instance or None.'
| @property
def credentials(self):
| return self.__credentials
|
'The local threshold for this instance.'
| @property
def local_threshold_ms(self):
| return self.__local_threshold_ms
|
'The server selection timeout for this instance in seconds.'
| @property
def server_selection_timeout(self):
| return self.__server_selection_timeout
|
'The monitoring frequency in seconds.'
| @property
def heartbeat_frequency(self):
| return self.__heartbeat_frequency
|
'A :class:`~pymongo.pool.PoolOptions` instance.'
| @property
def pool_options(self):
| return self.__pool_options
|
'A read preference instance.'
| @property
def read_preference(self):
| return self.__read_preference
|
'Replica set name or None.'
| @property
def replica_set_name(self):
| return self.__replica_set_name
|
'A :class:`~pymongo.write_concern.WriteConcern` instance.'
| @property
def write_concern(self):
| return self.__write_concern
|
'A :class:`~pymongo.read_concern.ReadConcern` instance.'
| @property
def read_concern(self):
| return self.__read_concern
|
'Get a database by client and name.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
database name.
:Parameters:
- `client`: A :class:`~pymongo.mongo_client.MongoClient` instance.
- `name`: The database name.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) client.codec_options is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) client.read_preference is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) client.write_concern is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) client.read_concern is used.
.. mongodoc:: databases
.. versionchanged:: 3.2
Added the read_concern option.
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
:class:`~pymongo.database.Database` no longer returns an instance
of :class:`~pymongo.collection.Collection` for attribute names
with leading underscores. You must use dict-style lookups instead::
db['__my_collection__']
Not:
db.__my_collection__'
| def __init__(self, client, name, codec_options=None, read_preference=None, write_concern=None, read_concern=None):
| super(Database, self).__init__(codec_options or client.codec_options, read_preference or client.read_preference, write_concern or client.write_concern, read_concern or client.read_concern)
  if not isinstance(name, string_type):
      raise TypeError('name must be an instance of %s' % (string_type.__name__,))
  if name != '$external':
      _check_name(name)
  self.__name = _unicode(name)
  self.__client = client
  self.__incoming_manipulators = []
  self.__incoming_copying_manipulators = []
  self.__outgoing_manipulators = []
  self.__outgoing_copying_manipulators = []
|
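A sketch of the access patterns described above, including the dict-style lookup required for collection names with leading underscores::

    from pymongo import MongoClient

    client = MongoClient()
    db = client.test                    # attribute-style access
    same_db = client["test"]            # dict-style access, equivalent
    hidden = db["__my_collection__"]    # leading underscores require dict-style lookup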
'Add a new son manipulator to this database.
**DEPRECATED** - `add_son_manipulator` is deprecated.
.. versionchanged:: 3.0
Deprecated add_son_manipulator.'
| def add_son_manipulator(self, manipulator):
| warnings.warn('add_son_manipulator is deprecated', DeprecationWarning, stacklevel=2)
  base = SONManipulator()

  def method_overwritten(instance, method):
      'Test if this method has been overridden.'
      return getattr(instance, method).__func__ != getattr(base, method).__func__

  if manipulator.will_copy():
      if method_overwritten(manipulator, 'transform_incoming'):
          self.__incoming_copying_manipulators.insert(0, manipulator)
      if method_overwritten(manipulator, 'transform_outgoing'):
          self.__outgoing_copying_manipulators.insert(0, manipulator)
  else:
      if method_overwritten(manipulator, 'transform_incoming'):
          self.__incoming_manipulators.insert(0, manipulator)
      if method_overwritten(manipulator, 'transform_outgoing'):
          self.__outgoing_manipulators.insert(0, manipulator)
|
'**DEPRECATED**: :class:`SystemJS` helper for this :class:`Database`.
See the documentation for :class:`SystemJS` for more details.'
| @property
def system_js(self):
| return SystemJS(self)
|
'The client instance for this :class:`Database`.'
| @property
def client(self):
| return self.__client
|
'The name of this :class:`Database`.'
| @property
def name(self):
| return self.__name
|
'**DEPRECATED**: All incoming SON manipulators.
.. versionchanged:: 3.5
Deprecated.
.. versionadded:: 2.0'
| @property
def incoming_manipulators(self):
| warnings.warn('Database.incoming_manipulators() is deprecated', DeprecationWarning, stacklevel=2)
return [manipulator.__class__.__name__ for manipulator in self.__incoming_manipulators]
|
'**DEPRECATED**: All incoming SON copying manipulators.
.. versionchanged:: 3.5
Deprecated.
.. versionadded:: 2.0'
| @property
def incoming_copying_manipulators(self):
| warnings.warn('Database.incoming_copying_manipulators() is deprecated', DeprecationWarning, stacklevel=2)
return [manipulator.__class__.__name__ for manipulator in self.__incoming_copying_manipulators]
|
'**DEPRECATED**: All outgoing SON manipulators.
.. versionchanged:: 3.5
Deprecated.
.. versionadded:: 2.0'
| @property
def outgoing_manipulators(self):
| warnings.warn('Database.outgoing_manipulators() is deprecated', DeprecationWarning, stacklevel=2)
return [manipulator.__class__.__name__ for manipulator in self.__outgoing_manipulators]
|
'**DEPRECATED**: All outgoing SON copying manipulators.
.. versionchanged:: 3.5
Deprecated.
.. versionadded:: 2.0'
| @property
def outgoing_copying_manipulators(self):
| warnings.warn('Database.outgoing_copying_manipulators() is deprecated', DeprecationWarning, stacklevel=2)
return [manipulator.__class__.__name__ for manipulator in self.__outgoing_copying_manipulators]
|