@mytec: pushed back before 1.1
This commit is contained in:
29
backend/venv/lib/python3.12/site-packages/motor/__init__.py
Normal file
29
backend/venv/lib/python3.12/site-packages/motor/__init__.py
Normal file
@@ -0,0 +1,29 @@
|
||||
# Copyright 2011-present MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Motor, an asynchronous driver for MongoDB."""
|
||||
from ._version import get_version_string, version, version_tuple # noqa: F401
|
||||
|
||||
"""Current version of Motor."""
|
||||
|
||||
|
||||
try:
|
||||
import tornado
|
||||
except ImportError:
|
||||
tornado = None # type:ignore[assignment]
|
||||
else:
|
||||
# For backwards compatibility with Motor 0.4, export Motor's Tornado classes
|
||||
# at module root. This may change in the future.
|
||||
from .motor_tornado import * # noqa: F403
|
||||
from .motor_tornado import __all__ # noqa: F401
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
41
backend/venv/lib/python3.12/site-packages/motor/_version.py
Normal file
41
backend/venv/lib/python3.12/site-packages/motor/_version.py
Normal file
@@ -0,0 +1,41 @@
|
||||
# Copyright 2022-present MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Version-related data for motor."""
|
||||
import re
|
||||
from typing import List, Tuple, Union
|
||||
|
||||
__version__ = "3.7.1"
|
||||
|
||||
|
||||
def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]:
    """Parse a version string into a tuple.

    ``"3.7.1"`` becomes ``(3, 7, 1)``; a trailing suffix is kept as a
    string, so ``"3.7.1.dev0"`` becomes ``(3, 7, 1, ".dev0")``; a bare
    ``"3.7"`` becomes ``(3, 7)``.

    :Parameters:
      - `version`: the version string to parse

    Raises ``ValueError`` if the string is not a recognizable version.
    """
    # Dots must be escaped: an unescaped "." matches any character, letting
    # malformed strings such as "1a2b3" parse as a valid version.
    pattern = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?P<rest>.*)"
    match = re.match(pattern, version)
    if match:
        parts: List[Union[int, str]] = [int(match[part]) for part in ["major", "minor", "patch"]]
        if match["rest"]:
            # Preserve any pre-release / dev suffix verbatim.
            parts.append(match["rest"])
    elif re.match(r"\d+\.\d+", version):
        # Two-component versions like "3.7".
        parts = [int(part) for part in version.split(".")]
    else:
        raise ValueError("Could not parse version")
    return tuple(parts)
|
||||
|
||||
|
||||
# Parsed form of the version, e.g. (3, 7, 1), for programmatic comparison.
version_tuple = get_version_tuple(__version__)
# String form, kept as a module attribute for backwards compatibility.
version = __version__
|
||||
|
||||
|
||||
def get_version_string() -> str:
    """Return the current Motor version as a string, e.g. ``"3.7.1"``."""
    return __version__
|
||||
@@ -0,0 +1,257 @@
|
||||
# Copyright 2016 MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Serve GridFS files with Motor and aiohttp.
|
||||
|
||||
Requires Python 3.5 or later and aiohttp 3.0 or later.
|
||||
|
||||
See the :doc:`/examples/aiohttp_gridfs_example`.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import mimetypes
|
||||
|
||||
import aiohttp.web
|
||||
import gridfs
|
||||
|
||||
from motor.motor_asyncio import AsyncIOMotorDatabase, AsyncIOMotorGridFSBucket
|
||||
from motor.motor_gridfs import _hash_gridout
|
||||
|
||||
# mypy: disable-error-code="no-untyped-def,no-untyped-call"
|
||||
|
||||
|
||||
def get_gridfs_file(bucket, filename, request):
    """Fetch the GridFS file to serve for a URL.

    The default implementation treats the ``{filename}`` portion of the
    matched URL pattern as a GridFS filename: a request for "/fs/image.png"
    calls :meth:`.AsyncIOMotorGridFSBucket.open_download_stream_by_name`
    with "image.png".  Override this hook (pass ``get_gridfs_file=...`` to
    :class:`AIOHTTPGridFS`) to map the path differently and return a
    :class:`asyncio.Future` resolving to a
    :class:`~motor.motor_asyncio.AsyncIOMotorGridOut`.

    For example, to retrieve the file by ``_id`` instead of filename::

        def get_gridfile_by_id(bucket, filename, request):
            # "filename" is interpreted as _id instead of name.
            # Return a Future AsyncIOMotorGridOut.
            return bucket.open_download_stream(file_id=filename)

        client = AsyncIOMotorClient()
        gridfs_handler = AIOHTTPGridFS(client.my_database,
                                       get_gridfs_file=get_gridfile_by_id)

    :Parameters:
      - `bucket`: An :class:`~motor.motor_asyncio.AsyncIOMotorGridFSBucket`
      - `filename`: A string, the URL portion matching ``{filename}`` in the
        URL pattern
      - `request`: An :class:`aiohttp.web.Request`
    """
    # Delegate straight to the bucket; the result is an awaitable GridOut.
    return bucket.open_download_stream_by_name(filename)
|
||||
|
||||
|
||||
def get_cache_time(filename, modified, mime_type):
    """Hook deciding how long clients may cache a served GridFS file.

    Return a positive number of seconds to trigger aggressive caching, or
    0 to mark the resource as cacheable only.  The default always returns
    0.  Override via the ``get_cache_time=`` argument of
    :class:`AIOHTTPGridFS` -- for instance, to allow image caching::

        def image_cache_time(filename, modified, mime_type):
            if mime_type.startswith('image/'):
                return 3600

            return 0

        client = AsyncIOMotorClient()
        gridfs_handler = AIOHTTPGridFS(client.my_database,
                                       get_cache_time=image_cache_time)

    :Parameters:
      - `filename`: A string, the URL portion matching ``{filename}`` in the
        URL pattern
      - `modified`: A datetime, when the matching GridFS file was created
      - `mime_type`: The file's type, a string like "application/octet-stream"
    """
    return 0
|
||||
|
||||
|
||||
def set_extra_headers(response, gridout):
    """Hook for adjusting the response before it is sent to the client.

    The default implementation does nothing.  Override via the
    ``set_extra_headers=`` argument of :class:`AIOHTTPGridFS` -- for
    example, to advertise gzip content::

        def gzip_header(response, gridout):
            response.headers['Content-Encoding'] = 'gzip'

        client = AsyncIOMotorClient()
        gridfs_handler = AIOHTTPGridFS(client.my_database,
                                       set_extra_headers=gzip_header)

    :Parameters:
      - `response`: An :class:`aiohttp.web.Response`
      - `gridout`: The :class:`~motor.motor_asyncio.AsyncIOMotorGridOut` we
        will serve to the client
    """
|
||||
|
||||
|
||||
def _config_error(request):
    """Raise HTTP 500 for a misconfigured AIOHTTPGridFS route.

    Called when the matched route lacks a ``{filename}`` variable, so the
    handler cannot tell which GridFS file was requested.  Always raises
    :class:`aiohttp.web.HTTPInternalServerError`.
    """
    try:
        formatter = request.match_info.route.resource.get_info()["formatter"]
        # The handler reads request.match_info["filename"], so the route
        # pattern must declare a {filename} variable.
        msg = 'Bad AIOHTTPGridFS route "%s", requires a {filename} variable' % formatter
    except (KeyError, AttributeError):
        # aiohttp API changed? Fall back to simpler error message.
        msg = "Bad AIOHTTPGridFS route for request: %s" % request

    # `from None` suppresses the KeyError/AttributeError chain: the real
    # problem is the route configuration, not the introspection failure.
    raise aiohttp.web.HTTPInternalServerError(text=msg) from None
|
||||
|
||||
|
||||
class AIOHTTPGridFS:
    """Serve files from `GridFS`_.

    This class is a :ref:`request handler <aiohttp-web-handler>` that serves
    GridFS files, similar to aiohttp's built-in static file server.

    .. code-block:: python

        client = AsyncIOMotorClient()
        gridfs_handler = AIOHTTPGridFS(client.my_database)

        app = aiohttp.web.Application()

        # The GridFS URL pattern must have a "{filename}" variable.
        resource = app.router.add_resource("/fs/{filename}")
        resource.add_route("GET", gridfs_handler)
        resource.add_route("HEAD", gridfs_handler)

        app_handler = app.make_handler()
        server = loop.create_server(app_handler, port=80)

    By default, requests' If-Modified-Since headers are honored, but no
    specific cache-control timeout is sent to clients. Thus each request for
    a GridFS file requires a quick check of the file's ``uploadDate`` in
    MongoDB. Pass a custom :func:`get_cache_time` to customize this.

    :Parameters:
      - `database`: An :class:`AsyncIOMotorDatabase`
      - `get_gridfs_file`: Optional override for :func:`get_gridfs_file`
      - `get_cache_time`: Optional override for :func:`get_cache_time`
      - `set_extra_headers`: Optional override for :func:`set_extra_headers`

    .. _GridFS: https://www.mongodb.com/docs/manual/core/gridfs/
    """

    def __init__(
        self,
        database,
        root_collection="fs",
        get_gridfs_file=get_gridfs_file,
        get_cache_time=get_cache_time,
        set_extra_headers=set_extra_headers,
    ):
        # Fail fast on the common mistake of passing a client or a plain
        # pymongo Database instead of an AsyncIOMotorDatabase.
        if not isinstance(database, AsyncIOMotorDatabase):
            raise TypeError(
                "First argument to AIOHTTPGridFS must be AsyncIOMotorDatabase, not %r" % database
            )

        self._database = database
        # One bucket is shared across all requests handled by this instance.
        self._bucket = AsyncIOMotorGridFSBucket(self._database, root_collection)
        # Overridable hooks (see module-level defaults of the same names).
        self._get_gridfs_file = get_gridfs_file
        self._get_cache_time = get_cache_time
        self._set_extra_headers = set_extra_headers

    async def __call__(self, request):
        """Send filepath to client using request."""
        try:
            filename = request.match_info["filename"]
        except KeyError:
            # Route was registered without a {filename} variable; this
            # raises HTTPInternalServerError and never returns.
            _config_error(request)

        if request.method not in ("GET", "HEAD"):
            raise aiohttp.web.HTTPMethodNotAllowed(
                method=request.method, allowed_methods={"GET", "HEAD"}
            )

        try:
            gridout = await self._get_gridfs_file(self._bucket, filename, request)
        except gridfs.NoFile as e:
            raise aiohttp.web.HTTPNotFound(text=request.path) from e

        resp = aiohttp.web.StreamResponse()

        # Get the hash for the GridFS file.
        checksum = _hash_gridout(gridout)

        self._set_standard_headers(request.path, resp, gridout, checksum)

        # Overridable method set_extra_headers.
        self._set_extra_headers(resp, gridout)

        # Check the If-Modified-Since, and don't send the result if the
        # content has not been modified
        ims_value = request.if_modified_since
        if ims_value is not None:
            # If our MotorClient is tz-aware, assume the naive ims_value is in
            # its time zone.
            if_since = ims_value.replace(tzinfo=gridout.upload_date.tzinfo)
            # Compare at whole-second granularity: HTTP dates have no
            # microseconds, but upload_date may.
            modified = gridout.upload_date.replace(microsecond=0)
            if if_since >= modified:
                resp.set_status(304)
                return resp

        # Same for Etag
        etag = request.headers.get("If-None-Match")
        if etag is not None and etag.strip('"') == checksum:
            resp.set_status(304)
            return resp

        resp.content_length = gridout.length
        await resp.prepare(request)

        # HEAD requests get headers only; GET also streams the body.
        if request.method == "GET":
            written = 0
            while written < gridout.length:
                # Reading chunk_size at a time minimizes buffering.
                chunk = await gridout.read(gridout.chunk_size)
                await resp.write(chunk)
                written += len(chunk)
        return resp

    def _set_standard_headers(self, path, resp, gridout, checksum):
        # Last-Modified comes straight from the GridFS upload time.
        resp.last_modified = gridout.upload_date
        content_type = gridout.content_type
        if content_type is None:
            # Fall back to guessing from the URL path's extension.
            content_type, encoding = mimetypes.guess_type(path)

        if content_type:
            resp.content_type = content_type

        resp.headers["Etag"] = '"%s"' % checksum

        # Overridable method get_cache_time.
        cache_time = self._get_cache_time(path, gridout.upload_date, gridout.content_type)

        if cache_time > 0:
            # Expires wants a naive UTC timestamp formatted as an HTTP date.
            resp.headers["Expires"] = (
                datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
                + datetime.timedelta(seconds=cache_time)
            ).strftime("%a, %d %b %Y %H:%M:%S GMT")

            resp.headers["Cache-Control"] = "max-age=" + str(cache_time)
        else:
            # Cacheable, but with no explicit lifetime.
            resp.headers["Cache-Control"] = "public"
|
||||
Binary file not shown.
2289
backend/venv/lib/python3.12/site-packages/motor/core.py
Normal file
2289
backend/venv/lib/python3.12/site-packages/motor/core.py
Normal file
File diff suppressed because it is too large
Load Diff
894
backend/venv/lib/python3.12/site-packages/motor/core.pyi
Normal file
894
backend/venv/lib/python3.12/site-packages/motor/core.pyi
Normal file
@@ -0,0 +1,894 @@
|
||||
# Copyright 2023-present MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from asyncio import Future
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Coroutine,
|
||||
Generic,
|
||||
Iterable,
|
||||
Mapping,
|
||||
MutableMapping,
|
||||
NoReturn,
|
||||
Optional,
|
||||
Sequence,
|
||||
TypeVar,
|
||||
Union,
|
||||
overload,
|
||||
)
|
||||
|
||||
import pymongo.common
|
||||
import pymongo.database
|
||||
import pymongo.errors
|
||||
import pymongo.mongo_client
|
||||
import typing_extensions
|
||||
from bson import Binary, Code, CodecOptions, DBRef, Timestamp
|
||||
from bson.codec_options import TypeRegistry
|
||||
from bson.raw_bson import RawBSONDocument
|
||||
from pymongo import IndexModel, ReadPreference, WriteConcern
|
||||
from pymongo.change_stream import ChangeStream
|
||||
from pymongo.client_options import ClientOptions
|
||||
from pymongo.client_session import ClientSession, SessionOptions, TransactionOptions
|
||||
from pymongo.collection import Collection, ReturnDocument # noqa: F401
|
||||
from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor
|
||||
from pymongo.cursor import Cursor, RawBatchCursor
|
||||
from pymongo.cursor_shared import _Hint, _Sort
|
||||
from pymongo.database import Database
|
||||
from pymongo.encryption import ClientEncryption, RewrapManyDataKeyResult
|
||||
from pymongo.encryption_options import RangeOpts
|
||||
from pymongo.operations import _IndexKeyHint, _IndexList
|
||||
from pymongo.read_concern import ReadConcern
|
||||
from pymongo.read_preferences import _ServerMode
|
||||
from pymongo.results import (
|
||||
BulkWriteResult,
|
||||
ClientBulkWriteResult,
|
||||
DeleteResult,
|
||||
InsertManyResult,
|
||||
InsertOneResult,
|
||||
UpdateResult,
|
||||
)
|
||||
from pymongo.synchronous.client_session import _T
|
||||
from pymongo.synchronous.collection import _WriteOp
|
||||
from pymongo.topology_description import TopologyDescription
|
||||
from pymongo.typings import (
|
||||
_Address,
|
||||
_CollationIn,
|
||||
_DocumentType,
|
||||
_DocumentTypeArg,
|
||||
_Pipeline,
|
||||
)
|
||||
|
||||
# SearchIndexModel only exists in newer PyMongo releases; degrade the name
# to Any when the import is unavailable.
try:
    from pymongo.operations import SearchIndexModel
except ImportError:
    SearchIndexModel: typing_extensions.TypeAlias = Any  # type:ignore[no-redef]

# Retry budget for with_transaction's callback loop — presumably seconds;
# TODO(review): confirm against the implementation in core.py.
_WITH_TRANSACTION_RETRY_TIME_LIMIT: int

# Document type bound used by the codec_options overloads of command().
_CodecDocumentType = TypeVar("_CodecDocumentType", bound=Mapping[str, Any])

def _within_time_limit(start_time: float) -> bool: ...
def _max_time_expired_error(exc: Exception) -> bool: ...
|
||||
|
||||
class AgnosticBase:
    """Stub: base wrapper holding the delegated PyMongo object."""

    # The wrapped synchronous PyMongo object all calls are forwarded to.
    delegate: Any

    def __eq__(self, other: object) -> bool: ...
    def __init__(self, delegate: Any) -> None: ...
|
||||
|
||||
class AgnosticBaseProperties(AgnosticBase, Generic[_DocumentType]):
    """Stub: read-only configuration shared by client/database/collection."""

    codec_options: CodecOptions[_DocumentType]
    read_preference: _ServerMode
    read_concern: ReadConcern
    write_concern: WriteConcern
|
||||
|
||||
class AgnosticClient(AgnosticBaseProperties[_DocumentType]):
    """Stub for the async wrapper around :class:`pymongo.MongoClient`."""

    __motor_class_name__: str
    __delegate_class__: type[pymongo.MongoClient[_DocumentType]]

    def address(self) -> Optional[tuple[str, int]]: ...
    def arbiters(self) -> set[tuple[str, int]]: ...
    def close(self) -> None: ...
    def __hash__(self) -> int: ...
    async def drop_database(
        self,
        name_or_database: Union[str, AgnosticDatabase[_DocumentTypeArg]],
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
    ) -> None: ...
    def options(self) -> ClientOptions: ...
    def get_database(
        self,
        name: Optional[str] = None,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> AgnosticDatabase[_DocumentType]: ...
    def get_default_database(
        self,
        default: Optional[str] = None,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> AgnosticDatabase[_DocumentType]: ...
    async def bulk_write(
        self,
        models: Sequence[_WriteOp[_DocumentType]],
        session: Optional[ClientSession] = None,
        ordered: bool = True,
        verbose_results: bool = False,
        bypass_document_validation: Optional[bool] = None,
        comment: Optional[Any] = None,
        let: Optional[Mapping] = None,
        write_concern: Optional[WriteConcern] = None,
    ) -> ClientBulkWriteResult: ...

    HOST: str

    def is_mongos(self) -> bool: ...
    def is_primary(self) -> bool: ...
    async def list_databases(
        self,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> AgnosticCommandCursor[dict[str, Any]]: ...
    async def list_database_names(
        self,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
    ) -> list[str]: ...
    def nodes(self) -> frozenset[_Address]: ...
    PORT: int
    def primary(self) -> Optional[tuple[str, int]]: ...
    read_concern: ReadConcern
    def secondaries(self) -> set[tuple[str, int]]: ...
    async def server_info(
        self, session: Optional[AgnosticClientSession] = None
    ) -> dict[str, Any]: ...
    def topology_description(self) -> TopologyDescription: ...
    async def start_session(
        self,
        causal_consistency: Optional[bool] = None,
        default_transaction_options: Optional[TransactionOptions] = None,
        snapshot: Optional[bool] = False,
    ) -> AgnosticClientSession: ...

    # Event loop plumbing used by the framework-agnostic wrapper machinery.
    _io_loop: Optional[Any]
    _framework: Any

    def __init__(
        self,
        host: Optional[Union[str, Sequence[str]]] = None,
        port: Optional[int] = None,
        document_class: Optional[type[_DocumentType]] = None,
        tz_aware: Optional[bool] = None,
        connect: Optional[bool] = None,
        type_registry: Optional[TypeRegistry] = None,
        **kwargs: Any,
    ) -> None: ...
    @property
    def io_loop(self) -> Any: ...
    def get_io_loop(self) -> Any: ...
    def watch(
        self,
        pipeline: Optional[_Pipeline] = None,
        full_document: Optional[str] = None,
        resume_after: Optional[Mapping[str, Any]] = None,
        max_await_time_ms: Optional[int] = None,
        batch_size: Optional[int] = None,
        collation: Optional[_CollationIn] = None,
        start_at_operation_time: Optional[Timestamp] = None,
        session: Optional[AgnosticClientSession] = None,
        start_after: Optional[Mapping[str, Any]] = None,
        comment: Optional[str] = None,
        full_document_before_change: Optional[str] = None,
        show_expanded_events: Optional[bool] = None,
    ) -> AgnosticChangeStream[_DocumentType]: ...
    # Attribute and item access both resolve to databases of the same name.
    def __getattr__(self, name: str) -> AgnosticDatabase[_DocumentType]: ...
    def __getitem__(self, name: str) -> AgnosticDatabase[_DocumentType]: ...
    def wrap(self, obj: Any) -> Any: ...
|
||||
|
||||
class _MotorTransactionContext:
    """Stub: async context manager returned by start_transaction()."""

    _session: AgnosticClientSession

    def __init__(self, session: AgnosticClientSession): ...
    async def __aenter__(self) -> _MotorTransactionContext: ...
    async def __aexit__(self, exc_type: object, exc_val: object, exc_tb: object) -> None: ...
|
||||
|
||||
class AgnosticClientSession(AgnosticBase):
    """Stub for the async wrapper around :class:`pymongo.client_session.ClientSession`."""

    __motor_class_name__: str
    __delegate_class__: type[ClientSession]

    async def commit_transaction(self) -> None: ...
    async def abort_transaction(self) -> None: ...
    async def end_session(self) -> None: ...
    def cluster_time(self) -> Optional[Mapping[str, Any]]: ...
    def has_ended(self) -> bool: ...
    def in_transaction(self) -> bool: ...
    def options(self) -> SessionOptions: ...
    def operation_time(self) -> Optional[Timestamp]: ...
    def session_id(self) -> Mapping[str, Any]: ...
    def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: ...
    def advance_operation_time(self, operation_time: Timestamp) -> None: ...
    def __init__(self, delegate: ClientSession, motor_client: AgnosticClient): ...
    def get_io_loop(self) -> Any: ...
    async def with_transaction(
        self,
        coro: Callable[..., Coroutine[Any, Any, Any]],
        read_concern: Optional[ReadConcern] = None,
        write_concern: Optional[WriteConcern] = None,
        read_preference: Optional[_ServerMode] = None,
        max_commit_time_ms: Optional[int] = None,
    ) -> _T: ...
    def start_transaction(
        self,
        read_concern: Optional[ReadConcern] = None,
        write_concern: Optional[WriteConcern] = None,
        read_preference: Optional[_ServerMode] = None,
        max_commit_time_ms: Optional[int] = None,
    ) -> _MotorTransactionContext: ...
    @property
    def client(self) -> AgnosticClient: ...
    async def __aenter__(self) -> AgnosticClientSession: ...
    async def __aexit__(self, exc_type: object, exc_val: object, exc_tb: object) -> None: ...
    # Synchronous context-manager protocol is declared but yields nothing;
    # sessions are meant to be used with ``async with``.
    def __enter__(self) -> None: ...
    def __exit__(self, exc_type: object, exc_val: object, exc_tb: object) -> None: ...
|
||||
|
||||
class AgnosticDatabase(AgnosticBaseProperties[_DocumentType]):
    """Stub for the async wrapper around :class:`pymongo.database.Database`."""

    __motor_class_name__: str
    __delegate_class__: type[Database[_DocumentType]]

    def __hash__(self) -> int: ...
    def __bool__(self) -> int: ...
    async def cursor_command(
        self,
        command: Union[str, MutableMapping[str, Any]],
        value: Any = 1,
        read_preference: Optional[_ServerMode] = None,
        codec_options: Optional[CodecOptions[_CodecDocumentType]] = None,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        max_await_time_ms: Optional[int] = None,
        **kwargs: Any,
    ) -> AgnosticCommandCursor[_DocumentType]: ...
    # Three overloads: result type depends on whether codec_options is given.
    @overload
    async def command(
        self,
        command: Union[str, MutableMapping[str, Any]],
        value: Any = ...,
        check: bool = ...,
        allowable_errors: Optional[Sequence[Union[str, int]]] = ...,
        read_preference: Optional[_ServerMode] = ...,
        codec_options: None = ...,
        session: Optional[AgnosticClientSession] = ...,
        comment: Optional[Any] = ...,
        **kwargs: Any,
    ) -> dict[str, Any]: ...
    @overload
    async def command(
        self,
        command: Union[str, MutableMapping[str, Any]],
        value: Any = 1,
        check: bool = True,
        allowable_errors: Optional[Sequence[Union[str, int]]] = None,
        read_preference: Optional[_ServerMode] = None,
        codec_options: CodecOptions[_CodecDocumentType] = ...,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> _CodecDocumentType: ...
    @overload
    async def command(
        self,
        command: Union[str, MutableMapping[str, Any]],
        value: Any = 1,
        check: bool = True,
        allowable_errors: Optional[Sequence[Union[str, int]]] = None,
        read_preference: Optional[_ServerMode] = None,
        codec_options: Optional[CodecOptions[_CodecDocumentType]] = None,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> Union[dict[str, Any], _CodecDocumentType]: ...
    async def create_collection(
        self,
        name: str,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
        session: Optional[AgnosticClientSession] = None,
        check_exists: Optional[bool] = True,
        **kwargs: Any,
    ) -> AgnosticCollection[_DocumentType]: ...
    async def dereference(
        self,
        dbref: DBRef,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> Optional[_DocumentType]: ...
    async def drop_collection(
        self,
        name_or_collection: Union[str, AgnosticCollection[_DocumentTypeArg]],
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        encrypted_fields: Optional[Mapping[str, Any]] = None,
    ) -> dict[str, Any]: ...
    def get_collection(
        self,
        name: str,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> AgnosticCollection[_DocumentType]: ...
    async def list_collection_names(
        self,
        session: Optional[AgnosticClientSession] = None,
        filter: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> list[str]: ...
    async def list_collections(
        self,
        session: Optional[AgnosticClientSession] = None,
        filter: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> AgnosticCommandCursor[MutableMapping[str, Any]]: ...
    @property
    def name(self) -> str: ...
    async def validate_collection(
        self,
        name_or_collection: Union[str, AgnosticCollection[_DocumentTypeArg]],
        scandata: bool = False,
        full: bool = False,
        session: Optional[AgnosticClientSession] = None,
        background: Optional[bool] = None,
        comment: Optional[Any] = None,
    ) -> dict[str, Any]: ...
    def with_options(
        self,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> AgnosticDatabase[_DocumentType]: ...
    async def _async_aggregate(
        self, pipeline: _Pipeline, session: Optional[AgnosticClientSession] = None, **kwargs: Any
    ) -> AgnosticCommandCursor[_DocumentType]: ...
    def __init__(self, client: AgnosticClient[_DocumentType], name: str, **kwargs: Any) -> None: ...
    def aggregate(
        self, pipeline: _Pipeline, *args: Any, **kwargs: Any
    ) -> AgnosticLatentCommandCursor[_DocumentType]: ...
    def watch(
        self,
        pipeline: Optional[_Pipeline] = None,
        full_document: Optional[str] = None,
        resume_after: Optional[Mapping[str, Any]] = None,
        max_await_time_ms: Optional[int] = None,
        batch_size: Optional[int] = None,
        collation: Optional[_CollationIn] = None,
        start_at_operation_time: Optional[Timestamp] = None,
        session: Optional[AgnosticClientSession] = None,
        start_after: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        full_document_before_change: Optional[str] = None,
        show_expanded_events: Optional[bool] = None,
    ) -> AgnosticChangeStream[_DocumentType]: ...
    @property
    def client(self) -> AgnosticClient[_DocumentType]: ...
    # Attribute and item access both resolve to collections of the same name.
    def __getattr__(self, name: str) -> AgnosticCollection[_DocumentType]: ...
    def __getitem__(self, name: str) -> AgnosticCollection[_DocumentType]: ...
    def __call__(self, *args: Any, **kwargs: Any) -> None: ...
    def wrap(self, obj: Any) -> Any: ...
    def get_io_loop(self) -> Any: ...
|
||||
|
||||
class AgnosticCollection(AgnosticBaseProperties[_DocumentType]):
    """Typing facade for a Motor collection.

    All methods here are declaration-only stubs (``...`` bodies) carrying the
    asynchronous signatures of the corresponding PyMongo ``Collection``
    methods; the concrete behavior is supplied elsewhere by Motor's
    delegate/class-creation machinery (see ``MotorAttributeFactory``).
    """

    # Name of the generated Motor class (e.g. for the asyncio framework).
    __motor_class_name__: str
    # PyMongo class this Motor class delegates to.
    __delegate_class__: type[Collection[_DocumentType]]

    def __hash__(self) -> int: ...
    def __bool__(self) -> bool: ...

    # --- CRUD operations (awaitable) ---
    async def bulk_write(
        self,
        requests: Sequence[_WriteOp[_DocumentType]],
        ordered: bool = True,
        bypass_document_validation: bool = False,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        let: Optional[Mapping] = None,
    ) -> BulkWriteResult: ...
    async def count_documents(
        self,
        filter: Mapping[str, Any],
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> int: ...
    async def create_index(
        self,
        keys: _IndexKeyHint,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> str: ...
    async def create_indexes(
        self,
        indexes: Sequence[IndexModel],
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> list[str]: ...
    async def delete_many(
        self,
        filter: Mapping[str, Any],
        collation: Optional[_CollationIn] = None,
        hint: Optional[_IndexKeyHint] = None,
        session: Optional[AgnosticClientSession] = None,
        let: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
    ) -> DeleteResult: ...
    async def delete_one(
        self,
        filter: Mapping[str, Any],
        collation: Optional[_CollationIn] = None,
        hint: Optional[_IndexKeyHint] = None,
        session: Optional[AgnosticClientSession] = None,
        let: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
    ) -> DeleteResult: ...
    async def distinct(
        self,
        key: str,
        filter: Optional[Mapping[str, Any]] = None,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> list[Any]: ...
    async def drop(
        self,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        encrypted_fields: Optional[Mapping[str, Any]] = None,
    ) -> None: ...
    async def drop_index(
        self,
        index_or_name: _IndexKeyHint,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> None: ...
    async def drop_indexes(
        self,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> None: ...
    async def estimated_document_count(
        self, comment: Optional[Any] = None, **kwargs: Any
    ) -> int: ...
    async def find_one(
        self, filter: Optional[Any] = None, *args: Any, **kwargs: Any
    ) -> Optional[_DocumentType]: ...
    async def find_one_and_delete(
        self,
        filter: Mapping[str, Any],
        projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None,
        sort: Optional[_IndexList] = None,
        hint: Optional[_IndexKeyHint] = None,
        session: Optional[AgnosticClientSession] = None,
        let: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> _DocumentType: ...
    async def find_one_and_replace(
        self,
        filter: Mapping[str, Any],
        replacement: Mapping[str, Any],
        projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None,
        sort: Optional[_IndexList] = None,
        upsert: bool = False,
        return_document: bool = ...,
        hint: Optional[_IndexKeyHint] = None,
        session: Optional[AgnosticClientSession] = None,
        let: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> _DocumentType: ...
    async def find_one_and_update(
        self,
        filter: Mapping[str, Any],
        update: Union[Mapping[str, Any], _Pipeline],
        projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None,
        sort: Optional[_IndexList] = None,
        upsert: bool = False,
        return_document: bool = ...,
        array_filters: Optional[Sequence[Mapping[str, Any]]] = None,
        hint: Optional[_IndexKeyHint] = None,
        session: Optional[AgnosticClientSession] = None,
        let: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> _DocumentType: ...
    def full_name(self) -> str: ...
    async def index_information(
        self, session: Optional[AgnosticClientSession] = None, comment: Optional[Any] = None
    ) -> MutableMapping[str, Any]: ...
    async def insert_many(
        self,
        documents: Iterable[Union[_DocumentType, RawBSONDocument]],
        ordered: bool = True,
        bypass_document_validation: bool = False,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
    ) -> InsertManyResult: ...
    async def insert_one(
        self,
        document: Union[_DocumentType, RawBSONDocument],
        bypass_document_validation: bool = False,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
    ) -> InsertOneResult: ...
    @property
    def name(self) -> str: ...
    async def options(
        self, session: Optional[AgnosticClientSession] = None, comment: Optional[Any] = None
    ) -> MutableMapping[str, Any]: ...
    async def rename(
        self,
        new_name: str,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> MutableMapping[str, Any]: ...
    async def replace_one(
        self,
        filter: Mapping[str, Any],
        replacement: Mapping[str, Any],
        upsert: bool = False,
        bypass_document_validation: bool = False,
        collation: Optional[_CollationIn] = None,
        hint: Optional[_IndexKeyHint] = None,
        session: Optional[AgnosticClientSession] = None,
        let: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
    ) -> UpdateResult: ...
    async def update_many(
        self,
        filter: Mapping[str, Any],
        update: Union[Mapping[str, Any], _Pipeline],
        upsert: bool = False,
        array_filters: Optional[Sequence[Mapping[str, Any]]] = None,
        bypass_document_validation: Optional[bool] = None,
        collation: Optional[_CollationIn] = None,
        hint: Optional[_IndexKeyHint] = None,
        session: Optional[AgnosticClientSession] = None,
        let: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
    ) -> UpdateResult: ...
    async def update_one(
        self,
        filter: Mapping[str, Any],
        update: Union[Mapping[str, Any], _Pipeline],
        upsert: bool = False,
        bypass_document_validation: bool = False,
        collation: Optional[_CollationIn] = None,
        array_filters: Optional[Sequence[Mapping[str, Any]]] = None,
        hint: Optional[_IndexKeyHint] = None,
        session: Optional[AgnosticClientSession] = None,
        let: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
    ) -> UpdateResult: ...
    def with_options(
        self,
        codec_options: Optional[CodecOptions] = None,
        read_preference: Optional[ReadPreference] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> AgnosticCollection[_DocumentType]: ...

    # --- Atlas Search index management ---
    def list_search_indexes(
        self,
        name: Optional[str] = None,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> AgnosticLatentCommandCursor[Mapping[str, Any]]: ...
    async def create_search_index(
        self,
        model: Union[Mapping[str, SearchIndexModel], Any],
        session: Optional[AgnosticClientSession] = None,
        comment: Any = None,
        **kwargs: Any,
    ) -> str: ...
    async def create_search_indexes(
        self,
        models: list[SearchIndexModel],
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> list[str]: ...
    async def drop_search_index(
        self,
        name: str,
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> None: ...
    async def update_search_index(
        self,
        name: str,
        definition: Mapping[str, Any],
        session: Optional[AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> None: ...

    def __init__(
        self,
        database: Database[_DocumentType],
        name: str,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
        _delegate: Any = None,
        **kwargs: Any,
    ) -> None: ...
    # Attribute/subscript access yields sub-collections (dotted names).
    def __getattr__(self, name: str) -> AgnosticCollection[_DocumentType]: ...
    def __getitem__(self, name: str) -> AgnosticCollection[_DocumentType]: ...
    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...

    # --- Cursor-returning operations (not awaited directly) ---
    def find(self, *args: Any, **kwargs: Any) -> AgnosticCursor[_DocumentType]: ...
    def find_raw_batches(
        self, *args: Any, **kwargs: Any
    ) -> AgnosticRawBatchCursor[_DocumentType]: ...
    def aggregate(
        self, pipeline: _Pipeline, *args: Any, **kwargs: Any
    ) -> AgnosticCommandCursor[_DocumentType]: ...
    def aggregate_raw_batches(
        self, pipeline: _Pipeline, **kwargs: Any
    ) -> AgnosticRawBatchCursor[_DocumentType]: ...
    def watch(
        self,
        pipeline: Optional[_Pipeline] = None,
        full_document: Optional[str] = None,
        resume_after: Optional[Mapping[str, Any]] = None,
        max_await_time_ms: Optional[int] = None,
        batch_size: Optional[int] = None,
        collation: Optional[_CollationIn] = None,
        start_at_operation_time: Optional[Timestamp] = None,
        session: Optional[AgnosticClientSession] = None,
        start_after: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        full_document_before_change: Optional[str] = None,
        show_expanded_events: Optional[bool] = None,
    ) -> Any: ...
    def list_indexes(
        self, session: Optional[AgnosticClientSession] = None, **kwargs: Any
    ) -> AgnosticLatentCommandCursor[MutableMapping[str, Any]]: ...
    def wrap(self, obj: Any) -> Any: ...
    def get_io_loop(self) -> Any: ...
|
||||
|
||||
class AgnosticBaseCursor(AgnosticBase, Generic[_DocumentType]):
    """Typing facade shared by Motor's cursor classes.

    Declaration-only stubs: wraps a PyMongo ``Cursor``/``CommandCursor`` (or a
    ``_LatentCursor`` placeholder) and exposes async iteration on top of it.
    """

    def __init__(
        self,
        cursor: Union[
            Cursor[_DocumentType], CommandCursor[_DocumentType], _LatentCursor[_DocumentType]
        ],
        collection: AgnosticCollection[_DocumentType],
    ) -> None: ...
    def address(self) -> Optional[_Address]: ...
    def cursor_id(self) -> Optional[int]: ...
    def alive(self) -> bool: ...
    def session(self) -> Optional[AgnosticClientSession]: ...
    async def _async_close(self) -> None: ...
    async def _refresh(self) -> int: ...
    # Async-iterator protocol; __anext__ aliases next() below.
    def __aiter__(self) -> Any: ...
    async def next(self) -> _DocumentType: ...
    __anext__ = next
    async def __aenter__(self) -> Any: ...
    async def __aexit__(self, exc_type: object, exc_val: object, exc_tb: object) -> Any: ...
    def _get_more(self) -> int: ...
    @property
    def fetch_next(self) -> Future[Any]: ...
    def next_object(self) -> Any: ...
    # Callback-style iteration (legacy API).
    def each(self, callback: Callable) -> None: ...
    def _each_got_more(self, callback: Callable, future: Any) -> None: ...
    def to_list(self, length: Optional[int] = ...) -> Future[list[_DocumentType]]: ...
    def _to_list(
        self, length: Union[int, None], the_list: list, future: Any, get_more_result: Any
    ) -> None: ...
    def get_io_loop(self) -> Any: ...
    def batch_size(self, batch_size: int) -> AgnosticBaseCursor[_DocumentType]: ...
    def _buffer_size(self) -> int: ...
    def _query_flags(self) -> Optional[int]: ...
    def _data(self) -> None: ...
    def _killed(self) -> None: ...
    async def close(self) -> None: ...
|
||||
|
||||
class AgnosticCursor(AgnosticBaseCursor[_DocumentType]):
    """Typing facade for a Motor find() cursor delegating to pymongo.Cursor.

    Fluent modifiers (limit, skip, sort, ...) return the cursor itself;
    only ``distinct`` and ``explain`` are awaitable.
    """

    __motor_class_name__: str
    __delegate_class__: type[Cursor]
    def collation(self, collation: Optional[_CollationIn]) -> AgnosticCursor[_DocumentType]: ...
    async def distinct(self, key: str) -> list: ...
    async def explain(self) -> _DocumentType: ...
    def add_option(self, mask: int) -> AgnosticCursor[_DocumentType]: ...
    def remove_option(self, mask: int) -> AgnosticCursor[_DocumentType]: ...
    def limit(self, limit: int) -> AgnosticCursor[_DocumentType]: ...
    def skip(self, skip: int) -> AgnosticCursor[_DocumentType]: ...
    def max_scan(self, max_scan: Optional[int]) -> AgnosticCursor[_DocumentType]: ...
    def sort(
        self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None
    ) -> AgnosticCursor[_DocumentType]: ...
    def hint(self, index: Optional[_Hint]) -> AgnosticCursor[_DocumentType]: ...
    def where(self, code: Union[str, Code]) -> AgnosticCursor[_DocumentType]: ...
    def max_await_time_ms(
        self, max_await_time_ms: Optional[int]
    ) -> AgnosticCursor[_DocumentType]: ...
    def max_time_ms(self, max_time_ms: Optional[int]) -> AgnosticCursor[_DocumentType]: ...
    def min(self, spec: _Sort) -> AgnosticCursor[_DocumentType]: ...
    def max(self, spec: _Sort) -> AgnosticCursor[_DocumentType]: ...
    def comment(self, comment: Any) -> AgnosticCursor[_DocumentType]: ...
    def allow_disk_use(self, allow_disk_use: bool) -> AgnosticCursor[_DocumentType]: ...
    def rewind(self) -> AgnosticCursor[_DocumentType]: ...
    def clone(self) -> AgnosticCursor[_DocumentType]: ...
    def __copy__(self) -> AgnosticCursor[_DocumentType]: ...
    def __deepcopy__(self, memo: Any) -> AgnosticCursor[_DocumentType]: ...
    def _query_flags(self) -> int: ...
    def _data(self) -> Any: ...
    def _killed(self) -> Any: ...
|
||||
|
||||
class AgnosticRawBatchCursor(AgnosticCursor[_DocumentType]):
    """Typing facade for raw-BSON-batch find cursors (delegate: RawBatchCursor)."""

    __motor_class_name__: str
    __delegate_class__: type[RawBatchCursor]
|
||||
|
||||
class AgnosticCommandCursor(AgnosticBaseCursor[_DocumentType]):
    """Typing facade for command cursors (aggregate, list_indexes, ...)."""

    __motor_class_name__: str
    __delegate_class__: type[CommandCursor]

    def _query_flags(self) -> int: ...
    def _data(self) -> Any: ...
    def _killed(self) -> Any: ...
|
||||
|
||||
class AgnosticRawBatchCommandCursor(AgnosticCommandCursor[_DocumentType]):
    """Typing facade for raw-BSON-batch command cursors."""

    __motor_class_name__: str
    __delegate_class__: type[RawBatchCommandCursor]
|
||||
|
||||
class _LatentCursor(Generic[_DocumentType]):
    """Placeholder standing in for a PyMongo cursor that is created lazily.

    Used by AgnosticLatentCommandCursor before the real server-side cursor
    exists.
    """

    def __init__(self, collection: AgnosticCollection[_DocumentType]): ...
    def _end_session(self, *args: Any, **kwargs: Any) -> None: ...
    def clone(self) -> _LatentCursor[_DocumentType]: ...
    def rewind(self) -> _LatentCursor[_DocumentType]: ...
|
||||
|
||||
class AgnosticLatentCommandCursor(AgnosticCommandCursor[_DocumentType]):
    """Command cursor whose underlying PyMongo cursor is started on demand.

    ``start`` is the callable that creates the real cursor; ``_on_started``
    swaps it in once the start operation completes.
    """

    __motor_class_name__: str
    def __init__(
        self, collection: AgnosticCollection[_DocumentType], start: Any, *args: Any, **kwargs: Any
    ): ...
    def _on_started(self, original_future: Any, future: Any) -> None: ...
|
||||
|
||||
class AgnosticChangeStream(AgnosticBase, Generic[_DocumentType]):
    """Typing facade for a change stream over a client, database or collection.

    Declaration-only stubs delegating to pymongo ``ChangeStream``. Supports
    async iteration and the async context-manager protocol; the synchronous
    ``with`` protocol is declared but not usable (see __enter__/__exit__).
    """

    __motor_class_name__: str
    __delegate_class__: type[ChangeStream]

    async def _close(self) -> None: ...
    @property
    def resume_token(self) -> Optional[Mapping[str, Any]]: ...
    def __init__(
        self,
        target: Union[
            pymongo.MongoClient[_DocumentType], Database[_DocumentType], Collection[_DocumentType]
        ],
        pipeline: Optional[_Pipeline],
        full_document: Optional[str],
        resume_after: Optional[Mapping[str, Any]],
        max_await_time_ms: Optional[int],
        batch_size: Optional[int],
        collation: Optional[_CollationIn],
        start_at_operation_time: Optional[Timestamp],
        session: Optional[AgnosticClientSession],
        start_after: Optional[Mapping[str, Any]],
        comment: Optional[Any] = None,
        full_document_before_change: Optional[str] = None,
        show_expanded_events: Optional[bool] = None,
    ): ...
    # The server-side stream is created lazily on first use.
    def _lazy_init(self) -> None: ...
    def _try_next(self) -> Optional[_DocumentType]: ...
    def alive(self) -> bool: ...
    async def next(self) -> _DocumentType: ...
    async def try_next(self) -> Optional[_DocumentType]: ...
    async def close(self) -> None: ...
    def __aiter__(self) -> AgnosticChangeStream[_DocumentType]: ...
    __anext__ = next
    async def __aenter__(self) -> AgnosticChangeStream[_DocumentType]: ...
    async def __aexit__(self, exc_type: object, exc_val: object, exc_tb: object) -> None: ...
    def get_io_loop(self) -> Any: ...
    def __enter__(self) -> None: ...
    def __exit__(self, exc_type: object, exc_val: object, exc_tb: object) -> None: ...
|
||||
|
||||
class AgnosticClientEncryption(AgnosticBase, Generic[_DocumentType]):
    """Typing facade for client-side field level encryption.

    Declaration-only stubs delegating to pymongo's ``ClientEncryption``.
    Usable as an async context manager; the synchronous ``with`` protocol is
    declared ``NoReturn`` (not supported).
    """

    __motor_class_name__: str
    __delegate_class__: type[ClientEncryption]
    def __init__(
        self,
        kms_providers: Mapping[str, Any],
        key_vault_namespace: str,
        key_vault_client: AgnosticClient[_DocumentTypeArg],
        codec_options: CodecOptions,
        io_loop: Optional[Any] = None,
        kms_tls_options: Optional[Mapping[str, Any]] = None,
    ): ...

    # --- Data key management ---
    async def create_data_key(
        self,
        kms_provider: str,
        master_key: Optional[Mapping[str, Any]] = None,
        key_alt_names: Optional[Sequence[str]] = None,
        key_material: Optional[bytes] = None,
    ) -> Binary: ...
    async def encrypt(
        self,
        value: Any,
        algorithm: str,
        key_id: Optional[Binary] = None,
        key_alt_name: Optional[str] = None,
        query_type: Optional[str] = None,
        contention_factor: Optional[int] = None,
        range_opts: Optional[RangeOpts] = None,
    ) -> Binary: ...
    async def decrypt(self, value: Binary) -> Any: ...
    async def close(self) -> None: ...
    async def rewrap_many_data_key(
        self,
        filter: Mapping[str, Any],
        provider: Optional[str] = None,
        master_key: Optional[Mapping[str, Any]] = None,
    ) -> RewrapManyDataKeyResult: ...
    async def delete_key(self, id: Binary) -> DeleteResult: ...
    async def get_key(self, id: Binary) -> Optional[RawBSONDocument]: ...
    async def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: ...
    async def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: ...
    async def remove_key_alt_name(
        self, id: Binary, key_alt_name: str
    ) -> Optional[RawBSONDocument]: ...
    async def encrypt_expression(
        self,
        expression: Mapping[str, Any],
        algorithm: str,
        key_id: Optional[Binary] = None,
        key_alt_name: Optional[str] = None,
        query_type: Optional[str] = None,
        contention_factor: Optional[int] = None,
        range_opts: Optional[RangeOpts] = None,
    ) -> RawBSONDocument: ...
    @property
    def io_loop(self) -> Any: ...
    def get_io_loop(self) -> Any: ...
    async def __aenter__(self) -> AgnosticClientEncryption[_DocumentType]: ...
    async def __aexit__(self, exc_type: object, exc_val: object, exc_tb: object) -> None: ...
    def __enter__(self) -> NoReturn: ...
    def __exit__(self, exc_type: object, exc_val: object, exc_tb: object) -> None: ...
    async def get_keys(self) -> AgnosticCursor[RawBSONDocument]: ...
    async def create_encrypted_collection(
        self,
        database: AgnosticDatabase[_DocumentTypeArg],
        name: str,
        encrypted_fields: Mapping[str, Any],
        kms_provider: Optional[str] = None,
        master_key: Optional[Mapping[str, Any]] = None,
        **kwargs: Any,
    ) -> tuple[AgnosticCollection[_DocumentTypeArg], Mapping[str, Any]]: ...
|
||||
1637
backend/venv/lib/python3.12/site-packages/motor/docstrings.py
Normal file
1637
backend/venv/lib/python3.12/site-packages/motor/docstrings.py
Normal file
File diff suppressed because it is too large
Load Diff
Binary file not shown.
@@ -0,0 +1,170 @@
|
||||
# Copyright 2014-2016 MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""asyncio compatibility layer for Motor, an asynchronous MongoDB driver.
|
||||
|
||||
See "Frameworks" in the Developer Guide.
|
||||
"""
|
||||
import asyncio
|
||||
import asyncio.tasks
|
||||
import functools
|
||||
import multiprocessing
|
||||
import os
|
||||
import warnings
|
||||
from asyncio import get_event_loop # noqa: F401 - For framework interface.
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
# mypy: ignore-errors
|
||||
|
||||
# contextvars is stdlib on Python 3.7+; on older interpreters it is absent and
# callers feature-test with "if contextvars:".
try:
    import contextvars
except ImportError:
    contextvars = None
|
||||
|
||||
try:
    # asyncio.coroutine was removed in Python 3.11; import it where it still
    # exists to keep the framework interface complete.
    from asyncio import coroutine
except ImportError:

    def coroutine():
        # Stand-in that fails loudly if legacy generator-based coroutine code
        # is exercised on Python 3.11+.
        raise RuntimeError(
            "The coroutine decorator was removed in Python 3.11. Use 'async def' instead"
        )
|
||||
|
||||
|
||||
# Prefix for generated Motor class names under this framework
# (e.g. "AsyncIOMotorClient").
CLASS_PREFIX = "AsyncIO"
|
||||
|
||||
|
||||
def is_event_loop(loop):
    """Return True if *loop* is an asyncio-compatible event loop.

    Accepts any implementation of ``asyncio.AbstractEventLoop``.
    """
    return isinstance(loop, asyncio.AbstractEventLoop)
|
||||
|
||||
|
||||
def check_event_loop(loop):
    """Raise TypeError unless *loop* is an asyncio-compatible event loop."""
    if isinstance(loop, asyncio.AbstractEventLoop):
        return
    raise TypeError("io_loop must be instance of asyncio-compatible event loop, not %r" % loop)
|
||||
|
||||
|
||||
def get_future(loop):
    """Return a fresh Future bound to *loop*."""
    return loop.create_future()
|
||||
|
||||
|
||||
# Size of the shared thread pool: the MOTOR_MAX_WORKERS environment variable
# overrides the default of five threads per CPU core.
if "MOTOR_MAX_WORKERS" in os.environ:
    max_workers = int(os.environ["MOTOR_MAX_WORKERS"])
else:
    max_workers = multiprocessing.cpu_count() * 5

# Shared executor used by run_on_executor() below.
_EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
|
||||
|
||||
|
||||
def _reset_global_executor():
    """Re-initialize the global ThreadPoolExecutor"""
    # Rebind the module-level executor; the previous pool's threads do not
    # survive fork, so a fresh pool is required in child processes.
    global _EXECUTOR  # noqa: PLW0603
    _EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
|
||||
|
||||
|
||||
# os.register_at_fork is not available on all platforms (e.g. Windows).
if hasattr(os, "register_at_fork"):
    # We need this to make sure that creating new clients in subprocesses doesn't deadlock.
    os.register_at_fork(after_in_child=_reset_global_executor)
|
||||
|
||||
|
||||
def run_on_executor(loop, fn, *args, **kwargs):
    """Run blocking *fn(*args, **kwargs)* on the shared thread pool.

    Propagates the caller's contextvars context when the module is available,
    and returns the Future produced by *loop*.
    """
    if contextvars:
        # Run the target inside a copy of the caller's context.
        fn = functools.partial(contextvars.copy_context().run, fn)

    call = functools.partial(fn, *args, **kwargs)
    return loop.run_in_executor(_EXECUTOR, call)
|
||||
|
||||
|
||||
# Adapted from tornado.gen.
|
||||
# Adapted from tornado.gen.
def chain_future(a, b):
    """Propagate future *a*'s outcome (result or exception) to future *b*.

    If *b* completed first (e.g. was cancelled), it is left untouched.
    """

    def _transfer(done):
        assert done is a
        if b.done():
            return
        exc = a.exception()
        if exc is not None:
            b.set_exception(exc)
        else:
            b.set_result(a.result())

    a.add_done_callback(_transfer)
|
||||
|
||||
|
||||
def chain_return_value(future, loop, return_value):
    """Compatible way to return a value in all Pythons.

    PEP 479, raise StopIteration(value) from a coroutine won't work forever,
    but "return value" doesn't work in Python 2. Instead, Motor methods that
    return values resolve a Future with it, and are implemented with callbacks
    rather than a coroutine internally.
    """
    chained = loop.create_future()

    def _copy(resolved):
        # Return early if the task was cancelled.
        if chained.done():
            return
        exc = resolved.exception()
        if exc is not None:
            chained.set_exception(exc)
        else:
            chained.set_result(return_value)

    # Hop onto the loop thread before touching `chained`.
    future.add_done_callback(functools.partial(loop.call_soon_threadsafe, _copy))
    return chained
|
||||
|
||||
|
||||
def is_future(f):
    """Return True if *f* is an asyncio Future (or Task)."""
    return asyncio.isfuture(f)
|
||||
|
||||
|
||||
def call_soon(loop, callback, *args, **kwargs):
    """Schedule *callback* on *loop*.

    ``loop.call_soon`` only accepts positional arguments, so keyword
    arguments are bound via ``functools.partial`` first.
    """
    if not kwargs:
        loop.call_soon(callback, *args)
    else:
        loop.call_soon(functools.partial(callback, *args, **kwargs))
|
||||
|
||||
|
||||
def add_future(loop, future, callback, *args):
    """Invoke ``callback(*args, future)`` on *loop* once *future* resolves.

    Thread-safe: the callback is marshalled onto the loop thread.
    """
    on_done = functools.partial(loop.call_soon_threadsafe, callback, *args)
    future.add_done_callback(on_done)
|
||||
|
||||
|
||||
def pymongo_class_wrapper(f, pymongo_class):
    """Executes the coroutine f and wraps its result in a Motor class.

    See WrapAsync.
    """

    @functools.wraps(f)
    async def _wrapper(self, *args, **kwargs):
        result = await f(self, *args, **kwargs)

        # Exact class match only -- subclasses intentionally pass through
        # unwrapped.
        if result.__class__ == pymongo_class:
            return self.wrap(result)
        return result

    return _wrapper
|
||||
|
||||
|
||||
def yieldable(future):
    """Deprecated: yield *future*'s first awaited value (itself, if pending)."""
    message = "The yieldable function is deprecated and may be removed in a future major release"
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return next(iter(future))
|
||||
|
||||
|
||||
def platform_info():
    """Name of this framework, reported in the MongoDB client metadata."""
    return "asyncio"
|
||||
Binary file not shown.
@@ -0,0 +1,152 @@
|
||||
# Copyright 2014-2016 MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Tornado compatibility layer for Motor, an asynchronous MongoDB driver.
|
||||
|
||||
See "Frameworks" in the Developer Guide.
|
||||
"""
|
||||
|
||||
import functools
|
||||
import os
|
||||
import warnings
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
import tornado.process
|
||||
from tornado import concurrent, ioloop
|
||||
from tornado import version as tornado_version
|
||||
from tornado.gen import chain_future, coroutine # noqa: F401 - For framework interface.
|
||||
|
||||
# contextvars is stdlib on Python 3.7+; on older interpreters it is absent and
# callers feature-test with "if contextvars:".
try:
    import contextvars
except ImportError:
    contextvars = None
|
||||
|
||||
# mypy: ignore-errors
|
||||
|
||||
# Tornado Motor classes keep their bare names (MotorClient, ...) — no prefix.
CLASS_PREFIX = ""
|
||||
|
||||
|
||||
def get_event_loop():
    """Return the IOLoop for the current thread."""
    return ioloop.IOLoop.current()
|
||||
|
||||
|
||||
def is_event_loop(loop):
    """Return True if *loop* is a Tornado IOLoop instance."""
    return isinstance(loop, ioloop.IOLoop)
|
||||
|
||||
|
||||
def check_event_loop(loop):
    """Raise TypeError unless *loop* is a Tornado IOLoop."""
    if isinstance(loop, ioloop.IOLoop):
        return
    raise TypeError("io_loop must be instance of IOLoop, not %r" % loop)
|
||||
|
||||
|
||||
def get_future(loop):
    """Return a new Tornado-compatible Future.

    *loop* is unused here but kept for the shared framework interface.
    """
    return concurrent.Future()
|
||||
|
||||
|
||||
# Size of the shared thread pool: the MOTOR_MAX_WORKERS environment variable
# overrides the default of five threads per CPU core.
if "MOTOR_MAX_WORKERS" in os.environ:
    max_workers = int(os.environ["MOTOR_MAX_WORKERS"])
else:
    max_workers = tornado.process.cpu_count() * 5

# Shared executor used by run_on_executor() below.
_EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
|
||||
|
||||
|
||||
def _reset_global_executor():
    """Re-initialize the global ThreadPoolExecutor"""
    # Rebind the module-level executor; the previous pool's threads do not
    # survive fork, so a fresh pool is required in child processes.
    global _EXECUTOR  # noqa: PLW0603
    _EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
|
||||
|
||||
|
||||
# os.register_at_fork is not available on all platforms (e.g. Windows).
if hasattr(os, "register_at_fork"):
    # We need this to make sure that creating new clients in subprocesses doesn't deadlock.
    os.register_at_fork(after_in_child=_reset_global_executor)
|
||||
|
||||
|
||||
def run_on_executor(loop, fn, *args, **kwargs):
    """Run blocking *fn(*args, **kwargs)* on the shared thread pool.

    Propagates the caller's contextvars context when the module is available,
    and returns the Future produced by *loop*.
    """
    if contextvars:
        # Run the target inside a copy of the caller's context.
        fn = functools.partial(contextvars.copy_context().run, fn)

    call = functools.partial(fn, *args, **kwargs)
    return loop.run_in_executor(_EXECUTOR, call)
|
||||
|
||||
|
||||
def chain_return_value(future, loop, return_value):
    """Compatible way to return a value in all Pythons.

    PEP 479, raise StopIteration(value) from a coroutine won't work forever,
    but "return value" doesn't work in Python 2. Instead, Motor methods that
    return values resolve a Future with it, and are implemented with callbacks
    rather than a coroutine internally.
    """
    chained = concurrent.Future()

    def _copy(resolved):
        # Return early if the task was cancelled.
        if chained.done():
            return
        exc = resolved.exception()
        if exc is not None:
            chained.set_exception(exc)
        else:
            chained.set_result(return_value)

    # Hop onto the IOLoop thread before touching `chained`.
    future.add_done_callback(functools.partial(loop.add_callback, _copy))
    return chained
|
||||
|
||||
|
||||
def is_future(f):
    """Return True if *f* is a Tornado-compatible Future."""
    return isinstance(f, concurrent.Future)
|
||||
|
||||
|
||||
def call_soon(loop, callback, *args, **kwargs):
    """Schedule *callback* on *loop* via IOLoop.add_callback.

    Arguments are bound with ``functools.partial`` because ``add_callback``
    takes only the callable.
    """
    if not (args or kwargs):
        loop.add_callback(callback)
    else:
        loop.add_callback(functools.partial(callback, *args, **kwargs))
|
||||
|
||||
|
||||
def add_future(loop, future, callback, *args):
    """Invoke ``callback(*args, future)`` on *loop* once *future* resolves."""
    on_done = functools.partial(callback, *args)
    loop.add_future(future, on_done)
|
||||
|
||||
|
||||
def pymongo_class_wrapper(f, pymongo_class):
    """Executes the coroutine f and wraps its result in a Motor class.

    See WrapAsync.
    """

    @functools.wraps(f)
    async def _wrapper(self, *args, **kwargs):
        result = await f(self, *args, **kwargs)

        # Exact class match only -- subclasses intentionally pass through
        # unwrapped.
        if result.__class__ == pymongo_class:
            return self.wrap(result)
        return result

    return _wrapper
|
||||
|
||||
|
||||
def yieldable(future):
    """Deprecated: return *future* unchanged (Tornado futures are yieldable)."""
    message = "The yieldable function is deprecated and may be removed in a future major release."
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return future
|
||||
|
||||
|
||||
def platform_info():
    """Framework name and version, reported in the MongoDB client metadata."""
    return f"Tornado {tornado_version}"
|
||||
Binary file not shown.
@@ -0,0 +1,297 @@
|
||||
# Copyright 2014 MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Dynamic class-creation for Motor."""
|
||||
import functools
|
||||
import inspect
|
||||
from typing import Any, Callable, Dict, TypeVar
|
||||
|
||||
# Module-level cache of dynamically created Motor classes; presumably keyed
# by (agnostic class, framework) — confirm against create_class_with_framework.
_class_cache: Dict[Any, Any] = {}
|
||||
|
||||
# mypy: ignore-errors
|
||||
|
||||
|
||||
def asynchronize(framework, sync_method: Callable, doc=None, wrap_class=None, unwrap_class=None):
    """Decorate `sync_method` so it returns a Future.

    The method runs on a thread and resolves the Future when it completes.

    :Parameters:
     - `framework`: An asynchronous framework
     - `sync_method`: Unbound method of pymongo Collection, Database,
       MongoClient, etc.
     - `doc`: Optionally override sync_method's docstring
     - `wrap_class`: Optional PyMongo class, wrap a returned object of
       this PyMongo class in the equivalent Motor class
     - `unwrap_class`: Optional Motor class name, unwrap an argument with
       this Motor class name and pass the wrapped PyMongo
       object instead
    """

    @functools.wraps(sync_method)
    def method(self, *args, **kwargs):
        if unwrap_class is not None:
            # Replace Motor wrappers (the requested class or a client session)
            # with their PyMongo delegates before calling into PyMongo.
            # Don't call isinstance(), not checking subclasses.
            unwrapped_args = [
                obj.delegate
                if obj.__class__.__name__.endswith((unwrap_class, "MotorClientSession"))
                else obj
                for obj in args
            ]
            unwrapped_kwargs = {
                key: (
                    obj.delegate
                    if obj.__class__.__name__.endswith((unwrap_class, "MotorClientSession"))
                    else obj
                )
                for key, obj in kwargs.items()
            }
        else:
            # For speed, don't call unwrap_args_session/unwrap_kwargs_session.
            unwrapped_args = [
                obj.delegate if obj.__class__.__name__.endswith("MotorClientSession") else obj
                for obj in args
            ]
            unwrapped_kwargs = {
                key: (
                    obj.delegate if obj.__class__.__name__.endswith("MotorClientSession") else obj
                )
                for key, obj in kwargs.items()
            }

        # Run the blocking PyMongo call on the framework's thread pool.
        loop = self.get_io_loop()
        return framework.run_on_executor(
            loop, sync_method, self.delegate, *unwrapped_args, **unwrapped_kwargs
        )

    if wrap_class is not None:
        # Wrap returned PyMongo objects of wrap_class in their Motor equivalent.
        method = framework.pymongo_class_wrapper(method, wrap_class)
        method.is_wrap_method = True  # For Synchro.

    # This is for the benefit of motor_extensions.py, which needs this info to
    # generate documentation with Sphinx.
    method.is_async_method = True
    name = sync_method.__name__
    method.pymongo_method_name = name

    if doc is not None:
        method.__doc__ = doc

    return method
|
||||
|
||||
|
||||
def unwrap_args_session(args):
    """Lazily replace any Motor client session in *args* with its PyMongo
    delegate, yielding every other object unchanged.

    Matching is done on the class name suffix (not isinstance) so all
    framework-specific session classes are caught.
    """
    for arg in args:
        if arg.__class__.__name__.endswith("MotorClientSession"):
            yield arg.delegate
        else:
            yield arg
|
||||
|
||||
|
||||
def unwrap_kwargs_session(kwargs):
    """Return a copy of *kwargs* in which every Motor client session value
    is replaced by its PyMongo delegate; other values are kept as-is.
    """
    unwrapped = {}
    for key, value in kwargs.items():
        if value.__class__.__name__.endswith("MotorClientSession"):
            unwrapped[key] = value.delegate
        else:
            unwrapped[key] = value
    return unwrapped
|
||||
|
||||
|
||||
_coro_token = object()
|
||||
|
||||
|
||||
def coroutine_annotation(f):
    """In docs, annotate a function that returns a Future with 'coroutine'.

    This is purely a documentation marker -- the runtime behavior of *f*
    is untouched.

    Usage::

        @coroutine_annotation
        def method(self):
            ...
    """
    setattr(f, "coroutine_annotation", True)
    return f
|
||||
|
||||
|
||||
class MotorAttributeFactory:
    """Marker used by Motor classes for attributes that delegate in some
    way to PyMongo.

    At module import time, create_class_with_framework() calls
    create_attribute() on each instance to build the final class attribute.
    """

    def __init__(self, doc=None):
        # Optional docstring override for the generated attribute.
        self.doc = doc

    def create_attribute(self, cls, attr_name):
        # Subclasses must build and return the concrete attribute.
        raise NotImplementedError
|
||||
|
||||
|
||||
class Async(MotorAttributeFactory):
    def __init__(self, attr_name, doc=None):
        """A descriptor that wraps a PyMongo method, such as insert_one,
        and returns an asynchronous version of the method that returns a
        Future.

        :Parameters:
         - `attr_name`: The name of the attribute on the PyMongo class, if
           different from attribute on the Motor class
        """
        super().__init__(doc)
        self.attr_name = attr_name
        # Optional PyMongo class whose instances returned by the method get
        # wrapped in the Motor equivalent (configured via wrap()).
        self.wrap_class = None
        # Optional Motor class name whose instances in the args get replaced
        # by their PyMongo delegates (configured via unwrap()).
        self.unwrap_class = None

    def create_attribute(self, cls, attr_name):
        # Prefer the explicitly-given PyMongo attribute name when it differs
        # from the Motor-side attribute name.
        name = self.attr_name or attr_name
        method = getattr(cls.__delegate_class__, name)
        return asynchronize(
            framework=cls._framework,
            sync_method=method,
            doc=self.doc,
            wrap_class=self.wrap_class,
            unwrap_class=self.unwrap_class,
        )

    def wrap(self, original_class):
        # Fluent configuration; returns self so it chains at class-body level.
        self.wrap_class = original_class
        return self

    def unwrap(self, class_name):
        # Fluent configuration; returns self so it chains at class-body level.
        self.unwrap_class = class_name
        return self
|
||||
|
||||
|
||||
class AsyncRead(Async):
    def __init__(self, attr_name=None, doc=None):
        """A descriptor that wraps a PyMongo read method like find_one()
        that returns a Future.
        """
        super().__init__(attr_name=attr_name, doc=doc)
|
||||
|
||||
|
||||
class AsyncWrite(Async):
    def __init__(self, attr_name=None, doc=None):
        """A descriptor that wraps a PyMongo write method like update_one()
        that accepts getLastError options and returns a Future.
        """
        super().__init__(attr_name=attr_name, doc=doc)
|
||||
|
||||
|
||||
class AsyncCommand(Async):
    def __init__(self, attr_name=None, doc=None):
        """A descriptor that wraps a PyMongo command like copy_database()
        that returns a Future and does not accept getLastError options.
        """
        super().__init__(attr_name=attr_name, doc=doc)
|
||||
|
||||
|
||||
class ReadOnlyProperty(MotorAttributeFactory):
    """Creates a readonly attribute on the wrapped PyMongo object."""

    def create_attribute(self, cls, attr_name):
        # Reads fall straight through to the same-named attribute on the
        # PyMongo delegate object.
        def fget(obj):
            return getattr(obj.delegate, attr_name)

        # Prefer an explicit doc override; otherwise reuse PyMongo's own
        # docstring for the attribute.
        doc = self.doc or getattr(cls.__delegate_class__, attr_name).__doc__
        return property(fget=fget, doc=doc) if doc else property(fget=fget)
|
||||
|
||||
|
||||
class DelegateMethod(ReadOnlyProperty):
    """A method on the wrapped PyMongo object that does no I/O and can be
    called synchronously"""

    def __init__(self, doc=None):
        super().__init__(doc)
        # Optional PyMongo class whose instances in results get wrapped.
        self.wrap_class = None

    def wrap(self, original_class):
        # Fluent configuration; returns self so it chains at class-body level.
        self.wrap_class = original_class
        return self

    def create_attribute(self, cls, attr_name):
        # Without a wrap class, behave exactly like a read-only pass-through.
        if self.wrap_class is None:
            return super().create_attribute(cls, attr_name)

        pymongo_method = getattr(cls.__delegate_class__, attr_name)
        wrapped_class = self.wrap_class

        @functools.wraps(pymongo_method)
        def wrapper(self_, *args, **kwargs):
            result = pymongo_method(self_.delegate, *args, **kwargs)
            # Exact class comparison on purpose -- subclasses are not wrapped.
            if result.__class__ == wrapped_class:
                # Delegate to the current Motor object to wrap the result.
                return self_.wrap(result)
            return result

        if self.doc:
            wrapper.__doc__ = self.doc
        wrapper.is_wrap_method = True  # For Synchro.
        return wrapper
|
||||
|
||||
|
||||
class MotorCursorChainingMethod(MotorAttributeFactory):
    """Builds a chainable cursor method: the PyMongo cursor method is called
    for its side effects and the Motor cursor itself is returned."""

    def create_attribute(self, cls, attr_name):
        delegate_method = getattr(cls.__delegate_class__, attr_name)

        @functools.wraps(delegate_method)
        def return_clone(self, *args, **kwargs):
            # Apply the configuration to the wrapped PyMongo cursor, then
            # return the Motor cursor so calls can be chained.
            delegate_method(self.delegate, *args, **kwargs)
            return self

        # Metadata consumed by Synchro and motor_extensions.py.
        return_clone.is_motorcursor_chaining_method = True
        return_clone.pymongo_method_name = attr_name
        if self.doc:
            return_clone.__doc__ = self.doc
        return return_clone
|
||||
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
def create_class_with_framework(cls: T, framework: Any, module_name: str) -> T:
    """Create (or fetch from cache) the framework-specific subclass of a
    framework-agnostic Motor class.

    :Parameters:
     - `cls`: the agnostic Motor class (e.g. AgnosticClient)
     - `framework`: the async framework module providing CLASS_PREFIX etc.
     - `module_name`: value assigned to the new class's ``__module__``
    """
    motor_class_name = framework.CLASS_PREFIX + cls.__motor_class_name__
    cache_key = (cls, motor_class_name, framework)
    # Classes are created once per (class, name, framework) combination.
    cached_class = _class_cache.get(cache_key)
    if cached_class:
        return cached_class

    new_class = type(str(motor_class_name), (cls,), {})
    new_class.__module__ = module_name
    new_class._framework = framework

    assert hasattr(new_class, "__delegate_class__")

    # If we're constructing MotorClient from AgnosticClient, for example,
    # the method resolution order is (AgnosticClient, AgnosticBase, object).
    # Iterate over bases looking for attributes and coroutines that must be
    # replaced with framework-specific ones.
    for base in reversed(inspect.getmro(cls)):
        # Turn attribute factories into real methods or descriptors.
        for name, attr in base.__dict__.items():
            if isinstance(attr, MotorAttributeFactory):
                new_class_attr = attr.create_attribute(new_class, name)
                setattr(new_class, name, new_class_attr)

    _class_cache[cache_key] = new_class
    return new_class
|
||||
@@ -0,0 +1,76 @@
|
||||
# Copyright 2011-2015 MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Asyncio support for Motor, an asynchronous driver for MongoDB."""
|
||||
from . import core, motor_gridfs
|
||||
from .frameworks import asyncio as asyncio_framework
|
||||
from .metaprogramming import T, create_class_with_framework
|
||||
|
||||
# Public names exported by motor.motor_asyncio.
__all__ = [
    "AsyncIOMotorClient",
    "AsyncIOMotorClientSession",
    "AsyncIOMotorDatabase",
    "AsyncIOMotorCollection",
    "AsyncIOMotorCursor",
    "AsyncIOMotorCommandCursor",
    "AsyncIOMotorChangeStream",
    "AsyncIOMotorGridFSBucket",
    "AsyncIOMotorGridIn",
    "AsyncIOMotorGridOut",
    "AsyncIOMotorGridOutCursor",
    "AsyncIOMotorClientEncryption",
]
|
||||
|
||||
|
||||
def create_asyncio_class(cls: T) -> T:
    """Specialize a framework-agnostic Motor class for asyncio."""
    return create_class_with_framework(cls, asyncio_framework, "motor.motor_asyncio")
|
||||
|
||||
|
||||
# Concrete asyncio-flavored classes, generated at import time from the
# framework-agnostic base classes.
AsyncIOMotorClient = create_asyncio_class(core.AgnosticClient)


AsyncIOMotorClientSession = create_asyncio_class(core.AgnosticClientSession)


AsyncIOMotorDatabase = create_asyncio_class(core.AgnosticDatabase)


AsyncIOMotorCollection = create_asyncio_class(core.AgnosticCollection)


AsyncIOMotorCursor = create_asyncio_class(core.AgnosticCursor)


AsyncIOMotorCommandCursor = create_asyncio_class(core.AgnosticCommandCursor)


AsyncIOMotorLatentCommandCursor = create_asyncio_class(core.AgnosticLatentCommandCursor)


AsyncIOMotorChangeStream = create_asyncio_class(core.AgnosticChangeStream)


AsyncIOMotorGridFSBucket = create_asyncio_class(motor_gridfs.AgnosticGridFSBucket)


AsyncIOMotorGridIn = create_asyncio_class(motor_gridfs.AgnosticGridIn)


AsyncIOMotorGridOut = create_asyncio_class(motor_gridfs.AgnosticGridOut)


AsyncIOMotorGridOutCursor = create_asyncio_class(motor_gridfs.AgnosticGridOutCursor)


AsyncIOMotorClientEncryption = create_asyncio_class(core.AgnosticClientEncryption)
|
||||
@@ -0,0 +1,263 @@
|
||||
from typing import Any, Mapping, MutableMapping, Optional, Union
|
||||
|
||||
from bson import Code, CodecOptions, Timestamp
|
||||
from bson.raw_bson import RawBSONDocument
|
||||
from pymongo.client_session import TransactionOptions
|
||||
from pymongo.cursor_shared import _Hint, _Sort
|
||||
from pymongo.read_concern import ReadConcern
|
||||
from pymongo.read_preferences import ReadPreference, _ServerMode
|
||||
from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline
|
||||
from pymongo.write_concern import WriteConcern
|
||||
|
||||
from motor import core, motor_gridfs
|
||||
|
||||
__all__: list[str] = [
|
||||
"AsyncIOMotorClient",
|
||||
"AsyncIOMotorClientSession",
|
||||
"AsyncIOMotorDatabase",
|
||||
"AsyncIOMotorCollection",
|
||||
"AsyncIOMotorCursor",
|
||||
"AsyncIOMotorCommandCursor",
|
||||
"AsyncIOMotorChangeStream",
|
||||
"AsyncIOMotorGridFSBucket",
|
||||
"AsyncIOMotorGridIn",
|
||||
"AsyncIOMotorGridOut",
|
||||
"AsyncIOMotorGridOutCursor",
|
||||
"AsyncIOMotorClientEncryption",
|
||||
"AsyncIOMotorLatentCommandCursor",
|
||||
]
|
||||
|
||||
class AsyncIOMotorClient(core.AgnosticClient[_DocumentType]):
    """Typing stub for the asyncio-specialized Motor client (signatures only)."""

    def get_database(
        self,
        name: Optional[str] = None,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> AsyncIOMotorDatabase[_DocumentType]: ...
    def get_default_database(
        self,
        default: Optional[str] = None,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> AsyncIOMotorDatabase[_DocumentType]: ...
    async def list_databases(
        self,
        session: Optional[core.AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> AsyncIOMotorCommandCursor[dict[str, Any]]: ...
    async def start_session(
        self,
        causal_consistency: Optional[bool] = None,
        default_transaction_options: Optional[TransactionOptions] = None,
        snapshot: Optional[bool] = False,
    ) -> AsyncIOMotorClientSession: ...
    def watch(
        self,
        pipeline: Optional[_Pipeline] = None,
        full_document: Optional[str] = None,
        resume_after: Optional[Mapping[str, Any]] = None,
        max_await_time_ms: Optional[int] = None,
        batch_size: Optional[int] = None,
        collation: Optional[_CollationIn] = None,
        start_at_operation_time: Optional[Timestamp] = None,
        session: Optional[core.AgnosticClientSession] = None,
        start_after: Optional[Mapping[str, Any]] = None,
        comment: Optional[str] = None,
        full_document_before_change: Optional[str] = None,
        show_expanded_events: Optional[bool] = None,
    ) -> AsyncIOMotorChangeStream[_DocumentType]: ...
    # Attribute/item access both resolve to databases.
    def __getattr__(self, name: str) -> AsyncIOMotorDatabase[_DocumentType]: ...
    def __getitem__(self, name: str) -> AsyncIOMotorDatabase[_DocumentType]: ...
|
||||
|
||||
class AsyncIOMotorClientSession(core.AgnosticClientSession):
    """Typing stub for the asyncio-specialized client session (signatures only)."""

    @property
    def client(self) -> AsyncIOMotorClient: ...
    async def __aenter__(self) -> AsyncIOMotorClientSession: ...
|
||||
|
||||
class AsyncIOMotorDatabase(core.AgnosticDatabase[_DocumentType]):
    """Typing stub for the asyncio-specialized database (signatures only)."""

    async def cursor_command(
        self,
        command: Union[str, MutableMapping[str, Any]],
        value: Any = 1,
        read_preference: Optional[_ServerMode] = None,
        codec_options: Optional[CodecOptions[core._CodecDocumentType]] = None,
        session: Optional[core.AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        max_await_time_ms: Optional[int] = None,
        **kwargs: Any,
    ) -> AsyncIOMotorCommandCursor[_DocumentType]: ...
    async def create_collection(
        self,
        name: str,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
        session: Optional[core.AgnosticClientSession] = None,
        check_exists: Optional[bool] = True,
        **kwargs: Any,
    ) -> AsyncIOMotorCollection[_DocumentType]: ...
    def get_collection(
        self,
        name: str,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> AsyncIOMotorCollection[_DocumentType]: ...
    async def list_collections(
        self,
        session: Optional[core.AgnosticClientSession] = None,
        filter: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> AsyncIOMotorCommandCursor[MutableMapping[str, Any]]: ...
    def with_options(
        self,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> AsyncIOMotorDatabase[_DocumentType]: ...
    def aggregate(
        self, pipeline: _Pipeline, *args: Any, **kwargs: Any
    ) -> AsyncIOMotorLatentCommandCursor[_DocumentType]: ...
    def watch(
        self,
        pipeline: Optional[_Pipeline] = None,
        full_document: Optional[str] = None,
        resume_after: Optional[Mapping[str, Any]] = None,
        max_await_time_ms: Optional[int] = None,
        batch_size: Optional[int] = None,
        collation: Optional[_CollationIn] = None,
        start_at_operation_time: Optional[Timestamp] = None,
        session: Optional[core.AgnosticClientSession] = None,
        start_after: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        full_document_before_change: Optional[str] = None,
        show_expanded_events: Optional[bool] = None,
    ) -> AsyncIOMotorChangeStream[_DocumentType]: ...
    @property
    def client(self) -> AsyncIOMotorClient[_DocumentType]: ...
    # Attribute/item access both resolve to collections.
    def __getattr__(self, name: str) -> AsyncIOMotorCollection[_DocumentType]: ...
    def __getitem__(self, name: str) -> AsyncIOMotorCollection[_DocumentType]: ...
|
||||
|
||||
class AsyncIOMotorCollection(core.AgnosticCollection[_DocumentType]):
    """Typing stub for the asyncio-specialized collection (signatures only)."""

    def with_options(
        self,
        codec_options: Optional[CodecOptions] = None,
        read_preference: Optional[ReadPreference] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> AsyncIOMotorCollection[_DocumentType]: ...
    def list_search_indexes(
        self,
        name: Optional[str] = None,
        session: Optional[core.AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> AsyncIOMotorLatentCommandCursor[Mapping[str, Any]]: ...
    # Attribute/item access both resolve to sub-collections.
    def __getattr__(self, name: str) -> AsyncIOMotorCollection[_DocumentType]: ...
    def __getitem__(self, name: str) -> AsyncIOMotorCollection[_DocumentType]: ...
    def find(self, *args: Any, **kwargs: Any) -> AsyncIOMotorCursor[_DocumentType]: ...
    def find_raw_batches(
        self, *args: Any, **kwargs: Any
    ) -> AsyncIOMotorRawBatchCursor[_DocumentType]: ...
    def aggregate(
        self, pipeline: _Pipeline, *args: Any, **kwargs: Any
    ) -> AsyncIOMotorCommandCursor[_DocumentType]: ...
    def aggregate_raw_batches(
        self, pipeline: _Pipeline, **kwargs: Any
    ) -> AsyncIOMotorRawBatchCursor[_DocumentType]: ...
    def list_indexes(
        self, session: Optional[core.AgnosticClientSession] = None, **kwargs: Any
    ) -> AsyncIOMotorLatentCommandCursor[MutableMapping[str, Any]]: ...
|
||||
|
||||
class AsyncIOMotorLatentCommandCursor(core.AgnosticLatentCommandCursor[_DocumentType]): ...
|
||||
|
||||
class AsyncIOMotorCursor(core.AgnosticCursor[_DocumentType]):
    """Typing stub for the asyncio-specialized cursor; chaining methods all
    return the cursor itself (signatures only)."""

    def collation(self, collation: Optional[_CollationIn]) -> AsyncIOMotorCursor[_DocumentType]: ...
    def add_option(self, mask: int) -> AsyncIOMotorCursor[_DocumentType]: ...
    def remove_option(self, mask: int) -> AsyncIOMotorCursor[_DocumentType]: ...
    def limit(self, limit: int) -> AsyncIOMotorCursor[_DocumentType]: ...
    def skip(self, skip: int) -> AsyncIOMotorCursor[_DocumentType]: ...
    def max_scan(self, max_scan: Optional[int]) -> AsyncIOMotorCursor[_DocumentType]: ...
    def sort(
        self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None
    ) -> AsyncIOMotorCursor[_DocumentType]: ...
    def hint(self, index: Optional[_Hint]) -> AsyncIOMotorCursor[_DocumentType]: ...
    def where(self, code: Union[str, Code]) -> AsyncIOMotorCursor[_DocumentType]: ...
    def max_await_time_ms(
        self, max_await_time_ms: Optional[int]
    ) -> AsyncIOMotorCursor[_DocumentType]: ...
    def max_time_ms(self, max_time_ms: Optional[int]) -> AsyncIOMotorCursor[_DocumentType]: ...
    def min(self, spec: _Sort) -> AsyncIOMotorCursor[_DocumentType]: ...
    def max(self, spec: _Sort) -> AsyncIOMotorCursor[_DocumentType]: ...
    def comment(self, comment: Any) -> AsyncIOMotorCursor[_DocumentType]: ...
    def allow_disk_use(self, allow_disk_use: bool) -> AsyncIOMotorCursor[_DocumentType]: ...
    def rewind(self) -> AsyncIOMotorCursor[_DocumentType]: ...
    def clone(self) -> AsyncIOMotorCursor[_DocumentType]: ...
    def __copy__(self) -> AsyncIOMotorCursor[_DocumentType]: ...
    def __deepcopy__(self, memo: Any) -> AsyncIOMotorCursor[_DocumentType]: ...
|
||||
|
||||
class AsyncIOMotorRawBatchCursor(core.AgnosticRawBatchCursor[_DocumentType]): ...
|
||||
class AsyncIOMotorCommandCursor(core.AgnosticCommandCursor[_DocumentType]): ...
|
||||
class AsyncIOMotorRawBatchCommandCursor(core.AgnosticRawBatchCommandCursor[_DocumentType]): ...
|
||||
|
||||
class AsyncIOMotorChangeStream(core.AgnosticChangeStream[_DocumentType]):
    """Typing stub for the asyncio-specialized change stream (signatures only)."""

    def __aiter__(self) -> AsyncIOMotorChangeStream[_DocumentType]: ...
    async def __aenter__(self) -> AsyncIOMotorChangeStream[_DocumentType]: ...
|
||||
|
||||
class AsyncIOMotorClientEncryption(core.AgnosticClientEncryption[_DocumentType]):
    """Typing stub for the asyncio-specialized client encryption helper
    (signatures only)."""

    async def __aenter__(self) -> AsyncIOMotorClientEncryption[_DocumentType]: ...
    async def get_keys(self) -> AsyncIOMotorCursor[RawBSONDocument]: ...
    async def create_encrypted_collection(
        self,
        database: core.AgnosticDatabase[_DocumentTypeArg],
        name: str,
        encrypted_fields: Mapping[str, Any],
        kms_provider: Optional[str] = None,
        master_key: Optional[Mapping[str, Any]] = None,
        **kwargs: Any,
    ) -> tuple[AsyncIOMotorCollection[_DocumentTypeArg], Mapping[str, Any]]: ...
|
||||
|
||||
class AsyncIOMotorGridOutCursor(motor_gridfs.AgnosticGridOutCursor):
    """Typing stub for the asyncio-specialized GridFS cursor (signatures only)."""

    def next_object(self) -> AsyncIOMotorGridOutCursor: ...
|
||||
|
||||
class AsyncIOMotorGridOut(motor_gridfs.AgnosticGridOut):
    """Typing stub for the asyncio-specialized GridFS read stream (signatures only)."""

    def __aiter__(self) -> AsyncIOMotorGridOut: ...
|
||||
|
||||
class AsyncIOMotorGridIn(motor_gridfs.AgnosticGridIn):
    """Typing stub for the asyncio-specialized GridFS write stream (signatures only)."""

    async def __aenter__(self) -> AsyncIOMotorGridIn: ...
|
||||
|
||||
class AsyncIOMotorGridFSBucket(motor_gridfs.AgnosticGridFSBucket):
    """Typing stub for the asyncio-specialized GridFS bucket (signatures only)."""

    async def open_download_stream_by_name(
        self,
        filename: str,
        revision: int = -1,
        session: Optional[core.AgnosticClientSession] = None,
    ) -> AsyncIOMotorGridOut: ...
    async def open_download_stream(
        self, file_id: Any, session: Optional[core.AgnosticClientSession] = None
    ) -> AsyncIOMotorGridOut: ...
    def open_upload_stream(
        self,
        filename: str,
        chunk_size_bytes: Optional[int] = None,
        metadata: Optional[Mapping[str, Any]] = None,
        session: Optional[core.AgnosticClientSession] = None,
    ) -> AsyncIOMotorGridIn: ...
    def open_upload_stream_with_id(
        self,
        file_id: Any,
        filename: str,
        chunk_size_bytes: Optional[int] = None,
        metadata: Optional[Mapping[str, Any]] = None,
        session: Optional[core.AgnosticClientSession] = None,
    ) -> AsyncIOMotorGridIn: ...
    def find(self, *args: Any, **kwargs: Any) -> AsyncIOMotorGridOutCursor: ...
|
||||
@@ -0,0 +1,16 @@
|
||||
# Copyright 2011-2015 MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Common code to support all async frameworks."""
|
||||
callback_type_error = TypeError("callback must be a callable")
|
||||
504
backend/venv/lib/python3.12/site-packages/motor/motor_gridfs.py
Normal file
504
backend/venv/lib/python3.12/site-packages/motor/motor_gridfs.py
Normal file
@@ -0,0 +1,504 @@
|
||||
# Copyright 2011-2015 MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""GridFS implementation for Motor, an asynchronous driver for MongoDB."""
|
||||
import hashlib
|
||||
import warnings
|
||||
|
||||
import gridfs
|
||||
import pymongo
|
||||
import pymongo.errors
|
||||
from gridfs import DEFAULT_CHUNK_SIZE, grid_file
|
||||
|
||||
from motor import docstrings
|
||||
from motor.core import AgnosticCollection, AgnosticCursor, AgnosticDatabase
|
||||
from motor.metaprogramming import (
|
||||
AsyncCommand,
|
||||
AsyncRead,
|
||||
DelegateMethod,
|
||||
ReadOnlyProperty,
|
||||
coroutine_annotation,
|
||||
create_class_with_framework,
|
||||
)
|
||||
|
||||
|
||||
class AgnosticGridOutCursor(AgnosticCursor):
    __motor_class_name__ = "MotorGridOutCursor"
    __delegate_class__ = gridfs.GridOutCursor

    def next_object(self):
        """**DEPRECATED** - Get next GridOut object from cursor."""
        # Note: the super() call will raise a warning for the deprecation.
        grid_out = super().next_object()
        if not grid_out:
            # Cursor exhausted.
            return None
        grid_out_class = create_class_with_framework(
            AgnosticGridOut, self._framework, self.__module__
        )
        return grid_out_class(self.collection, delegate=grid_out)
|
||||
|
||||
|
||||
class MotorGridOutProperty(ReadOnlyProperty):
    """Creates a readonly attribute on the wrapped PyMongo GridOut."""

    def create_attribute(self, cls, attr_name):
        def fget(obj):
            # Reading before the file document is fetched would yield
            # missing data, so fail loudly instead.
            if not obj.delegate._file:
                message = (
                    "You must call MotorGridOut.open() before accessing "
                    "the %s property" % attr_name
                )
                raise pymongo.errors.InvalidOperation(message)
            return getattr(obj.delegate, attr_name)

        # Reuse PyMongo's docstring for the generated property.
        return property(fget=fget, doc=getattr(cls.__delegate_class__, attr_name).__doc__)
|
||||
|
||||
|
||||
class AgnosticGridOut:
    """Class to read data out of GridFS.

    MotorGridOut supports the same attributes as PyMongo's
    :class:`~gridfs.grid_file.GridOut`, such as ``_id``, ``content_type``,
    etc.

    You don't need to instantiate this class directly - use the
    methods provided by :class:`~motor.MotorGridFSBucket`. If it **is**
    instantiated directly, call :meth:`open`, :meth:`read`, or
    :meth:`readline` before accessing its attributes.
    """

    __motor_class_name__ = "MotorGridOut"
    __delegate_class__ = gridfs.GridOut

    # Guarded metadata properties: raise InvalidOperation until the file
    # document has been loaded (see MotorGridOutProperty).
    _id = MotorGridOutProperty()
    aliases = MotorGridOutProperty()
    chunk_size = MotorGridOutProperty()
    close = MotorGridOutProperty()
    content_type = MotorGridOutProperty()
    filename = MotorGridOutProperty()
    length = MotorGridOutProperty()
    metadata = MotorGridOutProperty()
    name = MotorGridOutProperty()
    # Delegated I/O methods; async ones return Futures via the factories.
    _open = AsyncCommand(attr_name="open")
    read = AsyncRead()
    readable = DelegateMethod()
    readchunk = AsyncRead()
    readline = AsyncRead()
    seek = DelegateMethod()
    seekable = DelegateMethod()
    tell = DelegateMethod()
    upload_date = MotorGridOutProperty()
    write = DelegateMethod()

    def __init__(
        self, root_collection, file_id=None, file_document=None, delegate=None, session=None
    ):
        # Resolve the framework-specific MotorCollection class to validate
        # the root collection argument.
        collection_class = create_class_with_framework(
            AgnosticCollection, self._framework, self.__module__
        )

        if not isinstance(root_collection, collection_class):
            raise TypeError(
                "First argument to MotorGridOut must be "
                "MotorCollection, not %r" % root_collection
            )

        if delegate:
            # Wrap an already-constructed PyMongo GridOut.
            self.delegate = delegate
        else:
            self.delegate = self.__delegate_class__(
                root_collection.delegate, file_id, file_document, session=session
            )

        self.io_loop = root_collection.get_io_loop()

    def __aiter__(self):
        return self

    async def __anext__(self):
        # Iterate chunk by chunk; a falsy chunk signals end of file.
        chunk = await self.readchunk()
        if chunk:
            return chunk
        raise StopAsyncIteration()

    def __getattr__(self, item):
        # Any other attribute falls through to the PyMongo GridOut, but only
        # once the file document has been fetched.
        if not self.delegate._file:
            raise pymongo.errors.InvalidOperation(
                "You must call MotorGridOut.open() before accessing the %s property" % item
            )

        return getattr(self.delegate, item)

    @coroutine_annotation
    def open(self):
        """Retrieve this file's attributes from the server.

        Returns a Future.

        .. versionchanged:: 2.0
           No longer accepts a callback argument.

        .. versionchanged:: 0.2
           :class:`~motor.MotorGridOut` now opens itself on demand, calling
           ``open`` explicitly is rarely needed.
        """
        # chain_return_value resolves to *self* once _open() completes.
        return self._framework.chain_return_value(self._open(), self.get_io_loop(), self)

    def get_io_loop(self):
        # The loop is inherited from the root collection at construction.
        return self.io_loop

    async def stream_to_handler(self, request_handler):
        """Write the contents of this file to a
        :class:`tornado.web.RequestHandler`. This method calls
        :meth:`~tornado.web.RequestHandler.flush` on
        the RequestHandler, so ensure all headers have already been set.
        For a more complete example see the implementation of
        :class:`~motor.web.GridFSHandler`.

        .. code-block:: python

            class FileHandler(tornado.web.RequestHandler):
                @tornado.web.asynchronous
                @gen.coroutine
                def get(self, filename):
                    db = self.settings["db"]
                    fs = await motor.MotorGridFSBucket(db())
                    try:
                        gridout = await fs.open_download_stream_by_name(filename)
                    except gridfs.NoFile:
                        raise tornado.web.HTTPError(404)

                    self.set_header("Content-Type", gridout.content_type)
                    self.set_header("Content-Length", gridout.length)
                    await gridout.stream_to_handler(self)
                    self.finish()

        .. seealso:: Tornado `RequestHandler <http://tornadoweb.org/en/stable/web.html#request-handlers>`_
        """
        written = 0
        while written < self.length:
            # Reading chunk_size at a time minimizes buffering.
            chunk = await self.read(self.chunk_size)

            # write() simply appends the output to a list; flush() sends it
            # over the network and minimizes buffering in the handler.
            request_handler.write(chunk)
            request_handler.flush()
            written += len(chunk)
|
||||
|
||||
|
||||
class AgnosticGridIn:
    # Framework-agnostic GridFS upload stream.  create_class_with_framework
    # specializes this into MotorGridIn (see __motor_class_name__) for each
    # supported async framework; the descriptor attributes below forward to
    # the wrapped pymongo gridfs.GridIn delegate.
    __motor_class_name__ = "MotorGridIn"
    __delegate_class__ = gridfs.GridIn

    # Attribute access not otherwise declared falls through to the delegate.
    __getattr__ = DelegateMethod()
    _id = ReadOnlyProperty()
    abort = AsyncCommand()
    chunk_size = ReadOnlyProperty()
    closed = ReadOnlyProperty()
    close = AsyncCommand()
    content_type = ReadOnlyProperty()
    filename = ReadOnlyProperty()
    length = ReadOnlyProperty()
    name = ReadOnlyProperty()
    read = DelegateMethod()
    readable = DelegateMethod()
    seekable = DelegateMethod()
    upload_date = ReadOnlyProperty()
    # .unwrap() converts a MotorGridOut argument back to its pymongo delegate
    # before handing it to gridfs.GridIn.
    write = AsyncCommand().unwrap("MotorGridOut")
    writeable = DelegateMethod()
    writelines = AsyncCommand().unwrap("MotorGridOut")
    # Async wrapper around the delegate's synchronous __exit__; used by
    # __aexit__ below.
    _exit = AsyncCommand("__exit__")
    set = AsyncCommand(
        attr_name="__setattr__",
        doc="""
Set an arbitrary metadata attribute on the file. Stores value on the server
as a key-value pair within the file document once the file is closed. If
the file is already closed, calling :meth:`set` will immediately update the file
document on the server.

Metadata set on the file appears as attributes on a
:class:`~motor.MotorGridOut` object created from the file.

:Parameters:
  - `name`: Name of the attribute, will be stored as a key in the file
    document on the server
  - `value`: Value of the attribute
""",
    )

    def __init__(self, root_collection, delegate=None, session=None, **kwargs):
        """
        Class to write data to GridFS. Application developers should not
        generally need to instantiate this class - see
        :meth:`~motor.MotorGridFSBucket.open_upload_stream`.

        Any of the file level options specified in the `GridFS Spec
        <http://dochub.mongodb.org/core/gridfs/>`_ may be passed as
        keyword arguments. Any additional keyword arguments will be
        set as additional fields on the file document. Valid keyword
        arguments include:

          - ``"_id"``: unique ID for this file (default:
            :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
            not have already been used for another file

          - ``"filename"``: human name for the file

          - ``"contentType"`` or ``"content_type"``: valid mime-type
            for the file

          - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
            chunks, in bytes (default: 256 kb)

          - ``"encoding"``: encoding used for this file. In Python 2,
            any :class:`unicode` that is written to the file will be
            converted to a :class:`str`. In Python 3, any :class:`str`
            that is written to the file will be converted to
            :class:`bytes`.

        :Parameters:
          - `root_collection`: root collection to write to
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession` to use for all
            commands
          - `**kwargs` (optional): file level options (see above)

        .. versionchanged:: 3.0
           Removed support for the `disable_md5` parameter (to match the
           GridIn class in PyMongo).
        .. versionchanged:: 0.2
           ``open`` method removed, no longer needed.
        """
        # Validate against the framework-specific MotorCollection class, not
        # the agnostic base, so a pymongo Collection is rejected early.
        collection_class = create_class_with_framework(
            AgnosticCollection, self._framework, self.__module__
        )

        if not isinstance(root_collection, collection_class):
            raise TypeError(
                "First argument to MotorGridIn must be MotorCollection, not %r" % root_collection
            )

        self.io_loop = root_collection.get_io_loop()
        # Short cut.
        # A pre-built delegate is passed when wrapping an existing
        # gridfs.GridIn (e.g. from AgnosticGridFSBucket.wrap).
        self.delegate = delegate or self.__delegate_class__(
            root_collection.delegate, session=session, **kwargs
        )

    # Support "async with bucket.open_upload_stream() as f:"
    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Delegates to gridfs.GridIn.__exit__ via the _exit AsyncCommand,
        # which closes the file (flushing it to the server).
        await self._exit(exc_type, exc_val, exc_tb)

    def get_io_loop(self):
        # Event loop of the root collection this stream writes to.
        return self.io_loop
|
||||
|
||||
|
||||
class AgnosticGridFSBucket:
    # Framework-agnostic wrapper around pymongo's gridfs.GridFSBucket;
    # specialized into MotorGridFSBucket per framework.
    __motor_class_name__ = "MotorGridFSBucket"
    __delegate_class__ = gridfs.GridFSBucket

    # Async commands delegate to GridFSBucket on the event loop's executor;
    # .wrap(...) re-wraps returned pymongo objects (via self.wrap below).
    delete = AsyncCommand(doc=docstrings.gridfs_delete_doc)
    download_to_stream = AsyncCommand(doc=docstrings.gridfs_download_to_stream_doc)
    download_to_stream_by_name = AsyncCommand(doc=docstrings.gridfs_download_to_stream_by_name_doc)
    open_download_stream = AsyncCommand(doc=docstrings.gridfs_open_download_stream_doc).wrap(
        gridfs.GridOut
    )
    open_download_stream_by_name = AsyncCommand(
        doc=docstrings.gridfs_open_download_stream_by_name_doc
    ).wrap(gridfs.GridOut)
    # Opening an upload stream does no I/O, so these are plain delegated
    # methods rather than async commands.
    open_upload_stream = DelegateMethod(doc=docstrings.gridfs_open_upload_stream_doc).wrap(
        gridfs.GridIn
    )
    open_upload_stream_with_id = DelegateMethod(
        doc=docstrings.gridfs_open_upload_stream_with_id_doc
    ).wrap(gridfs.GridIn)
    rename = AsyncCommand(doc=docstrings.gridfs_rename_doc)
    upload_from_stream = AsyncCommand(doc=docstrings.gridfs_upload_from_stream_doc)
    upload_from_stream_with_id = AsyncCommand(doc=docstrings.gridfs_upload_from_stream_with_id_doc)

    def __init__(
        self,
        database,
        bucket_name="fs",
        chunk_size_bytes=DEFAULT_CHUNK_SIZE,
        write_concern=None,
        read_preference=None,
        collection=None,
    ):
        """Create a handle to a GridFS bucket.

        Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern`
        is not acknowledged.

        This class conforms to the `GridFS API Spec
        <https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst>`_
        for MongoDB drivers.

        :Parameters:
          - `database`: database to use.
          - `bucket_name` (optional): The name of the bucket. Defaults to 'fs'.
          - `chunk_size_bytes` (optional): The chunk size in bytes. Defaults
            to 255KB.
          - `write_concern` (optional): The
            :class:`~pymongo.write_concern.WriteConcern` to use. If ``None``
            (the default) db.write_concern is used.
          - `read_preference` (optional): The read preference to use. If
            ``None`` (the default) db.read_preference is used.
          - `collection` (optional): Deprecated, an alias for `bucket_name`
            that exists solely to provide backwards compatibility.

        .. versionchanged:: 3.0
           Removed support for the `disable_md5` parameter (to match the
           GridFSBucket class in PyMongo).
        .. versionchanged:: 2.1
           Added support for the `bucket_name`, `chunk_size_bytes`,
           `write_concern`, and `read_preference` parameters.
           Deprecated the `collection` parameter which is now an alias to
           `bucket_name` (to match the GridFSBucket class in PyMongo).
        .. versionadded:: 1.0

        .. mongodoc:: gridfs
        """
        # Preserve backwards compatibility of "collection" parameter
        if collection is not None:
            warnings.warn(
                'the "collection" parameter is deprecated, use "bucket_name" instead',
                DeprecationWarning,
                stacklevel=2,
            )
            bucket_name = collection

        # Validate against the framework-specific MotorDatabase class.
        db_class = create_class_with_framework(AgnosticDatabase, self._framework, self.__module__)

        if not isinstance(database, db_class):
            raise TypeError(
                f"First argument to {self.__class__} must be MotorDatabase, not {database!r}"
            )

        self.io_loop = database.get_io_loop()
        # Motor-wrapped files collection, used when re-wrapping GridIn/GridOut
        # objects returned by the delegate (see wrap / find below).
        self.collection = database.get_collection(
            bucket_name, write_concern=write_concern, read_preference=read_preference
        )
        self.delegate = self.__delegate_class__(
            database.delegate,
            bucket_name,
            chunk_size_bytes=chunk_size_bytes,
            write_concern=write_concern,
            read_preference=read_preference,
        )

    def get_io_loop(self):
        # Event loop of the underlying database.
        return self.io_loop

    def wrap(self, obj):
        # Convert a pymongo gridfs object returned by the delegate into the
        # corresponding Motor class for this framework.
        # NOTE(review): falls through and implicitly returns None for any
        # other type — presumably unreachable for the delegated calls above;
        # confirm before relying on wrap() with arbitrary objects.
        if obj.__class__ is grid_file.GridIn:
            grid_in_class = create_class_with_framework(
                AgnosticGridIn, self._framework, self.__module__
            )

            return grid_in_class(root_collection=self.collection, delegate=obj)

        elif obj.__class__ is grid_file.GridOut:
            grid_out_class = create_class_with_framework(
                AgnosticGridOut, self._framework, self.__module__
            )

            return grid_out_class(root_collection=self.collection, delegate=obj)

        elif obj.__class__ is gridfs.GridOutCursor:
            grid_out_class = create_class_with_framework(
                AgnosticGridOutCursor, self._framework, self.__module__
            )

            return grid_out_class(cursor=obj, collection=self.collection)

    def find(self, *args, **kwargs):
        """Find and return the files collection documents that match ``filter``.

        Returns a cursor that iterates across files matching
        arbitrary queries on the files collection. Can be combined
        with other modifiers for additional control.

        For example::

          cursor = bucket.find({"filename": "lisa.txt"}, no_cursor_timeout=True)
          while (await cursor.fetch_next):
              grid_out = cursor.next_object()
              data = await grid_out.read()

        This iterates through all versions of "lisa.txt" stored in GridFS.
        Note that setting no_cursor_timeout to True may be important to
        prevent the cursor from timing out during long multi-file processing
        work.

        As another example, the call::

          most_recent_three = fs.find().sort("uploadDate", -1).limit(3)

        would return a cursor to the three most recently uploaded files
        in GridFS.

        Follows a similar interface to
        :meth:`~motor.MotorCollection.find`
        in :class:`~motor.MotorCollection`.

        :Parameters:
          - `filter`: Search query.
          - `batch_size` (optional): The number of documents to return per
            batch.
          - `limit` (optional): The maximum number of documents to return.
          - `no_cursor_timeout` (optional): The server normally times out idle
            cursors after an inactivity period (10 minutes) to prevent excess
            memory use. Set this option to True to prevent that.
          - `skip` (optional): The number of documents to skip before
            returning.
          - `sort` (optional): The order by which to sort results. Defaults to
            None.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`, created with
            :meth:`~MotorClient.start_session`.

        If a :class:`~pymongo.client_session.ClientSession` is passed to
        :meth:`find`, all returned :class:`MotorGridOut` instances
        are associated with that session.

        .. versionchanged:: 1.2
           Added session parameter.
        """
        # The delegate returns a synchronous GridOutCursor; re-wrap it in the
        # framework-specific Motor cursor class.
        cursor = self.delegate.find(*args, **kwargs)
        grid_out_cursor = create_class_with_framework(
            AgnosticGridOutCursor, self._framework, self.__module__
        )

        return grid_out_cursor(cursor, self.collection)
|
||||
|
||||
|
||||
def _hash_gridout(gridout):
|
||||
"""Compute the effective hash of a GridOut object for use with an Etag header.
|
||||
|
||||
Create a FIPS-compliant Etag HTTP header hash using sha256
|
||||
We use the _id + length + upload_date as a proxy for
|
||||
uniqueness to avoid reading the entire file.
|
||||
"""
|
||||
grid_hash = hashlib.sha256(str(gridout._id).encode("utf8"))
|
||||
grid_hash.update(str(gridout.length).encode("utf8"))
|
||||
grid_hash.update(str(gridout.upload_date).encode("utf8"))
|
||||
return grid_hash.hexdigest()
|
||||
181
backend/venv/lib/python3.12/site-packages/motor/motor_gridfs.pyi
Normal file
181
backend/venv/lib/python3.12/site-packages/motor/motor_gridfs.pyi
Normal file
@@ -0,0 +1,181 @@
|
||||
# Copyright 2023-present MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import datetime
|
||||
import os
|
||||
from typing import Any, Iterable, Mapping, NoReturn, Optional
|
||||
|
||||
from bson import ObjectId
|
||||
from gridfs import DEFAULT_CHUNK_SIZE, GridFSBucket, GridIn, GridOut, GridOutCursor # noqa: F401
|
||||
from pymongo import WriteConcern
|
||||
from pymongo.read_preferences import _ServerMode
|
||||
|
||||
from motor.core import (
|
||||
AgnosticClientSession,
|
||||
AgnosticCollection,
|
||||
AgnosticCursor,
|
||||
AgnosticDatabase,
|
||||
)
|
||||
|
||||
_SEEK_SET = os.SEEK_SET
|
||||
_SEEK_CUR = os.SEEK_CUR
|
||||
_SEEK_END = os.SEEK_END
|
||||
|
||||
class AgnosticGridOutCursor(AgnosticCursor):
    # Type stub for the cursor returned by AgnosticGridFSBucket.find().
    __motor_class_name__: str
    __delegate_class__: type[GridOutCursor]
    def next_object(self) -> AgnosticGridOutCursor: ...
|
||||
|
||||
class AgnosticGridOut:
    # Type stub for the framework-agnostic GridFS download stream
    # (MotorGridOut): async read methods plus delegated file metadata.
    __motor_class_name__: str
    __delegate_class__: type[GridOut]
    _id: Any
    aliases: Optional[list[str]]
    chunk_size: int
    filename: Optional[str]
    name: Optional[str]
    content_type: Optional[str]
    length: int
    upload_date: datetime.datetime
    metadata: Optional[Mapping[str, Any]]
    async def _open(self) -> None: ...
    def close(self) -> None: ...
    # Fixed: was annotated ``-> NoReturn``, but the implementation consumes
    # the awaited result (``chunk = await self.read(...)``; ``len(chunk)``),
    # so read() yields the file's data.
    async def read(self, size: int = -1) -> bytes: ...
    def readable(self) -> bool: ...
    async def readchunk(self) -> bytes: ...
    async def readline(self, size: int = -1) -> bytes: ...
    def seek(self, pos: int, whence: int = ...) -> int: ...
    def seekable(self) -> bool: ...
    def tell(self) -> int: ...
    def write(self, data: Any) -> None: ...
    def __init__(
        self,
        root_collection: AgnosticCollection,
        file_id: Optional[int] = None,
        file_document: Optional[Any] = None,
        delegate: Any = None,
        session: Optional[AgnosticClientSession] = None,
    ) -> None: ...
    def __aiter__(self) -> AgnosticGridOut: ...
    async def __anext__(self) -> bytes: ...
    def __getattr__(self, item: str) -> Any: ...
    def open(self) -> Any: ...
    def get_io_loop(self) -> Any: ...
    async def stream_to_handler(self, request_handler: Any) -> None: ...
|
||||
|
||||
class AgnosticGridIn:
    # Type stub for the framework-agnostic GridFS upload stream (MotorGridIn).
    __motor_class_name__: str
    __delegate_class__: type[GridIn]
    __getattr__: Any
    _id: Any
    filename: str
    name: str
    content_type: Optional[str]
    length: int
    chunk_size: int
    upload_date: datetime.datetime

    async def abort(self) -> None: ...
    def closed(self) -> bool: ...
    async def close(self) -> None: ...
    def read(self, size: int = -1) -> NoReturn: ...
    def readable(self) -> bool: ...
    def seekable(self) -> bool: ...
    async def write(self, data: Any) -> None: ...
    def writeable(self) -> bool: ...
    async def writelines(self, sequence: Iterable[Any]) -> None: ...
    # Fixed: the implementation defines this attribute as ``_exit``
    # (``_exit = AsyncCommand("__exit__")``), which ``__aexit__`` awaits;
    # the stub previously misspelled it ``_exit__``.
    async def _exit(self, exc_type: object, exc_val: object, exc_tb: object) -> Any: ...
    async def set(self, name: str, value: Any) -> None: ...
    def __init__(
        self,
        root_collection: AgnosticCollection,
        delegate: Any = None,
        session: Optional[AgnosticClientSession] = None,
        **kwargs: Any,
    ) -> None: ...
    async def __aenter__(self) -> AgnosticGridIn: ...
    async def __aexit__(self, exc_type: object, exc_val: object, exc_tb: object) -> None: ...
    def get_io_loop(self) -> Any: ...
|
||||
|
||||
class AgnosticGridFSBucket:
    # Type stub for the agnostic GridFSBucket wrapper.  Download and upload
    # commands are coroutines; the open_upload_stream* methods are plain
    # (they delegate synchronously and wrap the resulting GridIn).
    __motor_class_name__: str
    __delegate_class__: type[GridFSBucket]
    async def delete(
        self, file_id: Any, session: Optional[AgnosticClientSession] = None
    ) -> None: ...
    async def download_to_stream(
        self, file_id: Any, destination: Any, session: Optional[AgnosticClientSession] = None
    ) -> None: ...
    async def download_to_stream_by_name(
        self,
        filename: str,
        destination: Any,
        revision: int = -1,
        session: Optional[AgnosticClientSession] = None,
    ) -> None: ...
    async def open_download_stream_by_name(
        self, filename: str, revision: int = -1, session: Optional[AgnosticClientSession] = None
    ) -> AgnosticGridOut: ...
    async def open_download_stream(
        self, file_id: Any, session: Optional[AgnosticClientSession] = None
    ) -> AgnosticGridOut: ...
    def open_upload_stream(
        self,
        filename: str,
        chunk_size_bytes: Optional[int] = None,
        metadata: Optional[Mapping[str, Any]] = None,
        session: Optional[AgnosticClientSession] = None,
    ) -> AgnosticGridIn: ...
    def open_upload_stream_with_id(
        self,
        file_id: Any,
        filename: str,
        chunk_size_bytes: Optional[int] = None,
        metadata: Optional[Mapping[str, Any]] = None,
        session: Optional[AgnosticClientSession] = None,
    ) -> AgnosticGridIn: ...
    async def rename(
        self, file_id: Any, new_filename: str, session: Optional[AgnosticClientSession] = None
    ) -> None: ...
    async def upload_from_stream(
        self,
        filename: str,
        source: Any,
        chunk_size_bytes: Optional[int] = None,
        metadata: Optional[Mapping[str, Any]] = None,
        session: Optional[AgnosticClientSession] = None,
    ) -> ObjectId: ...
    async def upload_from_stream_with_id(
        self,
        file_id: Any,
        filename: str,
        source: Any,
        chunk_size_bytes: Optional[int] = None,
        metadata: Optional[Mapping[str, Any]] = None,
        session: Optional[AgnosticClientSession] = None,
    ) -> None: ...
    def __init__(
        self,
        database: AgnosticDatabase,
        bucket_name: str = "fs",
        chunk_size_bytes: int = ...,
        write_concern: Optional[WriteConcern] = None,
        read_preference: Optional[_ServerMode] = None,
        collection: Optional[str] = None,
    ) -> None: ...
    def get_io_loop(self) -> Any: ...
    def wrap(self, obj: Any) -> Any: ...
    def find(self, *args: Any, **kwargs: Any) -> AgnosticGridOutCursor: ...
|
||||
|
||||
# Etag helper: sha256 of the file's _id + length + upload_date (implemented
# in motor_gridfs.py).
def _hash_gridout(gridout: AgnosticGridOut) -> str: ...
|
||||
@@ -0,0 +1,77 @@
|
||||
# Copyright 2011-2015 MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Tornado support for Motor, an asynchronous driver for MongoDB."""
|
||||
|
||||
from . import core, motor_gridfs
|
||||
from .frameworks import tornado as tornado_framework
|
||||
from .metaprogramming import T, create_class_with_framework
|
||||
|
||||
# Public names re-exported by "from motor.motor_tornado import *" (and by
# the motor package root when Tornado is installed).
# NOTE(review): MotorLatentCommandCursor is created below but not listed
# here — presumably internal; confirm before adding it.
__all__ = [
    "MotorClient",
    "MotorClientSession",
    "MotorDatabase",
    "MotorCollection",
    "MotorCursor",
    "MotorCommandCursor",
    "MotorChangeStream",
    "MotorGridFSBucket",
    "MotorGridIn",
    "MotorGridOut",
    "MotorGridOutCursor",
    "MotorClientEncryption",
]
|
||||
|
||||
|
||||
def create_motor_class(cls: T) -> T:
    """Specialize a framework-agnostic Motor class for Tornado.

    Binds *cls* to the Tornado framework shim and places the generated
    class in the ``motor.motor_tornado`` module namespace.
    """
    module_name = "motor.motor_tornado"
    return create_class_with_framework(cls, tornado_framework, module_name)
|
||||
|
||||
|
||||
# Specialize each framework-agnostic class for Tornado once, at import time.
# These module-level names are what applications actually instantiate.
MotorClient = create_motor_class(core.AgnosticClient)

MotorClientSession = create_motor_class(core.AgnosticClientSession)

MotorDatabase = create_motor_class(core.AgnosticDatabase)

MotorCollection = create_motor_class(core.AgnosticCollection)

MotorCursor = create_motor_class(core.AgnosticCursor)

MotorCommandCursor = create_motor_class(core.AgnosticCommandCursor)

# Not in __all__; returned by APIs such as aggregate().
MotorLatentCommandCursor = create_motor_class(core.AgnosticLatentCommandCursor)

MotorChangeStream = create_motor_class(core.AgnosticChangeStream)

MotorGridFSBucket = create_motor_class(motor_gridfs.AgnosticGridFSBucket)

MotorGridIn = create_motor_class(motor_gridfs.AgnosticGridIn)

MotorGridOut = create_motor_class(motor_gridfs.AgnosticGridOut)

MotorGridOutCursor = create_motor_class(motor_gridfs.AgnosticGridOutCursor)

MotorClientEncryption = create_motor_class(core.AgnosticClientEncryption)
|
||||
@@ -0,0 +1,258 @@
|
||||
from typing import Any, Mapping, MutableMapping, Optional, Union
|
||||
|
||||
from bson import Code, CodecOptions, Timestamp
|
||||
from bson.raw_bson import RawBSONDocument
|
||||
from pymongo.client_session import TransactionOptions
|
||||
from pymongo.cursor_shared import _Hint, _Sort
|
||||
from pymongo.read_concern import ReadConcern
|
||||
from pymongo.read_preferences import ReadPreference, _ServerMode
|
||||
from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline
|
||||
from pymongo.write_concern import WriteConcern
|
||||
|
||||
from motor import core, motor_gridfs
|
||||
|
||||
# Stub-side mirror of motor_tornado.py's __all__ export list.
__all__: list[str] = [
    "MotorClient",
    "MotorClientSession",
    "MotorDatabase",
    "MotorCollection",
    "MotorCursor",
    "MotorCommandCursor",
    "MotorChangeStream",
    "MotorGridFSBucket",
    "MotorGridIn",
    "MotorGridOut",
    "MotorGridOutCursor",
    "MotorClientEncryption",
]
|
||||
|
||||
class MotorClient(core.AgnosticClient[_DocumentType]):
    # Tornado client stub: narrows the agnostic base's return types to the
    # concrete Motor* classes declared in this module.
    def get_database(
        self,
        name: Optional[str] = None,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> MotorDatabase[_DocumentType]: ...
    def get_default_database(
        self,
        default: Optional[str] = None,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> MotorDatabase[_DocumentType]: ...
    async def list_databases(
        self,
        session: Optional[core.AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> MotorCommandCursor[dict[str, Any]]: ...
    async def start_session(
        self,
        causal_consistency: Optional[bool] = None,
        default_transaction_options: Optional[TransactionOptions] = None,
        snapshot: Optional[bool] = False,
    ) -> MotorClientSession: ...
    def watch(
        self,
        pipeline: Optional[_Pipeline] = None,
        full_document: Optional[str] = None,
        resume_after: Optional[Mapping[str, Any]] = None,
        max_await_time_ms: Optional[int] = None,
        batch_size: Optional[int] = None,
        collation: Optional[_CollationIn] = None,
        start_at_operation_time: Optional[Timestamp] = None,
        session: Optional[core.AgnosticClientSession] = None,
        start_after: Optional[Mapping[str, Any]] = None,
        comment: Optional[str] = None,
        full_document_before_change: Optional[str] = None,
        show_expanded_events: Optional[bool] = None,
    ) -> MotorChangeStream[_DocumentType]: ...
    # Attribute and item access both resolve to databases.
    def __getattr__(self, name: str) -> MotorDatabase[_DocumentType]: ...
    def __getitem__(self, name: str) -> MotorDatabase[_DocumentType]: ...
|
||||
|
||||
class MotorClientSession(core.AgnosticClientSession):
    # Narrows the owning client and async-context return types.
    @property
    def client(self) -> MotorClient: ...
    async def __aenter__(self) -> MotorClientSession: ...
|
||||
|
||||
class MotorDatabase(core.AgnosticDatabase[_DocumentType]):
    # Tornado database stub: narrows cursor/collection/stream return types.
    async def cursor_command(
        self,
        command: Union[str, MutableMapping[str, Any]],
        value: Any = 1,
        read_preference: Optional[_ServerMode] = None,
        codec_options: Optional[CodecOptions[core._CodecDocumentType]] = None,
        session: Optional[core.AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        max_await_time_ms: Optional[int] = None,
        **kwargs: Any,
    ) -> MotorCommandCursor[_DocumentType]: ...
    async def create_collection(
        self,
        name: str,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
        session: Optional[core.AgnosticClientSession] = None,
        check_exists: Optional[bool] = True,
        **kwargs: Any,
    ) -> MotorCollection[_DocumentType]: ...
    def get_collection(
        self,
        name: str,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> MotorCollection[_DocumentType]: ...
    async def list_collections(
        self,
        session: Optional[core.AgnosticClientSession] = None,
        filter: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> MotorCommandCursor[MutableMapping[str, Any]]: ...
    def with_options(
        self,
        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
        read_preference: Optional[_ServerMode] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> MotorDatabase[_DocumentType]: ...
    def aggregate(
        self, pipeline: _Pipeline, *args: Any, **kwargs: Any
    ) -> MotorLatentCommandCursor[_DocumentType]: ...
    def watch(
        self,
        pipeline: Optional[_Pipeline] = None,
        full_document: Optional[str] = None,
        resume_after: Optional[Mapping[str, Any]] = None,
        max_await_time_ms: Optional[int] = None,
        batch_size: Optional[int] = None,
        collation: Optional[_CollationIn] = None,
        start_at_operation_time: Optional[Timestamp] = None,
        session: Optional[core.AgnosticClientSession] = None,
        start_after: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        full_document_before_change: Optional[str] = None,
        show_expanded_events: Optional[bool] = None,
    ) -> MotorChangeStream[_DocumentType]: ...
    @property
    def client(self) -> MotorClient[_DocumentType]: ...
    # Attribute and item access both resolve to collections.
    def __getattr__(self, name: str) -> MotorCollection[_DocumentType]: ...
    def __getitem__(self, name: str) -> MotorCollection[_DocumentType]: ...
|
||||
|
||||
class MotorCollection(core.AgnosticCollection[_DocumentType]):
    # Tornado collection stub: narrows cursor return types.
    def with_options(
        self,
        # NOTE(review): unlike MotorDatabase.with_options, this uses bare
        # CodecOptions and ReadPreference instead of
        # CodecOptions[_DocumentTypeArg] / _ServerMode — confirm whether the
        # inconsistency is intentional.
        codec_options: Optional[CodecOptions] = None,
        read_preference: Optional[ReadPreference] = None,
        write_concern: Optional[WriteConcern] = None,
        read_concern: Optional[ReadConcern] = None,
    ) -> MotorCollection[_DocumentType]: ...
    def list_search_indexes(
        self,
        name: Optional[str] = None,
        session: Optional[core.AgnosticClientSession] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> MotorLatentCommandCursor[Mapping[str, Any]]: ...
    # Attribute and item access both resolve to sub-collections.
    def __getattr__(self, name: str) -> MotorCollection[_DocumentType]: ...
    def __getitem__(self, name: str) -> MotorCollection[_DocumentType]: ...
    def find(self, *args: Any, **kwargs: Any) -> MotorCursor[_DocumentType]: ...
    def find_raw_batches(self, *args: Any, **kwargs: Any) -> MotorRawBatchCursor[_DocumentType]: ...
    def aggregate(
        self, pipeline: _Pipeline, *args: Any, **kwargs: Any
    ) -> MotorCommandCursor[_DocumentType]: ...
    def aggregate_raw_batches(
        self, pipeline: _Pipeline, **kwargs: Any
    ) -> MotorRawBatchCursor[_DocumentType]: ...
    def list_indexes(
        self, session: Optional[core.AgnosticClientSession] = None, **kwargs: Any
    ) -> MotorLatentCommandCursor[MutableMapping[str, Any]]: ...
|
||||
|
||||
# Tornado specialization with no additional narrowed members.
class MotorLatentCommandCursor(core.AgnosticLatentCommandCursor[_DocumentType]): ...
|
||||
|
||||
class MotorCursor(core.AgnosticCursor[_DocumentType]):
    # Fluent cursor modifiers: each returns the cursor itself, narrowed to
    # MotorCursor so chaining keeps the Tornado type.
    def collation(self, collation: Optional[_CollationIn]) -> MotorCursor[_DocumentType]: ...
    def add_option(self, mask: int) -> MotorCursor[_DocumentType]: ...
    def remove_option(self, mask: int) -> MotorCursor[_DocumentType]: ...
    def limit(self, limit: int) -> MotorCursor[_DocumentType]: ...
    def skip(self, skip: int) -> MotorCursor[_DocumentType]: ...
    def max_scan(self, max_scan: Optional[int]) -> MotorCursor[_DocumentType]: ...
    def sort(
        self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None
    ) -> MotorCursor[_DocumentType]: ...
    def hint(self, index: Optional[_Hint]) -> MotorCursor[_DocumentType]: ...
    def where(self, code: Union[str, Code]) -> MotorCursor[_DocumentType]: ...
    def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> MotorCursor[_DocumentType]: ...
    def max_time_ms(self, max_time_ms: Optional[int]) -> MotorCursor[_DocumentType]: ...
    def min(self, spec: _Sort) -> MotorCursor[_DocumentType]: ...
    def max(self, spec: _Sort) -> MotorCursor[_DocumentType]: ...
    def comment(self, comment: Any) -> MotorCursor[_DocumentType]: ...
    def allow_disk_use(self, allow_disk_use: bool) -> MotorCursor[_DocumentType]: ...
    def rewind(self) -> MotorCursor[_DocumentType]: ...
    def clone(self) -> MotorCursor[_DocumentType]: ...
    def __copy__(self) -> MotorCursor[_DocumentType]: ...
    def __deepcopy__(self, memo: Any) -> MotorCursor[_DocumentType]: ...
|
||||
|
||||
# Tornado specializations with no additional narrowed members.
class MotorRawBatchCursor(core.AgnosticRawBatchCursor[_DocumentType]): ...
class MotorCommandCursor(core.AgnosticCommandCursor[_DocumentType]): ...
class MotorRawBatchCommandCursor(core.AgnosticRawBatchCommandCursor[_DocumentType]): ...
|
||||
|
||||
class MotorChangeStream(core.AgnosticChangeStream[_DocumentType]):
    # Narrows async-iteration and async-context return types.
    def __aiter__(self) -> MotorChangeStream[_DocumentType]: ...
    async def __aenter__(self) -> MotorChangeStream[_DocumentType]: ...
|
||||
|
||||
class MotorClientEncryption(core.AgnosticClientEncryption[_DocumentType]):
    # Narrows client-side field-level encryption helper return types.
    async def __aenter__(self) -> MotorClientEncryption[_DocumentType]: ...
    async def get_keys(self) -> MotorCursor[RawBSONDocument]: ...
    async def create_encrypted_collection(
        self,
        database: core.AgnosticDatabase[_DocumentTypeArg],
        name: str,
        encrypted_fields: Mapping[str, Any],
        kms_provider: Optional[str] = None,
        master_key: Optional[Mapping[str, Any]] = None,
        **kwargs: Any,
    ) -> tuple[MotorCollection[_DocumentTypeArg], Mapping[str, Any]]: ...
|
||||
|
||||
class MotorGridOutCursor(motor_gridfs.AgnosticGridOutCursor):
    # Narrows next_object() to the Tornado cursor type.
    def next_object(self) -> MotorGridOutCursor: ...
|
||||
|
||||
class MotorGridOut(motor_gridfs.AgnosticGridOut):
    # Narrows async-iteration to the Tornado download-stream type.
    def __aiter__(self) -> MotorGridOut: ...
|
||||
|
||||
class MotorGridIn(motor_gridfs.AgnosticGridIn):
    # Narrows "async with" to the Tornado upload-stream type.
    async def __aenter__(self) -> MotorGridIn: ...
|
||||
|
||||
class MotorGridFSBucket(motor_gridfs.AgnosticGridFSBucket):
    # Narrows stream/cursor return types to the Tornado GridFS classes.
    async def open_download_stream_by_name(
        self,
        filename: str,
        revision: int = -1,
        session: Optional[core.AgnosticClientSession] = None,
    ) -> MotorGridOut: ...
    async def open_download_stream(
        self, file_id: Any, session: Optional[core.AgnosticClientSession] = None
    ) -> MotorGridOut: ...
    def open_upload_stream(
        self,
        filename: str,
        chunk_size_bytes: Optional[int] = None,
        metadata: Optional[Mapping[str, Any]] = None,
        session: Optional[core.AgnosticClientSession] = None,
    ) -> MotorGridIn: ...
    def open_upload_stream_with_id(
        self,
        file_id: Any,
        filename: str,
        chunk_size_bytes: Optional[int] = None,
        metadata: Optional[Mapping[str, Any]] = None,
        session: Optional[core.AgnosticClientSession] = None,
    ) -> MotorGridIn: ...
    def find(self, *args: Any, **kwargs: Any) -> MotorGridOutCursor: ...
|
||||
2
backend/venv/lib/python3.12/site-packages/motor/py.typed
Normal file
2
backend/venv/lib/python3.12/site-packages/motor/py.typed
Normal file
@@ -0,0 +1,2 @@
|
||||
# PEP-561 Support File.
|
||||
# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing".
|
||||
182
backend/venv/lib/python3.12/site-packages/motor/web.py
Normal file
182
backend/venv/lib/python3.12/site-packages/motor/web.py
Normal file
@@ -0,0 +1,182 @@
|
||||
# Copyright 2011-2014 MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Utilities for using Motor with Tornado web applications."""
|
||||
import datetime
|
||||
import email.utils
|
||||
import mimetypes
|
||||
import time
|
||||
|
||||
import gridfs
|
||||
import tornado.web
|
||||
|
||||
import motor
|
||||
from motor.motor_gridfs import _hash_gridout
|
||||
|
||||
# mypy: disable-error-code="no-untyped-def,no-untyped-call"
|
||||
|
||||
# TODO: this class is not a drop-in replacement for StaticFileHandler.
|
||||
# StaticFileHandler provides class method make_static_url, which appends
|
||||
# a checksum of the static file's contents. Templates thus can do
|
||||
# {{ static_url('image.png') }} and get "/static/image.png?v=1234abcdef",
|
||||
# which is cached forever. Problem is, it calculates the checksum synchronously.
|
||||
# Two options: keep a synchronous GridFS available to get each grid file's
|
||||
# checksum synchronously for every static_url call, or find some other idiom.
|
||||
|
||||
|
||||
class GridFSHandler(tornado.web.RequestHandler):
    """A handler that can serve content from GridFS, very similar to
    :class:`tornado.web.StaticFileHandler`.

    .. code-block:: python

        db = motor.MotorClient().my_database
        application = web.Application(
            [
                (r"/static/(.*)", web.GridFSHandler, {"database": db}),
            ]
        )

    By default, requests' If-Modified-Since headers are honored, but no
    specific cache-control timeout is sent to clients. Thus each request for
    a GridFS file requires a quick check of the file's ``uploadDate`` in
    MongoDB. Override :meth:`get_cache_time` in a subclass to customize this.
    """

    def initialize(self, database, root_collection="fs"):
        # Tornado passes the URLSpec's keyword arguments here.
        self.database = database
        self.root_collection = root_collection

    def get_gridfs_file(self, bucket, filename, request):
        """Overridable method to choose a GridFS file to serve at a URL.

        By default, if a URL pattern like ``"/static/(.*)"`` is mapped to this
        ``GridFSHandler``, then the trailing portion of the URL is used as the
        filename, so a request for "/static/image.png" results in a call to
        :meth:`MotorGridFSBucket.open_download_stream_by_name` with "image.png"
        as the ``filename`` argument. To customize the mapping of path to
        GridFS file, override ``get_gridfs_file`` and return a Future
        :class:`~motor.MotorGridOut` from it.

        For example, to retrieve the file by ``_id`` instead of filename::

            class CustomGridFSHandler(motor.web.GridFSHandler):
                def get_gridfs_file(self, bucket, filename, request):
                    # Path is interpreted as _id instead of name.
                    # Return a Future MotorGridOut.
                    return fs.open_download_stream(file_id=ObjectId(path))

        :Parameters:
          - `bucket`: A :class:`~motor.motor_tornado.MotorGridFSBucket`
          - `filename`: A string, the matched group of the URL pattern
          - `request`: An :class:`tornado.httputil.HTTPServerRequest`

        .. versionchanged:: 1.0
           **BREAKING CHANGE**: Now takes a
           :class:`~motor.motor_tornado.MotorGridFSBucket`, not a
           ``MotorGridFS``.
           Also takes an additional ``request`` parameter.

        .. versionchanged:: 0.2
           ``get_gridfs_file`` no longer accepts a callback, instead returns
           a Future.
        """
        return bucket.open_download_stream_by_name(filename)

    async def get(self, path, include_body=True):
        """Serve the GridFS file mapped to ``path``.

        Honors If-Modified-Since and If-None-Match (Etag) conditional
        requests; raises HTTP 404 when no matching GridFS file exists.
        """
        fs = motor.MotorGridFSBucket(self.database, self.root_collection)

        try:
            gridout = await self.get_gridfs_file(fs, path, self.request)
        except gridfs.NoFile:
            raise tornado.web.HTTPError(404) from None

        # If-Modified-Since header is only good to the second.
        modified = gridout.upload_date.replace(microsecond=0)
        self.set_header("Last-Modified", modified)

        # Get the hash for the GridFS file.
        checksum = _hash_gridout(gridout)

        self.set_header("Etag", '"%s"' % checksum)

        mime_type = gridout.content_type

        # If content type is not defined, try to check it with mimetypes
        if mime_type is None:
            mime_type, encoding = mimetypes.guess_type(path)

        # Starting from here, largely a copy of StaticFileHandler
        if mime_type:
            self.set_header("Content-Type", mime_type)

        cache_time = self.get_cache_time(path, modified, mime_type)

        if cache_time > 0:
            self.set_header(
                "Expires",
                datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
                + datetime.timedelta(seconds=cache_time),
            )
            self.set_header("Cache-Control", "max-age=" + str(cache_time))
        else:
            self.set_header("Cache-Control", "public")

        self.set_extra_headers(path, gridout)

        # Check the If-Modified-Since, and don't send the result if the
        # content has not been modified
        ims_value = self.request.headers.get("If-Modified-Since")
        if ims_value is not None:
            date_tuple = email.utils.parsedate(ims_value)

            # FIX: parsedate() returns None for a malformed header, which
            # previously crashed time.mktime() with a TypeError (an
            # unhandled 500 triggerable by any client). Treat an
            # unparseable If-Modified-Since as if it were absent.
            if date_tuple is not None:
                # If our MotorClient is tz-aware, assume the naive
                # ims_value is in its time zone.
                if_since = datetime.datetime.fromtimestamp(
                    time.mktime(date_tuple)
                ).replace(tzinfo=modified.tzinfo)

                if if_since >= modified:
                    self.set_status(304)
                    return

        # Same for Etag
        etag = self.request.headers.get("If-None-Match")
        if etag is not None and etag.strip('"') == checksum:
            self.set_status(304)
            return

        self.set_header("Content-Length", gridout.length)
        if include_body:
            await gridout.stream_to_handler(self)

        # Needed until fix for Tornado bug 751 is released, see
        # https://github.com/facebook/tornado/issues/751 and
        # https://github.com/facebook/tornado/commit/5491685
        self.finish()

    def head(self, path):
        # get() is a coroutine. Return its Future.
        return self.get(path, include_body=False)

    def get_cache_time(self, path, modified, mime_type):
        """Override to customize cache control behavior.

        Return a positive number of seconds to trigger aggressive caching or 0
        to mark resource as cacheable, only. 0 is the default.
        """
        return 0

    def set_extra_headers(self, path, gridout):
        """For subclass to add extra headers to the response"""
|
||||
Reference in New Issue
Block a user