hash | content
---|---|
1d388c909a80a92793ab907e194d075735b25ac2ea004ac919b37446cd6053a3 | from django.urls import path
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name="dummy.html")
urlpatterns = [
path("foo/", view, name="not-prefixed-included-url"),
]
|
4834040532945473e6fc2b7576bc342bbdcdfc7a6234a1fb023031d59a290bb9 | from django.conf.urls.i18n import i18n_patterns
from django.urls import include, re_path
from django.utils.translation import gettext_lazy as _
urlpatterns = i18n_patterns(
re_path(
_(r"^account/"),
include("i18n.patterns.urls.wrong_namespace", namespace="account"),
),
)
|
548fe3f220c738d119d5030b4b32ce47f6657adc8f83b79770690981a9e479b2 | from django.conf.urls.i18n import i18n_patterns
from django.urls import include, path, re_path
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name="dummy.html")
urlpatterns = [
path("not-prefixed/", view, name="not-prefixed"),
path("not-prefixed-include/", include("i18n.patterns.urls.included")),
re_path(_(r"^translated/$"), view, name="no-prefix-translated"),
re_path(
_(r"^translated/(?P<slug>[\w-]+)/$"), view, name="no-prefix-translated-slug"
),
]
urlpatterns += i18n_patterns(
path("prefixed/", view, name="prefixed"),
path("prefixed.xml", view, name="prefixed_xml"),
re_path(
_(r"^with-arguments/(?P<argument>[\w-]+)/(?:(?P<optional>[\w-]+).html)?$"),
view,
name="with-arguments",
),
re_path(_(r"^users/$"), view, name="users"),
re_path(
_(r"^account/"), include("i18n.patterns.urls.namespace", namespace="account")
),
)
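def _example_reverse_prefixed():
    # Hedged illustration, not part of the original URLconf: names declared
    # inside i18n_patterns() reverse to language-prefixed URLs, while names
    # outside it do not. Assumes "en" and "nl" are enabled languages and that
    # this module is the active ROOT_URLCONF.
    from django.urls import reverse
    from django.utils import translation

    with translation.override("en"):
        assert reverse("prefixed") == "/en/prefixed/"
    with translation.override("nl"):
        assert reverse("prefixed") == "/nl/prefixed/"
    assert reverse("not-prefixed") == "/not-prefixed/"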
|
1dce27b2b89de79d554e035287dde8bc3a7267b1e120a2bdd1c0a64d4a492d6e | from django.urls import re_path
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name="dummy.html")
urlpatterns = [
re_path("^nl/foo/", view, name="not-translated"),
]
|
28cd2da5ce40a463b95c2bb5a8abc44ce580ed96aa4558f83cc3e48c09520045 | from django.conf.urls.i18n import i18n_patterns
from django.urls import re_path
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name="dummy.html")
app_name = "account"
urlpatterns = i18n_patterns(
re_path(_(r"^register/$"), view, name="register"),
)
|
d6d57f9e86aa8a893e9cce617f769976a942c361200c1ff19a7e802f3b56234e | from django.urls import path, re_path
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name="dummy.html")
app_name = "account"
urlpatterns = [
re_path(_(r"^register/$"), view, name="register"),
re_path(_(r"^register-without-slash$"), view, name="register-without-slash"),
path(_("register-as-path/"), view, name="register-as-path"),
]
|
70614b69e31aa2618c95c62cf868c2980edb18d0996364a840a067c3ac39c9c1 | from django.conf.urls.i18n import i18n_patterns
from django.urls import path
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name="dummy.html")
urlpatterns = i18n_patterns(
path("prefixed/", view, name="prefixed"),
)
|
952bc79aa1c6734b9465b1b899768dc1038413dead40a53e1f4742e26cbaffa7 | # A user-defined format
CUSTOM_DAY_FORMAT = "d/m/Y CUSTOM"
|
30f6987d951485677dd6f2cc2686ee6114421a5efa79a1680cfec9febcc1c063 | """
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from sphinx import addnodes
from sphinx import version_info as sphinx_version
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.directives.code import CodeBlock
from sphinx.domains.std import Cmdoption
from sphinx.errors import ExtensionError
from sphinx.util import logging
from sphinx.util.console import bold
from sphinx.writers.html import HTMLTranslator
logger = logging.getLogger(__name__)
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(r"([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)")
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag",
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter",
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s; field lookup type",
)
app.add_object_type(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_directive("django-admin-option", Cmdoption)
app.add_config_value("django_next_version", "0.0", True)
app.add_directive("versionadded", VersionDirective)
app.add_directive("versionchanged", VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
app.set_translator("djangohtml", DjangoHTMLTranslator)
app.set_translator("json", DjangoHTMLTranslator)
app.add_node(
ConsoleNode,
html=(visit_console_html, None),
latex=(visit_console_dummy, depart_console_dummy),
man=(visit_console_dummy, depart_console_dummy),
text=(visit_console_dummy, depart_console_dummy),
texinfo=(visit_console_dummy, depart_console_dummy),
)
app.add_directive("console", ConsoleDirective)
app.connect("html-page-context", html_page_context_hook)
app.add_role("default-role-error", default_role_error)
return {"parallel_read_safe": True}
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(
directive_name=self.name
)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node["version"] = "Development version"
else:
node["version"] = self.arguments[0]
node["type"] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
try:
env.get_domain("changeset").note_changeset(node)
except ExtensionError:
# Sphinx < 1.8: Domain 'changeset' is not registered
env.note_versionchange(node["type"], node["version"], node, self.lineno)
return ret
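# Hedged illustration, not part of the original extension: VersionDirective
# backs the versionadded/versionchanged directives used in the docs, e.g.:
#
#   .. versionadded:: 4.1
#
#       The ``name`` argument was added.
#
# When the argument equals the django_next_version config value, the callout
# reads "Development version" instead of the version number.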
class DjangoHTMLTranslator(HTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
# Needed by Sphinx.
if sphinx_version >= (4, 3):
self._table_row_indices.append(0)
else:
self._table_row_index = 0
self.body.append(self.starttag(node, "table", CLASS="docutils"))
def depart_table(self, node):
self.compact_p = self.context.pop()
if sphinx_version >= (4, 3):
self._table_row_indices.pop()
self.body.append("</table>\n")
def visit_desc_parameterlist(self, node):
self.body.append("(") # by default sphinx puts <big> around the "("
self.first_param = 1
self.optional_param_level = 0
self.param_separator = node.child_text_separator
self.required_params_left = sum(
isinstance(c, addnodes.desc_parameter) for c in node.children
)
def depart_desc_parameterlist(self, node):
self.body.append(")")
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
# which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
"versionchanged": "Changed in Django %s",
"versionadded": "New in Django %s",
}
def visit_versionmodified(self, node):
self.body.append(self.starttag(node, "div", CLASS=node["type"]))
version_text = self.version_text.get(node["type"])
if version_text:
title = "%s%s" % (version_text % node["version"], ":" if len(node) else ".")
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get("ids", [])
node["ids"] = ["s-" + i for i in old_ids]
node["ids"].extend(old_ids)
super().visit_section(node)
node["ids"] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(" ")[0]
env.ref_context["std:program"] = command
title = "django-admin %s" % sig
signode += addnodes.desc_name(title, title)
return command
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = "djangohtml"
def finish(self):
super().finish()
logger.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [
n
for ((t, n), (k, a)) in xrefs.items()
if t == "templatetag" and k == "ref/templates/builtins"
],
"tfilters": [
n
for ((t, n), (k, a)) in xrefs.items()
if t == "templatefilter" and k == "ref/templates/builtins"
],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, "w") as fp:
fp.write("var django_template_builtins = ")
json.dump(templatebuiltins, fp)
fp.write(";\n")
class ConsoleNode(nodes.literal_block):
"""
Custom node to override the visit/depart event handlers at registration
time. Wrap a literal_block object and defer to it.
"""
tagname = "ConsoleNode"
def __init__(self, litblk_obj):
self.wrapped = litblk_obj
def __getattr__(self, attr):
if attr == "wrapped":
return self.__dict__["wrapped"]
return getattr(self.wrapped, attr)
def visit_console_dummy(self, node):
"""Defer to the corresponding parent's handler."""
self.visit_literal_block(node)
def depart_console_dummy(self, node):
"""Defer to the corresponding parent's handler."""
self.depart_literal_block(node)
def visit_console_html(self, node):
"""Generate HTML for the console directive."""
if self.builder.name in ("djangohtml", "json") and node["win_console_text"]:
# Put a mark on the document object signaling that the directive has
# been used on it.
self.document._console_directive_used_flag = True
uid = node["uid"]
self.body.append(
"""\
<div class="console-block" id="console-block-%(id)s">
<input class="c-tab-unix" id="c-tab-%(id)s-unix" type="radio" name="console-%(id)s" \
checked>
<label for="c-tab-%(id)s-unix" title="Linux/macOS">/</label>
<input class="c-tab-win" id="c-tab-%(id)s-win" type="radio" name="console-%(id)s">
<label for="c-tab-%(id)s-win" title="Windows"></label>
<section class="c-content-unix" id="c-content-%(id)s-unix">\n"""
% {"id": uid}
)
try:
self.visit_literal_block(node)
except nodes.SkipNode:
pass
self.body.append("</section>\n")
self.body.append(
'<section class="c-content-win" id="c-content-%(id)s-win">\n' % {"id": uid}
)
win_text = node["win_console_text"]
highlight_args = {"force": True}
linenos = node.get("linenos", False)
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(
win_text, "doscon", warn=warner, linenos=linenos, **highlight_args
)
self.body.append(highlighted)
self.body.append("</section>\n")
self.body.append("</div>\n")
raise nodes.SkipNode
else:
self.visit_literal_block(node)
class ConsoleDirective(CodeBlock):
"""
A reStructuredText directive which renders a two-tab code block in which
the second tab shows a Windows command line equivalent of the usual
Unix-oriented examples.
"""
required_arguments = 0
# The 'doscon' Pygments lexer needs a prompt like this. '>' alone
# won't do it because then it simply paints the whole command line as a
# gray comment with no highlighting at all.
WIN_PROMPT = r"...\> "
def run(self):
def args_to_win(cmdline):
changed = False
out = []
for token in cmdline.split():
if token[:2] == "./":
token = token[2:]
changed = True
elif token[:2] == "~/":
token = "%HOMEPATH%\\" + token[2:]
changed = True
elif token == "make":
token = "make.bat"
changed = True
if "://" not in token and "git" not in cmdline:
out.append(token.replace("/", "\\"))
changed = True
else:
out.append(token)
if changed:
return " ".join(out)
return cmdline
def cmdline_to_win(line):
if line.startswith("# "):
return "REM " + args_to_win(line[2:])
if line.startswith("$ # "):
return "REM " + args_to_win(line[4:])
if line.startswith("$ ./manage.py"):
return "manage.py " + args_to_win(line[13:])
if line.startswith("$ manage.py"):
return "manage.py " + args_to_win(line[11:])
if line.startswith("$ ./runtests.py"):
return "runtests.py " + args_to_win(line[15:])
if line.startswith("$ ./"):
return args_to_win(line[4:])
if line.startswith("$ python3"):
return "py " + args_to_win(line[9:])
if line.startswith("$ python"):
return "py " + args_to_win(line[8:])
if line.startswith("$ "):
return args_to_win(line[2:])
return None
def code_block_to_win(content):
bchanged = False
lines = []
for line in content:
modline = cmdline_to_win(line)
if modline is None:
lines.append(line)
else:
lines.append(self.WIN_PROMPT + modline)
bchanged = True
if bchanged:
return ViewList(lines)
return None
env = self.state.document.settings.env
self.arguments = ["console"]
lit_blk_obj = super().run()[0]
# Only do work when the djangohtml HTML Sphinx builder is being used;
# invoke the default behavior for the rest.
if env.app.builder.name not in ("djangohtml", "json"):
return [lit_blk_obj]
lit_blk_obj["uid"] = str(env.new_serialno("console"))
# Only add the tabbed UI if there is actually a Windows-specific
# version of the CLI example.
win_content = code_block_to_win(self.content)
if win_content is None:
lit_blk_obj["win_console_text"] = None
else:
self.content = win_content
lit_blk_obj["win_console_text"] = super().run()[0].rawsource
# Replace the literal_node object returned by Sphinx's CodeBlock with
# the ConsoleNode wrapper.
return [ConsoleNode(lit_blk_obj)]
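# Hedged illustration, not part of the original extension: the directive above
# is used in the docs' reST source in place of a plain code-block, e.g.:
#
#   .. console::
#
#       $ python -m pip install Django
#
# Under the djangohtml/json builders it renders a second, Windows-flavored tab
# containing "...\> py -m pip install Django"; other builders fall back to an
# ordinary literal block.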
def html_page_context_hook(app, pagename, templatename, context, doctree):
# Put a bool on the context used to render the template. It's used to
# control inclusion of console-tabs.css and activation of the JavaScript.
# This way it's included only in HTML files rendered from reST files where
# the ConsoleDirective is used.
context["include_console_assets"] = getattr(
doctree, "_console_directive_used_flag", False
)
def default_role_error(
name, rawtext, text, lineno, inliner, options=None, content=None
):
msg = (
"Default role used (`single backticks`): %s. Did you mean to use two "
"backticks for ``code``, or miss an underscore for a `link`_ ?" % rawtext
)
logger.warning(msg, location=(inliner.document.current_source, lineno))
return [nodes.Text(text)], []
|
c26c836df95fe3ee6cedb355a9eae2e2944f77503220f3fa074452d9feedf9e2 | import json
import mimetypes
import os
import sys
from copy import copy
from functools import partial
from http import HTTPStatus
from importlib import import_module
from io import BytesIO
from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit
from asgiref.sync import sync_to_async
from django.conf import settings
from django.core.handlers.asgi import ASGIRequest
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import got_request_exception, request_finished, request_started
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.regex_helper import _lazy_re_compile
__all__ = (
"AsyncClient",
"AsyncRequestFactory",
"Client",
"RedirectCycleError",
"RequestFactory",
"encode_file",
"encode_multipart",
)
BOUNDARY = "BoUnDaRyStRiNg"
MULTIPART_CONTENT = "multipart/form-data; boundary=%s" % BOUNDARY
CONTENT_TYPE_RE = _lazy_re_compile(r".*; charset=([\w-]+);?")
# Structured suffix spec: https://tools.ietf.org/html/rfc6838#section-4.2.8
JSON_CONTENT_TYPE_RE = _lazy_re_compile(r"^application\/(.+\+)?json")
class RedirectCycleError(Exception):
"""The test client has been asked to follow a redirect loop."""
def __init__(self, message, last_response):
super().__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload:
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be sought and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in real life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert (
self.__len >= num_bytes
), "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after it's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
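def _example_fake_payload():
    # Hedged illustration, not part of the original module: FakePayload
    # enforces the content-length contract a real socket would.
    payload = FakePayload(b"hello")
    assert len(payload) == 5
    assert payload.read(2) == b"he"
    assert payload.read() == b"llo"  # read() with no argument drains the rest
    # Reading more than the remaining bytes trips the assertion in read(),
    # and writing after a read raises ValueError.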
def closing_iterator_wrapper(iterable, close):
try:
yield from iterable
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
def conditional_content_removal(request, response):
"""
Simulate the behavior of most web servers by removing the response
content for HEAD requests and for 1xx, 204, and 304 responses. Ensure
compliance with RFC 7230, section 3.3.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b""
if request.method == "HEAD":
if response.streaming:
response.streaming_content = []
else:
response.content = b""
return response
class ClientHandler(BaseHandler):
"""
An HTTP Handler that can be used for testing purposes. Use the WSGI
interface to compose requests, but return the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# Sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Simulate behaviors of most web servers.
conditional_content_removal(request, response)
# Attach the originating request to the response so that it could be
# later retrieved.
response.wsgi_request = request
# Emulate a WSGI server by calling the close method on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close
)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
class AsyncClientHandler(BaseHandler):
"""An async version of ClientHandler."""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
async def __call__(self, scope):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware(is_async=True)
# Extract body file from the scope, if provided.
if "_body_file" in scope:
body_file = scope.pop("_body_file")
else:
body_file = FakePayload("")
request_started.disconnect(close_old_connections)
await sync_to_async(request_started.send, thread_sensitive=False)(
sender=self.__class__, scope=scope
)
request_started.connect(close_old_connections)
request = ASGIRequest(scope, body_file)
# Sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably required
# for backwards compatibility with external tests against admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = await self.get_response_async(request)
# Simulate behaviors of most web servers.
conditional_content_removal(request, response)
# Attach the originating ASGI request to the response so that it could
# be later retrieved.
response.asgi_request = request
# Emulate a server by calling the close method on completion.
if response.streaming:
response.streaming_content = await sync_to_async(
closing_iterator_wrapper, thread_sensitive=False
)(
response.streaming_content,
response.close,
)
else:
request_finished.disconnect(close_old_connections)
# Will fire request_finished.
await sync_to_async(response.close, thread_sensitive=False)()
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Store templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault("templates", []).append(template)
if "context" not in store:
store["context"] = ContextList()
store["context"].append(copy(context))
def encode_multipart(boundary, data):
"""
Encode multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
def is_file(thing):
return hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if value is None:
raise TypeError(
"Cannot encode None for key '%s' as POST data. Did you mean "
"to pass an empty string or omit the value?" % key
)
elif is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, str) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(
to_bytes(val)
for val in [
"--%s" % boundary,
'Content-Disposition: form-data; name="%s"' % key,
"",
item,
]
)
else:
lines.extend(
to_bytes(val)
for val in [
"--%s" % boundary,
'Content-Disposition: form-data; name="%s"' % key,
"",
value,
]
)
lines.extend(
[
to_bytes("--%s--" % boundary),
b"",
]
)
return b"\r\n".join(lines)
def encode_file(boundary, key, file):
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# file.name might not be a string. For example, it's an int for
# tempfile.TemporaryFile().
file_has_string_name = hasattr(file, "name") and isinstance(file.name, str)
filename = os.path.basename(file.name) if file_has_string_name else ""
if hasattr(file, "content_type"):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = "application/octet-stream"
filename = filename or key
return [
to_bytes("--%s" % boundary),
to_bytes(
'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)
),
to_bytes("Content-Type: %s" % content_type),
b"",
to_bytes(file.read()),
]
class RequestFactory:
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, *, json_encoder=DjangoJSONEncoder, **defaults):
self.json_encoder = json_encoder
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See https://www.python.org/dev/peps/pep-3333/#environ-variables
return {
"HTTP_COOKIE": "; ".join(
sorted(
"%s=%s" % (morsel.key, morsel.coded_value)
for morsel in self.cookies.values()
)
),
"PATH_INFO": "/",
"REMOTE_ADDR": "127.0.0.1",
"REQUEST_METHOD": "GET",
"SCRIPT_NAME": "",
"SERVER_NAME": "testserver",
"SERVER_PORT": "80",
"SERVER_PROTOCOL": "HTTP/1.1",
"wsgi.version": (1, 0),
"wsgi.url_scheme": "http",
"wsgi.input": FakePayload(b""),
"wsgi.errors": self.errors,
"wsgi.multiprocess": True,
"wsgi.multithread": False,
"wsgi.run_once": False,
**self.defaults,
**request,
}
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match[1]
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _encode_json(self, data, content_type):
"""
Return encoded JSON if data is a dict, list, or tuple and content_type
is application/json.
"""
should_encode = JSON_CONTENT_TYPE_RE.match(content_type) and isinstance(
data, (dict, list, tuple)
)
return json.dumps(data, cls=self.json_encoder) if should_encode else data
def _get_path(self, parsed):
path = parsed.path
# If there are parameters, add them
if parsed.params:
path += ";" + parsed.params
path = unquote_to_bytes(path)
# Replace the behavior where non-ASCII values in the WSGI environ are
# arbitrarily decoded with ISO-8859-1.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode("iso-8859-1")
def get(self, path, data=None, secure=False, **extra):
"""Construct a GET request."""
data = {} if data is None else data
return self.generic(
"GET",
path,
secure=secure,
**{
"QUERY_STRING": urlencode(data, doseq=True),
**extra,
},
)
def post(
self, path, data=None, content_type=MULTIPART_CONTENT, secure=False, **extra
):
"""Construct a POST request."""
data = self._encode_json({} if data is None else data, content_type)
post_data = self._encode_data(data, content_type)
return self.generic(
"POST", path, post_data, content_type, secure=secure, **extra
)
def head(self, path, data=None, secure=False, **extra):
"""Construct a HEAD request."""
data = {} if data is None else data
return self.generic(
"HEAD",
path,
secure=secure,
**{
"QUERY_STRING": urlencode(data, doseq=True),
**extra,
},
)
def trace(self, path, secure=False, **extra):
"""Construct a TRACE request."""
return self.generic("TRACE", path, secure=secure, **extra)
def options(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
**extra,
):
"Construct an OPTIONS request."
return self.generic("OPTIONS", path, data, content_type, secure=secure, **extra)
def put(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
**extra,
):
"""Construct a PUT request."""
data = self._encode_json(data, content_type)
return self.generic("PUT", path, data, content_type, secure=secure, **extra)
def patch(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
**extra,
):
"""Construct a PATCH request."""
data = self._encode_json(data, content_type)
return self.generic("PATCH", path, data, content_type, secure=secure, **extra)
def delete(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
**extra,
):
"""Construct a DELETE request."""
data = self._encode_json(data, content_type)
return self.generic("DELETE", path, data, content_type, secure=secure, **extra)
def generic(
self,
method,
path,
data="",
content_type="application/octet-stream",
secure=False,
**extra,
):
"""Construct an arbitrary HTTP request."""
parsed = urlparse(str(path)) # path can be lazy
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
"PATH_INFO": self._get_path(parsed),
"REQUEST_METHOD": method,
"SERVER_PORT": "443" if secure else "80",
"wsgi.url_scheme": "https" if secure else "http",
}
if data:
r.update(
{
"CONTENT_LENGTH": str(len(data)),
"CONTENT_TYPE": content_type,
"wsgi.input": FakePayload(data),
}
)
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get("QUERY_STRING"):
# WSGI requires latin-1 encoded strings. See get_path_info().
query_string = parsed[4].encode().decode("iso-8859-1")
r["QUERY_STRING"] = query_string
return self.request(**r)
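def _example_request_factory():
    # Hedged illustration, not part of the original module: build a bare
    # request and pass it straight to a view, bypassing the URL resolver and
    # middleware. Assumes configured settings; my_view is hypothetical.
    rf = RequestFactory()
    request = rf.get("/hello/", {"q": "django"})
    assert request.method == "GET"
    assert request.GET["q"] == "django"
    # response = my_view(request)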
class AsyncRequestFactory(RequestFactory):
"""
Class that lets you create mock ASGI-like Request objects for use in
testing. Usage:
rf = AsyncRequestFactory()
get_request = await rf.get('/hello/')
post_request = await rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
including synchronous ones. The reason we have a separate class here is:
a) this factory creates ASGIRequest instances, and
b) AsyncTestClient can subclass it.
"""
def _base_scope(self, **request):
"""The base scope for a request."""
# This is a minimal valid ASGI scope, plus:
# - headers['cookie'] for cookie support,
# - 'client' often useful, see #8551.
scope = {
"asgi": {"version": "3.0"},
"type": "http",
"http_version": "1.1",
"client": ["127.0.0.1", 0],
"server": ("testserver", "80"),
"scheme": "http",
"method": "GET",
"headers": [],
**self.defaults,
**request,
}
scope["headers"].append(
(
b"cookie",
b"; ".join(
sorted(
("%s=%s" % (morsel.key, morsel.coded_value)).encode("ascii")
for morsel in self.cookies.values()
)
),
)
)
return scope
def request(self, **request):
"""Construct a generic request object."""
# This is synchronous, which means all methods on this class are.
# AsyncClient, however, has an async request function, which makes all
# its methods async.
if "_body_file" in request:
body_file = request.pop("_body_file")
else:
body_file = FakePayload("")
return ASGIRequest(self._base_scope(**request), body_file)
def generic(
self,
method,
path,
data="",
content_type="application/octet-stream",
secure=False,
**extra,
):
"""Construct an arbitrary HTTP request."""
parsed = urlparse(str(path)) # path can be lazy.
data = force_bytes(data, settings.DEFAULT_CHARSET)
s = {
"method": method,
"path": self._get_path(parsed),
"server": ("127.0.0.1", "443" if secure else "80"),
"scheme": "https" if secure else "http",
"headers": [(b"host", b"testserver")],
}
if data:
s["headers"].extend(
[
(b"content-length", str(len(data)).encode("ascii")),
(b"content-type", content_type.encode("ascii")),
]
)
s["_body_file"] = FakePayload(data)
follow = extra.pop("follow", None)
if follow is not None:
s["follow"] = follow
if query_string := extra.pop("QUERY_STRING", None):
s["query_string"] = query_string
s["headers"] += [
(key.lower().encode("ascii"), value.encode("latin1"))
for key, value in extra.items()
]
# If QUERY_STRING is absent or empty, we want to extract it from the
# URL.
if not s.get("query_string"):
s["query_string"] = parsed[4]
return self.request(**s)
class ClientMixin:
"""
Mixin with common methods between Client and AsyncClient.
"""
def store_exc_info(self, **kwargs):
"""Store exceptions when they are generated by a view."""
self.exc_info = sys.exc_info()
def check_exception(self, response):
"""
Look for a signaled exception, clear the current context exception
data, re-raise the signaled exception, and clear the signaled exception
from the local cache.
"""
response.exc_info = self.exc_info
if self.exc_info:
_, exc_value, _ = self.exc_info
self.exc_info = None
if self.raise_request_exception:
raise exc_value
@property
def session(self):
"""Return the current session variables."""
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
session = engine.SessionStore()
session.save()
self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
return session
def login(self, **credentials):
"""
Set the client to appear as if it has successfully logged into a site.
Return True if login is possible or False if the provided credentials
are incorrect.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if user:
self._login(user)
return True
return False
def force_login(self, user, backend=None):
def get_backend():
from django.contrib.auth import load_backend
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
if hasattr(backend, "get_user"):
return backend_path
if backend is None:
backend = get_backend()
user.backend = backend
self._login(user, backend)
def _login(self, user, backend=None):
from django.contrib.auth import login
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
engine = import_module(settings.SESSION_ENGINE)
request.session = engine.SessionStore()
login(request, user, backend)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
"max-age": None,
"path": "/",
"domain": settings.SESSION_COOKIE_DOMAIN,
"secure": settings.SESSION_COOKIE_SECURE or None,
"expires": None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""Log out the user by removing the cookies and session object."""
from django.contrib.auth import get_user, logout
request = HttpRequest()
if self.session:
request.session = self.session
request.user = get_user(request)
else:
engine = import_module(settings.SESSION_ENGINE)
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if not hasattr(response, "_json"):
if not JSON_CONTENT_TYPE_RE.match(response.get("Content-Type")):
raise ValueError(
'Content-Type header is "%s", not "application/json"'
% response.get("Content-Type")
)
response._json = json.loads(
response.content.decode(response.charset), **extra
)
return response._json
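def _example_force_login():
    # Hedged illustration, not part of the original module: force_login()
    # skips the authentication backends, which makes it faster than login()
    # in tests. Assumes django.contrib.auth is installed and a user exists.
    from django.contrib.auth.models import User

    client = Client()
    user = User.objects.get(username="tester")  # hypothetical fixture user
    client.force_login(user)
    # A session cookie is now stored, so later requests are authenticated.
    assert settings.SESSION_COOKIE_NAME in client.cookies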
class Client(ClientMixin, RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(
self, enforce_csrf_checks=False, raise_request_exception=True, **defaults
):
super().__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.raise_request_exception = raise_request_exception
self.exc_info = None
self.extra = None
def request(self, **request):
"""
Make a generic request. Compose the environment dictionary and pass
to the handler, return the result of the handler. Assume defaults for
the query environment, which can be overridden using the arguments to
the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = partial(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
response = self.handler(environ)
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
# Check for signaled exceptions.
self.check_exception(response)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = partial(self._parse_json, response)
# Attach the ResolverMatch instance to the response.
urlconf = getattr(response.wsgi_request, "urlconf", None)
response.resolver_match = SimpleLazyObject(
lambda: resolve(request["PATH_INFO"], urlconf=urlconf),
)
# Flatten a single context. Not really necessary anymore thanks to the
# __getattr__ flattening in ContextList, but has some edge case
# backwards compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
def get(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using GET."""
self.extra = extra
response = super().get(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def post(
self,
path,
data=None,
content_type=MULTIPART_CONTENT,
follow=False,
secure=False,
**extra,
):
"""Request a response from the server using POST."""
self.extra = extra
response = super().post(
path, data=data, content_type=content_type, secure=secure, **extra
)
if follow:
response = self._handle_redirects(
response, data=data, content_type=content_type, **extra
)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using HEAD."""
self.extra = extra
response = super().head(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def options(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
**extra,
):
"""Request a response from the server using OPTIONS."""
self.extra = extra
response = super().options(
path, data=data, content_type=content_type, secure=secure, **extra
)
if follow:
response = self._handle_redirects(
response, data=data, content_type=content_type, **extra
)
return response
def put(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
**extra,
):
"""Send a resource to the server using PUT."""
self.extra = extra
response = super().put(
path, data=data, content_type=content_type, secure=secure, **extra
)
if follow:
response = self._handle_redirects(
response, data=data, content_type=content_type, **extra
)
return response
def patch(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
**extra,
):
"""Send a resource to the server using PATCH."""
self.extra = extra
response = super().patch(
path, data=data, content_type=content_type, secure=secure, **extra
)
if follow:
response = self._handle_redirects(
response, data=data, content_type=content_type, **extra
)
return response
def delete(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
**extra,
):
"""Send a DELETE request to the server."""
self.extra = extra
response = super().delete(
path, data=data, content_type=content_type, secure=secure, **extra
)
if follow:
response = self._handle_redirects(
response, data=data, content_type=content_type, **extra
)
return response
def trace(self, path, data="", follow=False, secure=False, **extra):
"""Send a TRACE request to the server."""
self.extra = extra
response = super().trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, data=data, **extra)
return response
def _handle_redirects(self, response, data="", content_type="", **extra):
"""
Follow any redirects by requesting responses from the server using GET.
"""
response.redirect_chain = []
redirect_status_codes = (
HTTPStatus.MOVED_PERMANENTLY,
HTTPStatus.FOUND,
HTTPStatus.SEE_OTHER,
HTTPStatus.TEMPORARY_REDIRECT,
HTTPStatus.PERMANENT_REDIRECT,
)
while response.status_code in redirect_status_codes:
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra["wsgi.url_scheme"] = url.scheme
if url.hostname:
extra["SERVER_NAME"] = url.hostname
if url.port:
extra["SERVER_PORT"] = str(url.port)
path = url.path
# RFC 2616: bare domains without path are treated as the root.
if not path and url.netloc:
path = "/"
# Prepend the request path to handle relative path redirects
if not path.startswith("/"):
path = urljoin(response.request["PATH_INFO"], path)
if response.status_code in (
HTTPStatus.TEMPORARY_REDIRECT,
HTTPStatus.PERMANENT_REDIRECT,
):
# Preserve request method and query string (if needed)
# post-redirect for 307/308 responses.
request_method = response.request["REQUEST_METHOD"].lower()
if request_method not in ("get", "head"):
extra["QUERY_STRING"] = url.query
request_method = getattr(self, request_method)
else:
request_method = self.get
data = QueryDict(url.query)
content_type = None
response = request_method(
path, data=data, content_type=content_type, follow=False, **extra
)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError(
"Redirect loop detected.", last_response=response
)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
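def _example_follow_redirects():
    # Hedged illustration, not part of the original module: with follow=True,
    # every hop is recorded on response.redirect_chain as (url, status_code)
    # pairs. Assumes "/old/" is a URL that redirects.
    client = Client()
    response = client.get("/old/", follow=True)
    for url, status_code in response.redirect_chain:
        print(url, status_code)  # e.g. "/new/" 301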
class AsyncClient(ClientMixin, AsyncRequestFactory):
"""
An async version of Client that creates ASGIRequests and calls through an
async request path.
Does not currently support "follow" on its methods.
"""
def __init__(
self, enforce_csrf_checks=False, raise_request_exception=True, **defaults
):
super().__init__(**defaults)
self.handler = AsyncClientHandler(enforce_csrf_checks)
self.raise_request_exception = raise_request_exception
self.exc_info = None
self.extra = None
async def request(self, **request):
"""
Make a generic request. Compose the scope dictionary and pass to the
handler, return the result of the handler. Assume defaults for the
query environment, which can be overridden using the arguments to the
request.
"""
if "follow" in request:
raise NotImplementedError(
"AsyncClient request methods do not accept the follow parameter."
)
scope = self._base_scope(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = partial(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
response = await self.handler(scope)
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
# Check for signaled exceptions.
self.check_exception(response)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = partial(self._parse_json, response)
# Attach the ResolverMatch instance to the response.
urlconf = getattr(response.asgi_request, "urlconf", None)
response.resolver_match = SimpleLazyObject(
lambda: resolve(request["path"], urlconf=urlconf),
)
# Flatten a single context. Not really necessary anymore thanks to the
# __getattr__ flattening in ContextList, but has some edge case
# backwards compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
|
e801b75f5f8651fe3ea2625c571c5a1fc672b6cea4f0759be7704fbb81219a4e | import functools
import sys
import threading
import warnings
from collections import Counter, defaultdict
from functools import partial
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from .config import AppConfig
class Apps:
"""
A registry that stores the configuration of installed applications.
It also keeps track of models, e.g. to provide reverse relations.
"""
def __init__(self, installed_apps=()):
# installed_apps is set to None when creating the main registry
# because it cannot be populated at that point. Other registries must
# provide a list of installed apps and are populated immediately.
if installed_apps is None and hasattr(sys.modules[__name__], "apps"):
raise RuntimeError("You must supply an installed_apps argument.")
# Mapping of app labels => model names => model classes. Every time a
# model is imported, ModelBase.__new__ calls apps.register_model which
# creates an entry in all_models. All imported models are registered,
# regardless of whether they're defined in an installed application
# and whether the registry has been populated. Since it isn't possible
# to reimport a module safely (it could reexecute initialization code),
# all_models is never overridden or reset.
self.all_models = defaultdict(dict)
# Mapping of labels to AppConfig instances for installed apps.
self.app_configs = {}
# Stack of app_configs. Used to store the current state in
# set_available_apps and set_installed_apps.
self.stored_app_configs = []
# Whether the registry is populated.
self.apps_ready = self.models_ready = self.ready = False
# For the autoreloader.
self.ready_event = threading.Event()
# Lock for thread-safe population.
self._lock = threading.RLock()
self.loading = False
# Maps ("app_label", "modelname") tuples to lists of functions to be
# called when the corresponding model is ready. Used by this class's
# `lazy_model_operation()` and `do_pending_operations()` methods.
self._pending_operations = defaultdict(list)
# Populate apps and models, unless it's the main registry.
if installed_apps is not None:
self.populate(installed_apps)
def populate(self, installed_apps=None):
"""
Load application configurations and models.
Import each application module and then each model module.
It is thread-safe and idempotent, but not reentrant.
"""
if self.ready:
return
# populate() might be called by two threads in parallel on servers
# that create threads before initializing the WSGI callable.
with self._lock:
if self.ready:
return
# An RLock prevents other threads from entering this section. The
# compare and set operation below is atomic.
if self.loading:
# Prevent reentrant calls to avoid running AppConfig.ready()
# methods twice.
raise RuntimeError("populate() isn't reentrant")
self.loading = True
# Phase 1: initialize app configs and import app modules.
for entry in installed_apps:
if isinstance(entry, AppConfig):
app_config = entry
else:
app_config = AppConfig.create(entry)
if app_config.label in self.app_configs:
raise ImproperlyConfigured(
"Application labels aren't unique, "
"duplicates: %s" % app_config.label
)
self.app_configs[app_config.label] = app_config
app_config.apps = self
# Check for duplicate app names.
counts = Counter(
app_config.name for app_config in self.app_configs.values()
)
duplicates = [name for name, count in counts.most_common() if count > 1]
if duplicates:
raise ImproperlyConfigured(
"Application names aren't unique, "
"duplicates: %s" % ", ".join(duplicates)
)
self.apps_ready = True
# Phase 2: import models modules.
for app_config in self.app_configs.values():
app_config.import_models()
self.clear_cache()
self.models_ready = True
# Phase 3: run ready() methods of app configs.
for app_config in self.get_app_configs():
app_config.ready()
self.ready = True
self.ready_event.set()
def check_apps_ready(self):
"""Raise an exception if all apps haven't been imported yet."""
if not self.apps_ready:
from django.conf import settings
# If "not ready" is due to unconfigured settings, accessing
# INSTALLED_APPS raises a more helpful ImproperlyConfigured
# exception.
settings.INSTALLED_APPS
raise AppRegistryNotReady("Apps aren't loaded yet.")
def check_models_ready(self):
"""Raise an exception if all models haven't been imported yet."""
if not self.models_ready:
raise AppRegistryNotReady("Models aren't loaded yet.")
def get_app_configs(self):
"""Import applications and return an iterable of app configs."""
self.check_apps_ready()
return self.app_configs.values()
def get_app_config(self, app_label):
"""
Import applications and return an app config for the given label.
Raise LookupError if no application exists with this label.
"""
self.check_apps_ready()
try:
return self.app_configs[app_label]
except KeyError:
message = "No installed app with label '%s'." % app_label
for app_config in self.get_app_configs():
if app_config.name == app_label:
message += " Did you mean '%s'?" % app_config.label
break
raise LookupError(message)
# This method is performance-critical at least for Django's test suite.
@functools.lru_cache(maxsize=None)
def get_models(self, include_auto_created=False, include_swapped=False):
"""
Return a list of all installed models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
"""
self.check_models_ready()
result = []
for app_config in self.app_configs.values():
result.extend(app_config.get_models(include_auto_created, include_swapped))
return result
def get_model(self, app_label, model_name=None, require_ready=True):
"""
Return the model matching the given app_label and model_name.
As a shortcut, app_label may be in the form <app_label>.<model_name>.
model_name is case-insensitive.
Raise LookupError if no application exists with this label, or no
model exists with this name in the application. Raise ValueError if
called with a single argument that doesn't contain exactly one dot.
"""
if require_ready:
self.check_models_ready()
else:
self.check_apps_ready()
if model_name is None:
app_label, model_name = app_label.split(".")
app_config = self.get_app_config(app_label)
if not require_ready and app_config.models is None:
app_config.import_models()
return app_config.get_model(model_name, require_ready=require_ready)
def register_model(self, app_label, model):
# Since this method is called when models are imported, it cannot
# perform imports because of the risk of import loops. It mustn't
# call get_app_config().
model_name = model._meta.model_name
app_models = self.all_models[app_label]
if model_name in app_models:
if (
model.__name__ == app_models[model_name].__name__
and model.__module__ == app_models[model_name].__module__
):
warnings.warn(
"Model '%s.%s' was already registered. Reloading models is not "
"advised as it can lead to inconsistencies, most notably with "
"related models." % (app_label, model_name),
RuntimeWarning,
stacklevel=2,
)
else:
raise RuntimeError(
"Conflicting '%s' models in application '%s': %s and %s."
% (model_name, app_label, app_models[model_name], model)
)
app_models[model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def is_installed(self, app_name):
"""
Check whether an application with this name exists in the registry.
app_name is the full name of the app e.g. 'django.contrib.admin'.
"""
self.check_apps_ready()
return any(ac.name == app_name for ac in self.app_configs.values())
def get_containing_app_config(self, object_name):
"""
Look for an app config containing a given object.
object_name is the dotted Python path to the object.
Return the app config for the inner application in case of nesting.
Return None if the object isn't in any registered app config.
"""
self.check_apps_ready()
candidates = []
for app_config in self.app_configs.values():
if object_name.startswith(app_config.name):
subpath = object_name[len(app_config.name) :]
if subpath == "" or subpath[0] == ".":
candidates.append(app_config)
if candidates:
return sorted(candidates, key=lambda ac: -len(ac.name))[0]
def get_registered_model(self, app_label, model_name):
"""
Similar to get_model(), but doesn't require that an app exists with
the given app_label.
It's safe to call this method at import time, even while the registry
is being populated.
"""
model = self.all_models[app_label].get(model_name.lower())
if model is None:
raise LookupError("Model '%s.%s' not registered." % (app_label, model_name))
return model
@functools.lru_cache(maxsize=None)
def get_swappable_settings_name(self, to_string):
"""
For a given model string (e.g. "auth.User"), return the name of the
corresponding settings name if it refers to a swappable model. If the
referred model is not swappable, return None.
This method is decorated with lru_cache because it's performance
critical when it comes to migrations. Since the swappable settings don't
change after Django has loaded the settings, there is no reason to get
the respective settings attribute over and over again.
"""
to_string = to_string.lower()
for model in self.get_models(include_swapped=True):
swapped = model._meta.swapped
# Is this model swapped out for the model given by to_string?
if swapped and swapped.lower() == to_string:
return model._meta.swappable
# Is this model swappable and the one given by to_string?
if model._meta.swappable and model._meta.label_lower == to_string:
return model._meta.swappable
return None
def set_available_apps(self, available):
"""
Restrict the set of installed apps used by get_app_config[s].
available must be an iterable of application names.
set_available_apps() must be balanced with unset_available_apps().
Primarily used for performance optimization in TransactionTestCase.
This method is safe in the sense that it doesn't trigger any imports.
"""
available = set(available)
installed = {app_config.name for app_config in self.get_app_configs()}
if not available.issubset(installed):
raise ValueError(
"Available apps isn't a subset of installed apps, extra apps: %s"
% ", ".join(available - installed)
)
self.stored_app_configs.append(self.app_configs)
self.app_configs = {
label: app_config
for label, app_config in self.app_configs.items()
if app_config.name in available
}
self.clear_cache()
def unset_available_apps(self):
"""Cancel a previous call to set_available_apps()."""
self.app_configs = self.stored_app_configs.pop()
self.clear_cache()
def set_installed_apps(self, installed):
"""
Enable a different set of installed apps for get_app_config[s].
installed must be an iterable in the same format as INSTALLED_APPS.
set_installed_apps() must be balanced with unset_installed_apps(),
even if it exits with an exception.
Primarily used as a receiver of the setting_changed signal in tests.
This method may trigger new imports, which may add new models to the
registry of all imported models. They will stay in the registry even
after unset_installed_apps(). Since it isn't possible to replay
imports safely (e.g. that could lead to registering listeners twice),
models are registered when they're imported and never removed.
"""
if not self.ready:
raise AppRegistryNotReady("App registry isn't ready yet.")
self.stored_app_configs.append(self.app_configs)
self.app_configs = {}
self.apps_ready = self.models_ready = self.loading = self.ready = False
self.clear_cache()
self.populate(installed)
def unset_installed_apps(self):
"""Cancel a previous call to set_installed_apps()."""
self.app_configs = self.stored_app_configs.pop()
self.apps_ready = self.models_ready = self.ready = True
self.clear_cache()
def clear_cache(self):
"""
Clear all internal caches, for methods that alter the app registry.
This is mostly used in tests.
"""
# Call expire cache on each model. This will purge
# the relation tree and the fields cache.
self.get_models.cache_clear()
if self.ready:
            # Circumvent self.get_models() to prevent the cache from being
            # refilled. In particular, this prevents an empty value from being
            # cached while cloning.
for app_config in self.app_configs.values():
for model in app_config.get_models(include_auto_created=True):
model._meta._expire_cache()
def lazy_model_operation(self, function, *model_keys):
"""
Take a function and a number of ("app_label", "modelname") tuples, and
when all the corresponding models have been imported and registered,
call the function with the model classes as its arguments.
The function passed to this method must accept exactly n models as
arguments, where n=len(model_keys).
"""
# Base case: no arguments, just execute the function.
if not model_keys:
function()
# Recursive case: take the head of model_keys, wait for the
# corresponding model class to be imported and registered, then apply
# that argument to the supplied function. Pass the resulting partial
# to lazy_model_operation() along with the remaining model args and
# repeat until all models are loaded and all arguments are applied.
else:
next_model, *more_models = model_keys
# This will be executed after the class corresponding to next_model
# has been imported and registered. The `func` attribute provides
# duck-type compatibility with partials.
def apply_next_model(model):
next_function = partial(apply_next_model.func, model)
self.lazy_model_operation(next_function, *more_models)
apply_next_model.func = function
# If the model has already been imported and registered, partially
# apply it to the function now. If not, add it to the list of
# pending operations for the model, where it will be executed with
# the model class as its sole argument once the model is ready.
try:
model_class = self.get_registered_model(*next_model)
except LookupError:
self._pending_operations[next_model].append(apply_next_model)
else:
apply_next_model(model_class)
def do_pending_operations(self, model):
"""
Take a newly-prepared model and pass it to each function waiting for
it. This is called at the very end of Apps.register_model().
"""
key = model._meta.app_label, model._meta.model_name
for function in self._pending_operations.pop(key, []):
function(model)
apps = Apps(installed_apps=None)
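# --- Illustrative sketch (not part of Django) --------------------------------
# lazy_model_operation() defers work until the named models are registered,
# which is useful in AppConfig.ready() when a model from another app may not
# have been imported yet. "myapp" and connect_signals are hypothetical names
# used only for this example.
def _demo_lazy_model_operation():
    def connect_signals(user_model, profile_model):
        # Called exactly once, after both model classes exist in the registry.
        print(user_model, profile_model)

    apps.lazy_model_operation(
        connect_signals, ("auth", "user"), ("myapp", "profile")
    )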
|
fa8507eacacf44ea9c28a1b6d36791fbaec70bbb3cc7c2f24c5764e59ebcb705 | from urllib.parse import quote
from django.http import (
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseNotFound,
HttpResponseServerError,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.views.decorators.csrf import requires_csrf_token
ERROR_404_TEMPLATE_NAME = "404.html"
ERROR_403_TEMPLATE_NAME = "403.html"
ERROR_400_TEMPLATE_NAME = "400.html"
ERROR_500_TEMPLATE_NAME = "500.html"
ERROR_PAGE_TEMPLATE = """
<!doctype html>
<html lang="en">
<head>
<title>%(title)s</title>
</head>
<body>
<h1>%(title)s</h1><p>%(details)s</p>
</body>
</html>
"""
# These views can be called when CsrfViewMiddleware.process_view() has not
# run, and therefore need @requires_csrf_token in case the template uses
# {% csrf_token %}.
@requires_csrf_token
def page_not_found(request, exception, template_name=ERROR_404_TEMPLATE_NAME):
"""
Default 404 handler.
Templates: :template:`404.html`
Context:
request_path
The path of the requested URL (e.g., '/app/pages/bad_page/'). It's
quoted to prevent a content injection attack.
exception
The message from the exception which triggered the 404 (if one was
supplied), or the exception class name
"""
exception_repr = exception.__class__.__name__
# Try to get an "interesting" exception message, if any (and not the ugly
# Resolver404 dictionary)
try:
message = exception.args[0]
except (AttributeError, IndexError):
pass
else:
if isinstance(message, str):
exception_repr = message
context = {
"request_path": quote(request.path),
"exception": exception_repr,
}
try:
template = loader.get_template(template_name)
body = template.render(context, request)
except TemplateDoesNotExist:
if template_name != ERROR_404_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
# Render template (even though there are no substitutions) to allow
# inspecting the context in tests.
template = Engine().from_string(
ERROR_PAGE_TEMPLATE
% {
"title": "Not Found",
"details": "The requested resource was not found on this server.",
},
)
body = template.render(Context(context))
return HttpResponseNotFound(body)
@requires_csrf_token
def server_error(request, template_name=ERROR_500_TEMPLATE_NAME):
"""
500 error handler.
Templates: :template:`500.html`
Context: None
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_500_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseServerError(
ERROR_PAGE_TEMPLATE % {"title": "Server Error (500)", "details": ""},
)
return HttpResponseServerError(template.render())
@requires_csrf_token
def bad_request(request, exception, template_name=ERROR_400_TEMPLATE_NAME):
"""
400 error handler.
Templates: :template:`400.html`
Context: None
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_400_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseBadRequest(
ERROR_PAGE_TEMPLATE % {"title": "Bad Request (400)", "details": ""},
)
    # No exception content is passed to the template, so as not to disclose
    # any sensitive information.
return HttpResponseBadRequest(template.render())
@requires_csrf_token
def permission_denied(request, exception, template_name=ERROR_403_TEMPLATE_NAME):
"""
Permission denied (403) handler.
Templates: :template:`403.html`
Context:
exception
The message from the exception which triggered the 403 (if one was
supplied).
    If the template does not exist, an HTTP 403 response containing the text
    "403 Forbidden" (as per RFC 7231) will be returned.
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_403_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseForbidden(
ERROR_PAGE_TEMPLATE % {"title": "403 Forbidden", "details": ""},
)
return HttpResponseForbidden(
template.render(request=request, context={"exception": str(exception)})
)
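# --- Illustrative sketch (not part of Django) --------------------------------
# These defaults are used automatically, but a project can point the handler
# hooks in its ROOT_URLCONF at custom views instead. A minimal urls.py
# fragment, assuming a hypothetical "myproject.views" module:
#
#     handler400 = "myproject.views.bad_request"
#     handler403 = "myproject.views.permission_denied"
#     handler404 = "myproject.views.page_not_found"
#     handler500 = "myproject.views.server_error"
#
# Each custom view must accept the same arguments as the default it replaces
# (e.g. page_not_found takes request and exception).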
|
0fe069439fdfbe3ec2e2d81a4d6618959c8b8f397d80a047fd1045ca4fd95425 | import functools
import re
import sys
import types
import warnings
from pathlib import Path
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import pprint
from django.urls import resolve
from django.utils import timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
from django.utils.module_loading import import_string
from django.utils.regex_helper import _lazy_re_compile
from django.utils.version import get_docs_version
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting. Templates are
# read directly from the filesystem so that the error handler
# works even if the template loader is broken.
DEBUG_ENGINE = Engine(
debug=True,
libraries={"i18n": "django.templatetags.i18n"},
)
def builtin_template_path(name):
"""
Return a path to a builtin template.
Avoid calling this function at the module level or in a class-definition
because __file__ may not exist, e.g. in frozen environments.
"""
return Path(__file__).parent / "templates" / name
class ExceptionCycleWarning(UserWarning):
pass
class CallableSettingWrapper:
"""
    Object to wrap a callable appearing in settings.
    * Prevents it from being called on the debug page (#21345).
    * Prevents the debug page from breaking if the callable forbids setting
      attributes (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = get_exception_reporter_class(request)(request, exc_type, exc_value, tb)
if request.accepts("text/html"):
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code)
else:
text = reporter.get_traceback_text()
return HttpResponse(
text, status=status_code, content_type="text/plain; charset=utf-8"
)
@functools.lru_cache
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, "exception_reporter_filter", default_filter)
def get_exception_reporter_class(request):
default_exception_reporter_class = import_string(
settings.DEFAULT_EXCEPTION_REPORTER
)
return getattr(
request, "exception_reporter_class", default_exception_reporter_class
)
def get_caller(request):
resolver_match = request.resolver_match
if resolver_match is None:
try:
resolver_match = resolve(request.path)
except Http404:
pass
return "" if resolver_match is None else resolver_match._func_path
class SafeExceptionReporterFilter:
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
cleansed_substitute = "********************"
hidden_settings = _lazy_re_compile(
"API|TOKEN|KEY|SECRET|PASS|SIGNATURE", flags=re.I
)
def cleanse_setting(self, key, value):
"""
Cleanse an individual setting key/value of sensitive content. If the
value is a dictionary, recursively cleanse the keys in that dictionary.
"""
try:
is_sensitive = self.hidden_settings.search(key)
except TypeError:
is_sensitive = False
if is_sensitive:
cleansed = self.cleansed_substitute
elif isinstance(value, dict):
cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}
elif isinstance(value, list):
cleansed = [self.cleanse_setting("", v) for v in value]
elif isinstance(value, tuple):
cleansed = tuple([self.cleanse_setting("", v) for v in value])
else:
cleansed = value
if callable(cleansed):
cleansed = CallableSettingWrapper(cleansed)
return cleansed
def get_safe_settings(self):
"""
Return a dictionary of the settings module with values of sensitive
settings replaced with stars (*********).
"""
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = self.cleanse_setting(k, getattr(settings, k))
return settings_dict
def get_safe_request_meta(self, request):
"""
Return a dictionary of request.META with sensitive values redacted.
"""
if not hasattr(request, "META"):
return {}
return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}
def is_active(self, request):
"""
        This filter adds safety in production environments (i.e. when DEBUG
        is False). If DEBUG is True, the site is not safe anyway.
        This hook is provided as a convenience to easily activate or
        deactivate the filter on a per-request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
        Replace the values of keys marked as sensitive in a MultiValueDict
        with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, "sensitive_post_parameters", [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = self.cleansed_substitute
return multivaluedict
def get_post_parameters(self, request):
"""
Replace the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(
request, "sensitive_post_parameters", []
)
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == "__ALL__":
# Cleanse all parameters.
for k in cleansed:
cleansed[k] = self.cleansed_substitute
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = self.cleansed_substitute
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
try:
            # If value is lazy or a complex object of another kind, this check
            # might raise an exception. The isinstance() call also forces
            # evaluation of lazy objects, so lazy MultiValueDicts are detected.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return "{!r} while evaluating {!r}".format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replace the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (
current_frame.f_code.co_name == "sensitive_variables_wrapper"
and "sensitive_variables_wrapper" in current_frame.f_locals
):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals["sensitive_variables_wrapper"]
sensitive_variables = getattr(wrapper, "sensitive_variables", None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == "__ALL__":
# Cleanse all variables
for name in tb_frame.f_locals:
cleansed[name] = self.cleansed_substitute
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = self.cleansed_substitute
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (
tb_frame.f_code.co_name == "sensitive_variables_wrapper"
and "sensitive_variables_wrapper" in tb_frame.f_locals
):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed["func_args"] = self.cleansed_substitute
cleansed["func_kwargs"] = self.cleansed_substitute
return cleansed.items()
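# --- Illustrative sketch (not part of Django) --------------------------------
# cleanse_setting() redacts any setting whose name matches hidden_settings and
# recurses into dicts, lists, and tuples. _demo_cleanse is a hypothetical
# helper used only for this example.
def _demo_cleanse():
    flt = SafeExceptionReporterFilter()
    # "API_KEY" matches the hidden_settings pattern, so it is redacted.
    assert flt.cleanse_setting("API_KEY", "abc123") == flt.cleansed_substitute
    # Nested keys are cleansed recursively.
    cleansed = flt.cleanse_setting("DATABASES", {"default": {"PASSWORD": "pw"}})
    assert cleansed["default"]["PASSWORD"] == flt.cleansed_substitute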
class ExceptionReporter:
"""Organize and coordinate reporting on exceptions."""
@property
def html_template_path(self):
return builtin_template_path("technical_500.html")
@property
def text_template_path(self):
return builtin_template_path("technical_500.txt")
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, "template_debug", None)
self.template_does_not_exist = False
self.postmortem = None
def _get_raw_insecure_uri(self):
"""
        Return an absolute URI from variables available in this request. Skip
        allowed hosts protection, so the returned URI may be insecure.
"""
return "{scheme}://{host}{path}".format(
scheme=self.request.scheme,
host=self.request._get_raw_host(),
path=self.request.get_full_path(),
)
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if "vars" in frame:
frame_vars = []
for k, v in frame["vars"]:
v = pprint(v)
# Trim large blobs of data
if len(v) > 4096:
v = "%s… <trimmed %d bytes string>" % (v[0:4096], len(v))
frame_vars.append((k, v))
frame["vars"] = frame_vars
frames[i] = frame
unicode_hint = ""
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, "start", None)
end = getattr(self.exc_value, "end", None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = force_str(
unicode_str[max(start - 5, 0) : min(end + 5, len(unicode_str))],
"ascii",
errors="replace",
)
from django import get_version
if self.request is None:
user_str = None
else:
try:
user_str = str(self.request.user)
except Exception:
# request.user may raise OperationalError if the database is
# unavailable, for example.
user_str = "[unable to retrieve the current user]"
c = {
"is_email": self.is_email,
"unicode_hint": unicode_hint,
"frames": frames,
"request": self.request,
"request_meta": self.filter.get_safe_request_meta(self.request),
"user_str": user_str,
"filtered_POST_items": list(
self.filter.get_post_parameters(self.request).items()
),
"settings": self.filter.get_safe_settings(),
"sys_executable": sys.executable,
"sys_version_info": "%d.%d.%d" % sys.version_info[0:3],
"server_time": timezone.now(),
"django_version_info": get_version(),
"sys_path": sys.path,
"template_info": self.template_info,
"template_does_not_exist": self.template_does_not_exist,
"postmortem": self.postmortem,
}
if self.request is not None:
c["request_GET_items"] = self.request.GET.items()
c["request_FILES_items"] = self.request.FILES.items()
c["request_COOKIES_items"] = self.request.COOKIES.items()
c["request_insecure_uri"] = self._get_raw_insecure_uri()
c["raising_view_name"] = get_caller(self.request)
# Check whether exception info is available
if self.exc_type:
c["exception_type"] = self.exc_type.__name__
if self.exc_value:
c["exception_value"] = str(self.exc_value)
if frames:
c["lastframe"] = frames[-1]
return c
def get_traceback_html(self):
"""Return HTML version of debug 500 HTTP error page."""
with self.html_template_path.open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"""Return plain text version of debug 500 HTTP error page."""
with self.text_template_path.open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_source(self, filename, loader, module_name):
source = None
if hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, "rb") as fp:
source = fp.read().splitlines()
except OSError:
pass
return source
def _get_lines_from_file(
self, filename, lineno, context_lines, loader=None, module_name=None
):
"""
Return context_lines before and after lineno from file.
Return (pre_context_lineno, pre_context, context_line, post_context).
"""
source = self._get_source(filename, loader, module_name)
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a
# string, then we should do that ourselves.
if isinstance(source[0], bytes):
encoding = "ascii"
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (https://www.python.org/dev/peps/pep-0263/)
match = re.search(rb"coding[:=]\s*([-\w.]+)", line)
if match:
encoding = match[1].decode("ascii")
break
source = [str(sline, encoding, "replace") for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
try:
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1 : upper_bound]
except IndexError:
return None, [], None, []
return lower_bound, pre_context, context_line, post_context
def _get_explicit_or_implicit_cause(self, exc_value):
explicit = getattr(exc_value, "__cause__", None)
suppress_context = getattr(exc_value, "__suppress_context__", None)
implicit = getattr(exc_value, "__context__", None)
return explicit or (None if suppress_context else implicit)
def get_traceback_frames(self):
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = self._get_explicit_or_implicit_cause(exc_value)
if exc_value in exceptions:
warnings.warn(
"Cycle in the exception chain detected: exception '%s' "
"encountered again." % exc_value,
ExceptionCycleWarning,
)
# Avoid infinite loop if there's a cyclic reference (#29393).
break
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception, take the traceback from self.tb
exc_value = exceptions.pop()
tb = self.tb if not exceptions else exc_value.__traceback__
while True:
frames.extend(self.get_exception_traceback_frames(exc_value, tb))
try:
exc_value = exceptions.pop()
except IndexError:
break
tb = exc_value.__traceback__
return frames
def get_exception_traceback_frames(self, exc_value, tb):
exc_cause = self._get_explicit_or_implicit_cause(exc_value)
exc_cause_explicit = getattr(exc_value, "__cause__", True)
if tb is None:
yield {
"exc_cause": exc_cause,
"exc_cause_explicit": exc_cause_explicit,
"tb": None,
"type": "user",
}
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get("__traceback_hide__"):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get("__loader__")
module_name = tb.tb_frame.f_globals.get("__name__") or ""
(
pre_context_lineno,
pre_context,
context_line,
post_context,
) = self._get_lines_from_file(
filename,
lineno,
7,
loader,
module_name,
)
if pre_context_lineno is None:
pre_context_lineno = lineno
pre_context = []
context_line = "<source code not available>"
post_context = []
yield {
"exc_cause": exc_cause,
"exc_cause_explicit": exc_cause_explicit,
"tb": tb,
"type": "django" if module_name.startswith("django.") else "user",
"filename": filename,
"function": function,
"lineno": lineno + 1,
"vars": self.filter.get_traceback_frame_variables(
self.request, tb.tb_frame
),
"id": id(tb),
"pre_context": pre_context,
"context_line": context_line,
"post_context": post_context,
"pre_context_lineno": pre_context_lineno + 1,
}
tb = tb.tb_next
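# --- Illustrative sketch (not part of Django) --------------------------------
# ExceptionReporter can be driven directly to produce a report for the current
# exception, assuming Django settings are configured. _demo_report is a
# hypothetical helper used only for this example.
def _demo_report():
    try:
        1 / 0
    except ZeroDivisionError:
        # The last three arguments are the values from sys.exc_info().
        reporter = ExceptionReporter(None, *sys.exc_info())
        return reporter.get_traceback_text()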
def technical_404_response(request, exception):
"""Create a technical 404 error response. `exception` is the Http404."""
try:
error_url = exception.args[0]["path"]
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]["tried"]
except (IndexError, TypeError, KeyError):
resolved = True
tried = request.resolver_match.tried if request.resolver_match else None
else:
resolved = False
if not tried or ( # empty URLconf
request.path == "/"
and len(tried) == 1
and len(tried[0]) == 1 # default URLconf
and getattr(tried[0][0], "app_name", "")
== getattr(tried[0][0], "namespace", "")
== "admin"
):
return default_urlconf(request)
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
with builtin_template_path("technical_404.html").open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
reporter_filter = get_default_exception_reporter_filter()
c = Context(
{
"urlconf": urlconf,
"root_urlconf": settings.ROOT_URLCONF,
"request_path": error_url,
"urlpatterns": tried,
"resolved": resolved,
"reason": str(exception),
"request": request,
"settings": reporter_filter.get_safe_settings(),
"raising_view_name": get_caller(request),
}
)
return HttpResponseNotFound(t.render(c))
def default_urlconf(request):
"""Create an empty URLconf 404 error response."""
with builtin_template_path("default_urlconf.html").open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(
{
"version": get_docs_version(),
}
)
return HttpResponse(t.render(c))
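# --- Illustrative sketch (not part of Django) --------------------------------
# The filter and reporter classes above are swappable via settings. A project
# can subclass SafeExceptionReporterFilter and point
# DEFAULT_EXCEPTION_REPORTER_FILTER at it; "myproject" is a hypothetical
# module name used only for this example.
#
#     # myproject/debug.py
#     class StrictFilter(SafeExceptionReporterFilter):
#         hidden_settings = _lazy_re_compile(
#             "API|TOKEN|KEY|SECRET|PASS|SIGNATURE|DSN", flags=re.I
#         )
#
#     # settings.py
#     DEFAULT_EXCEPTION_REPORTER_FILTER = "myproject.debug.StrictFilter"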
|
f1bae1a288383bd3223a75e4fefefdba288710dc03d13c13bd0cf25dec65f3be | from django.conf import settings
from django.http import HttpResponseForbidden
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.translation import gettext as _
from django.utils.version import get_docs_version
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
# Only the text appearing with DEBUG=False is translated. Normal translation
# tags cannot be used with these inline templates as makemessages would not be
# able to discover the strings.
CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>403 Forbidden</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; color:#000; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
#info { background:#f6f6f6; }
#info ul { margin: 0.5em 4em; }
#info p, #summary p { padding-top:10px; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ title }} <span>(403)</span></h1>
<p>{{ main }}</p>
{% if no_referer %}
<p>{{ no_referer1 }}</p>
<p>{{ no_referer2 }}</p>
<p>{{ no_referer3 }}</p>
{% endif %}
{% if no_cookie %}
<p>{{ no_cookie1 }}</p>
<p>{{ no_cookie2 }}</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
<h2>Help</h2>
{% if reason %}
<p>Reason given for failure:</p>
<pre>
{{ reason }}
</pre>
{% endif %}
<p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when
<a
href="https://docs.djangoproject.com/en/{{ docs_version }}/ref/csrf/">Django’s
CSRF mechanism</a> has not been used correctly. For POST forms, you need to
ensure:</p>
<ul>
<li>Your browser is accepting cookies.</li>
<li>The view function passes a <code>request</code> to the template’s <a
href="https://docs.djangoproject.com/en/dev/topics/templates/#django.template.backends.base.Template.render"><code>render</code></a>
method.</li>
<li>In the template, there is a <code>{% templatetag openblock %} csrf_token
{% templatetag closeblock %}</code> template tag inside each POST form that
targets an internal URL.</li>
<li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
<code>csrf_protect</code> on any views that use the <code>csrf_token</code>
template tag, as well as those that accept the POST data.</li>
<li>The form has a valid CSRF token. After logging in in another browser
tab or hitting the back button after a login, you may need to reload the
page with the form, because the token is rotated after a login.</li>
</ul>
<p>You’re seeing the help section of this page because you have <code>DEBUG =
True</code> in your Django settings file. Change that to <code>False</code>,
and only the initial error message will be displayed. </p>
<p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p>
</div>
{% else %}
<div id="explanation">
<p><small>{{ more }}</small></p>
</div>
{% endif %}
</body>
</html>
""" # NOQA
CSRF_FAILURE_TEMPLATE_NAME = "403_csrf.html"
def csrf_failure(request, reason="", template_name=CSRF_FAILURE_TEMPLATE_NAME):
"""
Default view used when request fails CSRF protection
"""
from django.middleware.csrf import REASON_NO_CSRF_COOKIE, REASON_NO_REFERER
c = {
"title": _("Forbidden"),
"main": _("CSRF verification failed. Request aborted."),
"reason": reason,
"no_referer": reason == REASON_NO_REFERER,
"no_referer1": _(
"You are seeing this message because this HTTPS site requires a "
"“Referer header” to be sent by your web browser, but none was "
"sent. This header is required for security reasons, to ensure "
"that your browser is not being hijacked by third parties."
),
"no_referer2": _(
"If you have configured your browser to disable “Referer” headers, "
"please re-enable them, at least for this site, or for HTTPS "
"connections, or for “same-origin” requests."
),
"no_referer3": _(
'If you are using the <meta name="referrer" '
'content="no-referrer"> tag or including the “Referrer-Policy: '
"no-referrer” header, please remove them. The CSRF protection "
"requires the “Referer” header to do strict referer checking. If "
"you’re concerned about privacy, use alternatives like "
'<a rel="noreferrer" …> for links to third-party sites.'
),
"no_cookie": reason == REASON_NO_CSRF_COOKIE,
"no_cookie1": _(
"You are seeing this message because this site requires a CSRF "
"cookie when submitting forms. This cookie is required for "
"security reasons, to ensure that your browser is not being "
"hijacked by third parties."
),
"no_cookie2": _(
"If you have configured your browser to disable cookies, please "
"re-enable them, at least for this site, or for “same-origin” "
"requests."
),
"DEBUG": settings.DEBUG,
"docs_version": get_docs_version(),
"more": _("More information is available with DEBUG=True."),
}
try:
t = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name == CSRF_FAILURE_TEMPLATE_NAME:
# If the default template doesn't exist, use the string template.
t = Engine().from_string(CSRF_FAILURE_TEMPLATE)
c = Context(c)
else:
# Raise if a developer-specified template doesn't exist.
raise
return HttpResponseForbidden(t.render(c))
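# --- Illustrative sketch (not part of Django) --------------------------------
# A project can replace this view via the CSRF_FAILURE_VIEW setting, as the
# template above mentions. The replacement must accept request and reason;
# "myproject" is a hypothetical module name used only for this example.
#
#     # settings.py
#     CSRF_FAILURE_VIEW = "myproject.views.csrf_failure"
#
#     # myproject/views.py
#     def csrf_failure(request, reason="", template_name="403_csrf.html"):
#         ...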
|
6a133fa76f4cdaf5a66cff5aeb124da0ef01f53374cfc3cc080174e0e926424b | """
Settings and configuration for Django.
Read values from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global_settings.py
for a list of all possible variables.
"""
import importlib
import os
import time
import traceback
import warnings
from pathlib import Path
import django
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import LazyObject, empty
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
# RemovedInDjango50Warning
USE_DEPRECATED_PYTZ_DEPRECATED_MSG = (
"The USE_DEPRECATED_PYTZ setting, and support for pytz timezones is "
"deprecated in favor of the stdlib zoneinfo module. Please update your "
"code to use zoneinfo and remove the USE_DEPRECATED_PYTZ setting."
)
USE_L10N_DEPRECATED_MSG = (
"The USE_L10N setting is deprecated. Starting with Django 5.0, localized "
"formatting of data will always be enabled. For example Django will "
"display numbers and dates using the format of the current locale."
)
CSRF_COOKIE_MASKED_DEPRECATED_MSG = (
"The CSRF_COOKIE_MASKED transitional setting is deprecated. Support for "
"it will be removed in Django 5.0."
)
class SettingsReference(str):
"""
String subclass which references a current settings value. It's treated as
the value in memory but serializes to a settings.NAME attribute reference.
"""
    def __new__(cls, value, setting_name):
        return str.__new__(cls, value)
def __init__(self, value, setting_name):
self.setting_name = setting_name
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time settings are needed, if the user hasn't
configured settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE)
)
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return "<LazySettings [Unevaluated]>"
return '<LazySettings "%(settings_module)s">' % {
"settings_module": self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
"""Return the value of a setting and cache it in self.__dict__."""
if (_wrapped := self._wrapped) is empty:
self._setup(name)
_wrapped = self._wrapped
val = getattr(_wrapped, name)
# Special case some settings which require further modification.
# This is done here for performance reasons so the modified value is cached.
if name in {"MEDIA_URL", "STATIC_URL"} and val is not None:
val = self._add_script_prefix(val)
elif name == "SECRET_KEY" and not val:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
self.__dict__[name] = val
return val
def __setattr__(self, name, value):
"""
Set the value of setting. Clear all cached values if _wrapped changes
(@override_settings does this) or clear single values when set.
"""
if name == "_wrapped":
self.__dict__.clear()
else:
self.__dict__.pop(name, None)
super().__setattr__(name, value)
def __delattr__(self, name):
"""Delete a setting and clear it from cache if needed."""
super().__delattr__(name)
self.__dict__.pop(name, None)
def configure(self, default_settings=global_settings, **options):
"""
        Called to manually configure the settings. The 'default_settings'
        parameter sets where to retrieve any unspecified values from (it must
        support attribute access via __getattr__).
"""
if self._wrapped is not empty:
raise RuntimeError("Settings already configured.")
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
if not name.isupper():
raise TypeError("Setting %r must be uppercase." % name)
setattr(holder, name, value)
self._wrapped = holder
@staticmethod
def _add_script_prefix(value):
"""
Add SCRIPT_NAME prefix to relative paths.
Useful when the app is being served at a subpath and manually prefixing
subpath to STATIC_URL and MEDIA_URL in settings is inconvenient.
"""
# Don't apply prefix to absolute paths and URLs.
if value.startswith(("http://", "https://", "/")):
return value
from django.urls import get_script_prefix
return "%s%s" % (get_script_prefix(), value)
@property
def configured(self):
"""Return True if the settings have already been configured."""
return self._wrapped is not empty
@property
def USE_L10N(self):
stack = traceback.extract_stack()
# Show a warning if the setting is used outside of Django.
# Stack index: -1 this line, -2 the LazyObject __getattribute__(),
# -3 the caller.
filename, _, _, _ = stack[-3]
if not filename.startswith(os.path.dirname(django.__file__)):
warnings.warn(
USE_L10N_DEPRECATED_MSG,
RemovedInDjango50Warning,
stacklevel=2,
)
return self.__getattr__("USE_L10N")
# RemovedInDjango50Warning.
@property
def _USE_L10N_INTERNAL(self):
# Special hook to avoid checking a traceback in internal use on hot
# paths.
return self.__getattr__("USE_L10N")
class Settings:
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
tuple_settings = (
"ALLOWED_HOSTS",
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
"SECRET_KEY_FALLBACKS",
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and not isinstance(
setting_value, (list, tuple)
):
raise ImproperlyConfigured(
"The %s setting must be a list or a tuple." % setting
)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
if self.USE_TZ is False and not self.is_overridden("USE_TZ"):
warnings.warn(
"The default value of USE_TZ will change from False to True "
"in Django 5.0. Set USE_TZ to False in your project settings "
"if you want to keep the current default behavior.",
category=RemovedInDjango50Warning,
)
if self.is_overridden("USE_DEPRECATED_PYTZ"):
warnings.warn(USE_DEPRECATED_PYTZ_DEPRECATED_MSG, RemovedInDjango50Warning)
if self.is_overridden("CSRF_COOKIE_MASKED"):
warnings.warn(CSRF_COOKIE_MASKED_DEPRECATED_MSG, RemovedInDjango50Warning)
if hasattr(time, "tzset") and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = Path("/usr/share/zoneinfo")
zone_info_file = zoneinfo_root.joinpath(*self.TIME_ZONE.split("/"))
if zoneinfo_root.exists() and not zone_info_file.exists():
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ["TZ"] = self.TIME_ZONE
time.tzset()
if self.is_overridden("USE_L10N"):
warnings.warn(USE_L10N_DEPRECATED_MSG, RemovedInDjango50Warning)
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
"cls": self.__class__.__name__,
"settings_module": self.SETTINGS_MODULE,
}
class UserSettingsHolder:
"""Holder for user configured settings."""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__["_deleted"] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if not name.isupper() or name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
if name == "USE_L10N":
warnings.warn(USE_L10N_DEPRECATED_MSG, RemovedInDjango50Warning)
if name == "CSRF_COOKIE_MASKED":
warnings.warn(CSRF_COOKIE_MASKED_DEPRECATED_MSG, RemovedInDjango50Warning)
super().__setattr__(name, value)
if name == "USE_DEPRECATED_PYTZ":
warnings.warn(USE_DEPRECATED_PYTZ_DEPRECATED_MSG, RemovedInDjango50Warning)
def __delattr__(self, name):
self._deleted.add(name)
if hasattr(self, name):
super().__delattr__(name)
def __dir__(self):
return sorted(
s
for s in [*self.__dict__, *dir(self.default_settings)]
if s not in self._deleted
)
def is_overridden(self, setting):
deleted = setting in self._deleted
set_locally = setting in self.__dict__
set_on_default = getattr(
self.default_settings, "is_overridden", lambda s: False
)(setting)
return deleted or set_locally or set_on_default
def __repr__(self):
return "<%(cls)s>" % {
"cls": self.__class__.__name__,
}
settings = LazySettings()
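# --- Illustrative sketch (not part of Django) --------------------------------
# LazySettings supports standalone use without DJANGO_SETTINGS_MODULE by
# calling configure() before any setting is accessed. A minimal demo:
#
#     from django.conf import settings
#
#     if not settings.configured:
#         settings.configure(DEBUG=True, ALLOWED_HOSTS=["localhost"])
#     print(settings.DEBUG)  # True; a second configure() call would raise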
|
c693a2635d46a6081dfcc226a8bc0c16894ee20c37aaf4b908554033b1427308 | import codecs
import datetime
import locale
from decimal import Decimal
from urllib.parse import quote
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
super().__init__(*args)
def __str__(self):
return "%s. You passed in %r (%s)" % (
super().__str__(),
self.obj,
type(self.obj),
)
def smart_str(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Return a string representing 's'. Treat bytestrings using the 'encoding'
codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_str(s, encoding, strings_only, errors)
_PROTECTED_TYPES = (
type(None),
int,
float,
Decimal,
datetime.datetime,
datetime.date,
datetime.time,
)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_str(strings_only=True).
"""
return isinstance(obj, _PROTECTED_TYPES)
def force_str(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_str(), except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), str):
return s
if strings_only and is_protected_type(s):
return s
try:
if isinstance(s, bytes):
s = str(s, encoding, errors)
else:
s = str(s)
except UnicodeDecodeError as e:
raise DjangoUnicodeDecodeError(s, *e.args)
return s
def smart_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Return a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == "utf-8":
return s
else:
return s.decode("utf-8", errors).encode(encoding, errors)
if strings_only and is_protected_type(s):
return s
if isinstance(s, memoryview):
return bytes(s)
return str(s).encode(encoding, errors)
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987, slightly simplified
since the input is assumed to be a string rather than an arbitrary byte
stream.
Take an IRI (string or UTF-8 bytes, e.g. '/I ♥ Django/' or
b'/I \xe2\x99\xa5 Django/') and return a string containing the encoded
result with ASCII chars only (e.g. '/I%20%E2%99%A5%20Django/').
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.parse.quote() already considers all
# but the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
elif isinstance(iri, Promise):
iri = str(iri)
return quote(iri, safe="/#%[]=:;$&()+,!?*@'~")
# List of byte values that uri_to_iri() decodes from percent encoding.
# First, the unreserved characters from RFC 3986:
_ascii_ranges = [[45, 46, 95, 126], range(65, 91), range(97, 123)]
_hextobyte = {
(fmt % char).encode(): bytes((char,))
for ascii_range in _ascii_ranges
for char in ascii_range
for fmt in ["%02x", "%02X"]
}
# And then everything above 128, because bytes ≥ 128 are part of multibyte
# Unicode characters.
_hexdig = "0123456789ABCDEFabcdef"
_hextobyte.update(
{(a + b).encode(): bytes.fromhex(a + b) for a in _hexdig[8:] for b in _hexdig}
)
def uri_to_iri(uri):
"""
    Convert a Uniform Resource Identifier (URI) into an Internationalized
    Resource Identifier (IRI).
    This is the algorithm from section 3.2 of RFC 3987, excluding step 4.
    Take a URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and return
    a string containing the encoded result (e.g. '/I%20♥%20Django/').
"""
if uri is None:
return uri
uri = force_bytes(uri)
# Fast selective unquote: First, split on '%' and then starting with the
# second block, decode the first 2 bytes if they represent a hex code to
# decode. The rest of the block is the part after '%AB', not containing
# any '%'. Add that to the output without further processing.
bits = uri.split(b"%")
if len(bits) == 1:
iri = uri
else:
parts = [bits[0]]
append = parts.append
hextobyte = _hextobyte
for item in bits[1:]:
            hexpair = item[:2]
            if hexpair in hextobyte:
                append(hextobyte[hexpair])
append(item[2:])
else:
append(b"%")
append(item)
iri = b"".join(parts)
return repercent_broken_unicode(iri).decode()
def escape_uri_path(path):
"""
Escape the unsafe characters from the path portion of a Uniform Resource
Identifier (URI).
"""
# These are the "reserved" and "unreserved" characters specified in
# sections 2.2 and 2.3 of RFC 2396:
# reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | ","
# unreserved = alphanum | mark
# mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | "(" | ")"
# The list of safe characters here is constructed subtracting ";", "=",
# and "?" according to section 3.3 of RFC 2396.
# The reason for not subtracting and escaping "/" is that we are escaping
# the entire path, not a path segment.
return quote(path, safe="/:@&+$,-_.!~*'()")
def punycode(domain):
"""Return the Punycode of the given domain if it's non-ASCII."""
return domain.encode("idna").decode("ascii")
def repercent_broken_unicode(path):
"""
As per section 3.2 of RFC 3987, step three of converting a URI into an IRI,
repercent-encode any octet produced that is not part of a strictly legal
UTF-8 octet sequence.
"""
while True:
try:
path.decode()
except UnicodeDecodeError as e:
# CVE-2019-14235: A recursion shouldn't be used since the exception
# handling uses massive amounts of memory
repercent = quote(path[e.start : e.end], safe=b"/#%[]=:;$&()+,!?*@'~")
path = path[: e.start] + repercent.encode() + path[e.end :]
else:
return path
def filepath_to_uri(path):
"""Convert a file system path to a URI portion that is suitable for
inclusion in a URL.
Encode certain chars that would normally be recognized as special chars
for URIs. Do not encode the ' character, as it is a valid character
within URIs. See the encodeURIComponent() JavaScript function for details.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return quote(str(path).replace("\\", "/"), safe="/~!*()'")
def get_system_encoding():
"""
    The encoding for the character type functions. Fall back to 'ascii' if
    the encoding is unsupported by Python or could not be determined. See
    tickets #10335 and #5846.
"""
try:
encoding = locale.getlocale()[1] or "ascii"
codecs.lookup(encoding)
except Exception:
encoding = "ascii"
return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
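# --- Illustrative sketch (not part of Django) --------------------------------
# force_str()/force_bytes() convert in both directions, and the IRI/URI
# helpers round-trip as their docstrings describe. _demo_encoding is a
# hypothetical helper used only for this example.
def _demo_encoding():
    assert force_str(b"caf\xc3\xa9") == "café"
    assert force_bytes("café") == b"caf\xc3\xa9"
    assert iri_to_uri("/I ♥ Django/") == "/I%20%E2%99%A5%20Django/"
    assert uri_to_iri("/I%20%E2%99%A5%20Django/") == "/I%20♥%20Django/"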
|
c74ab461890a2d7de638a78633d5551991790c29432b997d2131203627cf610e | import copy
import itertools
import operator
import warnings
from functools import total_ordering, wraps
class cached_property:
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
A cached property can be made out of an existing method:
(e.g. ``url = cached_property(get_absolute_url)``).
"""
name = None
@staticmethod
def func(instance):
raise TypeError(
"Cannot use cached_property instance without calling "
"__set_name__() on it."
)
def __init__(self, func, name=None):
from django.utils.deprecation import RemovedInDjango50Warning
if name is not None:
warnings.warn(
"The name argument is deprecated as it's unnecessary as of "
"Python 3.6.",
RemovedInDjango50Warning,
stacklevel=2,
)
self.real_func = func
self.__doc__ = getattr(func, "__doc__")
def __set_name__(self, owner, name):
if self.name is None:
self.name = name
self.func = self.real_func
elif name != self.name:
raise TypeError(
"Cannot assign the same cached_property to two different names "
"(%r and %r)." % (self.name, name)
)
def __get__(self, instance, cls=None):
"""
Call the function and put the return value in instance.__dict__ so that
subsequent attribute access on the instance returns the cached value
instead of calling cached_property.__get__().
"""
if instance is None:
return self
res = instance.__dict__[self.name] = self.func(instance)
return res
class classproperty:
"""
Decorator that converts a method with a single cls argument into a property
that can be accessed directly from the class.
"""
def __init__(self, method=None):
self.fget = method
def __get__(self, instance, cls=None):
return self.fget(cls)
def getter(self, method):
self.fget = method
return self
class Promise:
"""
Base class for the proxy class created in the closure of the lazy function.
It's used to recognize promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
    Turn any callable into a lazily evaluated callable. At least one result
    class or type is required so that the automatic forcing of the lazy
    evaluation code is triggered. Results are not memoized; the function is
    evaluated on every access.
"""
@total_ordering
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__prepared = False
def __init__(self, args, kw):
self.__args = args
self.__kw = kw
if not self.__prepared:
self.__prepare_class__()
self.__class__.__prepared = True
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(func, self.__args, self.__kw) + resultclasses,
)
def __repr__(self):
return repr(self.__cast())
@classmethod
def __prepare_class__(cls):
for resultclass in resultclasses:
for type_ in resultclass.mro():
for method_name in type_.__dict__:
# All __promise__ return the same wrapper method, they
# look up the correct implementation when called.
if hasattr(cls, method_name):
continue
meth = cls.__promise__(method_name)
setattr(cls, method_name, meth)
cls._delegate_bytes = bytes in resultclasses
cls._delegate_text = str in resultclasses
if cls._delegate_bytes and cls._delegate_text:
raise ValueError(
"Cannot call lazy() with both bytes and text return types."
)
if cls._delegate_text:
cls.__str__ = cls.__text_cast
elif cls._delegate_bytes:
cls.__bytes__ = cls.__bytes_cast
@classmethod
def __promise__(cls, method_name):
# Builds a wrapper around some magic method
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = func(*self.__args, **self.__kw)
return getattr(res, method_name)(*args, **kw)
return __wrapper__
def __text_cast(self):
return func(*self.__args, **self.__kw)
def __bytes_cast(self):
return bytes(func(*self.__args, **self.__kw))
def __bytes_cast_encoded(self):
return func(*self.__args, **self.__kw).encode()
def __cast(self):
if self._delegate_bytes:
return self.__bytes_cast()
elif self._delegate_text:
return self.__text_cast()
else:
return func(*self.__args, **self.__kw)
def __str__(self):
# object defines __str__(), so __prepare_class__() won't overload
# a __str__() method from the proxied class.
return str(self.__cast())
def __eq__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() == other
def __lt__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() < other
def __hash__(self):
return hash(self.__cast())
def __mod__(self, rhs):
if self._delegate_text:
return str(self) % rhs
return self.__cast() % rhs
def __add__(self, other):
return self.__cast() + other
def __radd__(self, other):
return other + self.__cast()
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
@wraps(func)
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return __wrapper__
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
def lazystr(text):
"""
Shortcut for the common case of a lazy callable that returns str.
"""
return lazy(str, str)(text)
def keep_lazy(*resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
if not resultclasses:
raise TypeError("You must pass at least one argument to keep_lazy().")
def decorator(func):
lazy_func = lazy(func, *resultclasses)
@wraps(func)
def wrapper(*args, **kwargs):
if any(
isinstance(arg, Promise)
for arg in itertools.chain(args, kwargs.values())
):
return lazy_func(*args, **kwargs)
return func(*args, **kwargs)
return wrapper
return decorator
def keep_lazy_text(func):
"""
A decorator for functions that accept lazy arguments and return text.
"""
return keep_lazy(str)(func)
empty = object()
def new_method_proxy(func):
def inner(self, *args):
if (_wrapped := self._wrapped) is empty:
self._setup()
_wrapped = self._wrapped
return func(_wrapped, *args)
inner._mask_wrapped = False
return inner
class LazyObject:
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
# Avoid infinite recursion when tracing __init__ (#19456).
_wrapped = None
def __init__(self):
# Note: if a subclass overrides __init__(), it will likely need to
# override __copy__() and __deepcopy__() as well.
self._wrapped = empty
def __getattribute__(self, name):
if name == "_wrapped":
# Avoid recursion when getting wrapped object.
return super().__getattribute__(name)
value = super().__getattribute__(name)
# If attribute is a proxy method, raise an AttributeError to call
# __getattr__() and use the wrapped object method.
if not getattr(value, "_mask_wrapped", True):
raise AttributeError
return value
__getattr__ = new_method_proxy(getattr)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is empty:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is empty:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialize the wrapped object.
"""
raise NotImplementedError(
"subclasses of LazyObject must provide a _setup() method"
)
# Because we have messed with __class__ below, we confuse pickle as to what
# class we are pickling. We're going to have to initialize the wrapped
# object to successfully pickle it, so we might as well just pickle the
# wrapped object since they're supposed to act the same way.
#
# Unfortunately, if we try to simply act like the wrapped object, the ruse
# will break down when pickle gets our id(). Thus we end up with pickle
# thinking, in effect, that we are a distinct object from the wrapped
# object, but with the same __dict__. This can cause problems (see #25389).
#
# So instead, we define our own __reduce__ method and custom unpickler. We
# pickle the wrapped object as the unpickler's argument, so that pickle
# will pickle it normally, and then the unpickler simply returns its
# argument.
def __reduce__(self):
if self._wrapped is empty:
self._setup()
return (unpickle_lazyobject, (self._wrapped,))
def __copy__(self):
if self._wrapped is empty:
# If uninitialized, copy the wrapper. Use type(self), not
# self.__class__, because the latter is proxied.
return type(self)()
else:
# If initialized, return a copy of the wrapped object.
return copy.copy(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use type(self), not self.__class__, because the
# latter is proxied.
result = type(self)()
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
__bytes__ = new_method_proxy(bytes)
__str__ = new_method_proxy(str)
__bool__ = new_method_proxy(bool)
# Introspection support
__dir__ = new_method_proxy(dir)
# Need to pretend to be the wrapped class, for the sake of objects that
# care about this (especially in equality tests)
__class__ = property(new_method_proxy(operator.attrgetter("__class__")))
__eq__ = new_method_proxy(operator.eq)
__lt__ = new_method_proxy(operator.lt)
__gt__ = new_method_proxy(operator.gt)
__ne__ = new_method_proxy(operator.ne)
__hash__ = new_method_proxy(hash)
# List/Tuple/Dictionary methods support
__getitem__ = new_method_proxy(operator.getitem)
__setitem__ = new_method_proxy(operator.setitem)
__delitem__ = new_method_proxy(operator.delitem)
__iter__ = new_method_proxy(iter)
__len__ = new_method_proxy(len)
__contains__ = new_method_proxy(operator.contains)
def unpickle_lazyobject(wrapped):
"""
Used to unpickle lazy objects. Just return its argument, which will be the
wrapped object.
"""
return wrapped
class SimpleLazyObject(LazyObject):
"""
A lazy object initialized from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__["_setupfunc"] = func
super().__init__()
def _setup(self):
self._wrapped = self._setupfunc()
# Return a meaningful representation of the lazy object for debugging
# without evaluating the wrapped object.
def __repr__(self):
if self._wrapped is empty:
repr_attr = self._setupfunc
else:
repr_attr = self._wrapped
return "<%s: %r>" % (type(self).__name__, repr_attr)
def __copy__(self):
if self._wrapped is empty:
# If uninitialized, copy the wrapper. Use SimpleLazyObject, not
# self.__class__, because the latter is proxied.
return SimpleLazyObject(self._setupfunc)
else:
# If initialized, return a copy of the wrapped object.
return copy.copy(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
__add__ = new_method_proxy(operator.add)
@new_method_proxy
def __radd__(self, other):
return other + self
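# A minimal usage sketch for SimpleLazyObject (illustrative; `fetch_user` is a
# hypothetical callable that must be safe to run more than once):
#
#     >>> from django.utils.functional import SimpleLazyObject
#     >>> user = SimpleLazyObject(lambda: fetch_user())
#     >>> user.username  # fetch_user() runs here, on first attribute access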
def partition(predicate, values):
"""
Split the values into two sets, based on the return value of the function
(True/False). e.g.:
>>> partition(lambda x: x > 3, range(5))
[0, 1, 2, 3], [4]
"""
results = ([], [])
for item in values:
results[predicate(item)].append(item)
return results
f8d8606746fe576189adb71e6951ddc9bc96b1413253b3ea4e79af53d6a744af
import datetime
import io
import json
import mimetypes
import os
import re
import sys
import time
from email.header import Header
from http.client import responses
from urllib.parse import quote, urlparse
from django.conf import settings
from django.core import signals, signing
from django.core.exceptions import DisallowedRedirect
from django.core.serializers.json import DjangoJSONEncoder
from django.http.cookie import SimpleCookie
from django.utils import timezone
from django.utils.datastructures import CaseInsensitiveMapping
from django.utils.encoding import iri_to_uri
from django.utils.http import http_date
from django.utils.regex_helper import _lazy_re_compile
_charset_from_content_type_re = _lazy_re_compile(
r";\s*charset=(?P<charset>[^\s;]+)", re.I
)
class ResponseHeaders(CaseInsensitiveMapping):
def __init__(self, data):
"""
Populate the initial data using __setitem__ to ensure values are
correctly encoded.
"""
self._store = {}
if data:
for header, value in self._unpack_items(data):
self[header] = value
def _convert_to_charset(self, value, charset, mime_encode=False):
"""
Convert headers key/value to ascii/latin-1 native strings.
`charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and
`value` can't be represented in the given charset, apply MIME-encoding.
"""
try:
if isinstance(value, str):
# Ensure string is valid in given charset
value.encode(charset)
elif isinstance(value, bytes):
# Convert bytestring using given charset
value = value.decode(charset)
else:
value = str(value)
# Ensure string is valid in given charset.
value.encode(charset)
if "\n" in value or "\r" in value:
raise BadHeaderError(
f"Header values can't contain newlines (got {value!r})"
)
except UnicodeError as e:
# Encoding to a string of the specified charset failed, but we
# don't know what type that value was, or if it contains newlines,
# which we may need to check for before sending it to be
# encoded for multiple character sets.
if (isinstance(value, bytes) and (b"\n" in value or b"\r" in value)) or (
isinstance(value, str) and ("\n" in value or "\r" in value)
):
raise BadHeaderError(
f"Header values can't contain newlines (got {value!r})"
) from e
if mime_encode:
value = Header(value, "utf-8", maxlinelen=sys.maxsize).encode()
else:
e.reason += ", HTTP response headers must be in %s format" % charset
raise
return value
def __delitem__(self, key):
self.pop(key)
def __setitem__(self, key, value):
key = self._convert_to_charset(key, "ascii")
value = self._convert_to_charset(value, "latin-1", mime_encode=True)
self._store[key.lower()] = (key, value)
def pop(self, key, default=None):
return self._store.pop(key.lower(), default)
def setdefault(self, key, value):
if key not in self:
self[key] = value
class BadHeaderError(ValueError):
pass
class HttpResponseBase:
"""
An HTTP response base class with dictionary-accessed headers.
This class doesn't handle content. It should not be used directly.
Use the HttpResponse and StreamingHttpResponse subclasses instead.
"""
status_code = 200
def __init__(
self, content_type=None, status=None, reason=None, charset=None, headers=None
):
self.headers = ResponseHeaders(headers)
self._charset = charset
if "Content-Type" not in self.headers:
if content_type is None:
content_type = f"text/html; charset={self.charset}"
self.headers["Content-Type"] = content_type
elif content_type:
raise ValueError(
"'headers' must not contain 'Content-Type' when the "
"'content_type' parameter is provided."
)
self._resource_closers = []
# This parameter is set by the handler. It's necessary to preserve the
# historical behavior of request_finished.
self._handler_class = None
self.cookies = SimpleCookie()
self.closed = False
if status is not None:
try:
self.status_code = int(status)
except (ValueError, TypeError):
raise TypeError("HTTP status code must be an integer.")
if not 100 <= self.status_code <= 599:
raise ValueError("HTTP status code must be an integer from 100 to 599.")
self._reason_phrase = reason
@property
def reason_phrase(self):
if self._reason_phrase is not None:
return self._reason_phrase
# Leave self._reason_phrase unset in order to use the default
# reason phrase for status code.
return responses.get(self.status_code, "Unknown Status Code")
@reason_phrase.setter
def reason_phrase(self, value):
self._reason_phrase = value
@property
def charset(self):
if self._charset is not None:
return self._charset
# The Content-Type header may not yet be set, because the charset is
# being inserted *into* it.
if content_type := self.headers.get("Content-Type"):
if matched := _charset_from_content_type_re.search(content_type):
# Extract the charset and strip its double quotes.
# Note that having parsed it from the Content-Type, we don't
# store it back into the _charset for later intentionally, to
# allow for the Content-Type to be switched again later.
return matched["charset"].replace('"', "")
return settings.DEFAULT_CHARSET
@charset.setter
def charset(self, value):
self._charset = value
def serialize_headers(self):
"""HTTP headers as a bytestring."""
return b"\r\n".join(
[
key.encode("ascii") + b": " + value.encode("latin-1")
for key, value in self.headers.items()
]
)
__bytes__ = serialize_headers
@property
def _content_type_for_repr(self):
return (
', "%s"' % self.headers["Content-Type"]
if "Content-Type" in self.headers
else ""
)
def __setitem__(self, header, value):
self.headers[header] = value
def __delitem__(self, header):
del self.headers[header]
def __getitem__(self, header):
return self.headers[header]
def has_header(self, header):
"""Case-insensitive check for a header."""
return header in self.headers
__contains__ = has_header
def items(self):
return self.headers.items()
def get(self, header, alternate=None):
return self.headers.get(header, alternate)
def set_cookie(
self,
key,
value="",
max_age=None,
expires=None,
path="/",
domain=None,
secure=False,
httponly=False,
samesite=None,
):
"""
Set a cookie.
``expires`` can be:
- a string in the correct format,
- a naive ``datetime.datetime`` object in UTC,
- an aware ``datetime.datetime`` object in any time zone.
If it is a ``datetime.datetime`` object then calculate ``max_age``.
``max_age`` can be:
- int/float specifying seconds,
- ``datetime.timedelta`` object.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
if timezone.is_naive(expires):
expires = timezone.make_aware(expires, timezone.utc)
delta = expires - datetime.datetime.now(tz=timezone.utc)
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
if max_age is not None:
raise ValueError("'expires' and 'max_age' can't be used together.")
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]["expires"] = expires
else:
self.cookies[key]["expires"] = ""
if max_age is not None:
if isinstance(max_age, datetime.timedelta):
max_age = max_age.total_seconds()
self.cookies[key]["max-age"] = int(max_age)
            # IE requires expires, so set it if it hasn't been already.
if not expires:
self.cookies[key]["expires"] = http_date(time.time() + max_age)
if path is not None:
self.cookies[key]["path"] = path
if domain is not None:
self.cookies[key]["domain"] = domain
if secure:
self.cookies[key]["secure"] = True
if httponly:
self.cookies[key]["httponly"] = True
if samesite:
if samesite.lower() not in ("lax", "none", "strict"):
raise ValueError('samesite must be "lax", "none", or "strict".')
self.cookies[key]["samesite"] = samesite
def setdefault(self, key, value):
"""Set a header unless it has already been set."""
self.headers.setdefault(key, value)
def set_signed_cookie(self, key, value, salt="", **kwargs):
value = signing.get_cookie_signer(salt=key + salt).sign(value)
return self.set_cookie(key, value, **kwargs)
def delete_cookie(self, key, path="/", domain=None, samesite=None):
# Browsers can ignore the Set-Cookie header if the cookie doesn't use
# the secure flag and:
# - the cookie name starts with "__Host-" or "__Secure-", or
# - the samesite is "none".
secure = key.startswith(("__Secure-", "__Host-")) or (
samesite and samesite.lower() == "none"
)
self.set_cookie(
key,
max_age=0,
path=path,
domain=domain,
secure=secure,
expires="Thu, 01 Jan 1970 00:00:00 GMT",
samesite=samesite,
)
# Common methods used by subclasses
def make_bytes(self, value):
"""Turn a value into a bytestring encoded in the output charset."""
# Per PEP 3333, this response body must be bytes. To avoid returning
# an instance of a subclass, this function returns `bytes(value)`.
# This doesn't make a copy when `value` already contains bytes.
# Handle string types -- we can't rely on force_bytes here because:
# - Python attempts str conversion first
# - when self._charset != 'utf-8' it re-encodes the content
if isinstance(value, (bytes, memoryview)):
return bytes(value)
if isinstance(value, str):
return bytes(value.encode(self.charset))
# Handle non-string types.
return str(value).encode(self.charset)
# These methods partially implement the file-like object interface.
# See https://docs.python.org/library/io.html#io.IOBase
# The WSGI server must call this method upon completion of the request.
# See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
def close(self):
for closer in self._resource_closers:
try:
closer()
except Exception:
pass
# Free resources that were still referenced.
self._resource_closers.clear()
self.closed = True
signals.request_finished.send(sender=self._handler_class)
def write(self, content):
raise OSError("This %s instance is not writable" % self.__class__.__name__)
def flush(self):
pass
def tell(self):
raise OSError(
"This %s instance cannot tell its position" % self.__class__.__name__
)
# These methods partially implement a stream-like object interface.
# See https://docs.python.org/library/io.html#io.IOBase
def readable(self):
return False
def seekable(self):
return False
def writable(self):
return False
def writelines(self, lines):
raise OSError("This %s instance is not writable" % self.__class__.__name__)
class HttpResponse(HttpResponseBase):
"""
An HTTP response class with a string as content.
This content can be read, appended to, or replaced.
"""
streaming = False
def __init__(self, content=b"", *args, **kwargs):
super().__init__(*args, **kwargs)
# Content is a bytestring. See the `content` property methods.
self.content = content
def __repr__(self):
return "<%(cls)s status_code=%(status_code)d%(content_type)s>" % {
"cls": self.__class__.__name__,
"status_code": self.status_code,
"content_type": self._content_type_for_repr,
}
def serialize(self):
"""Full HTTP message, including headers, as a bytestring."""
return self.serialize_headers() + b"\r\n\r\n" + self.content
__bytes__ = serialize
@property
def content(self):
return b"".join(self._container)
@content.setter
def content(self, value):
# Consume iterators upon assignment to allow repeated iteration.
if hasattr(value, "__iter__") and not isinstance(
value, (bytes, memoryview, str)
):
content = b"".join(self.make_bytes(chunk) for chunk in value)
if hasattr(value, "close"):
try:
value.close()
except Exception:
pass
else:
content = self.make_bytes(value)
# Create a list of properly encoded bytestrings to support write().
self._container = [content]
def __iter__(self):
return iter(self._container)
def write(self, content):
self._container.append(self.make_bytes(content))
def tell(self):
return len(self.content)
def getvalue(self):
return self.content
def writable(self):
return True
def writelines(self, lines):
for line in lines:
self.write(line)
class StreamingHttpResponse(HttpResponseBase):
"""
A streaming HTTP response class with an iterator as content.
This should only be iterated once, when the response is streamed to the
client. However, it can be appended to or replaced with a new iterator
that wraps the original content (or yields entirely new content).
"""
streaming = True
def __init__(self, streaming_content=(), *args, **kwargs):
super().__init__(*args, **kwargs)
# `streaming_content` should be an iterable of bytestrings.
# See the `streaming_content` property methods.
self.streaming_content = streaming_content
def __repr__(self):
return "<%(cls)s status_code=%(status_code)d%(content_type)s>" % {
"cls": self.__class__.__qualname__,
"status_code": self.status_code,
"content_type": self._content_type_for_repr,
}
@property
def content(self):
raise AttributeError(
"This %s instance has no `content` attribute. Use "
"`streaming_content` instead." % self.__class__.__name__
)
@property
def streaming_content(self):
return map(self.make_bytes, self._iterator)
@streaming_content.setter
def streaming_content(self, value):
self._set_streaming_content(value)
def _set_streaming_content(self, value):
# Ensure we can never iterate on "value" more than once.
self._iterator = iter(value)
if hasattr(value, "close"):
self._resource_closers.append(value.close)
def __iter__(self):
return self.streaming_content
def getvalue(self):
return b"".join(self.streaming_content)
class FileResponse(StreamingHttpResponse):
"""
A streaming HTTP response class optimized for files.
"""
block_size = 4096
def __init__(self, *args, as_attachment=False, filename="", **kwargs):
self.as_attachment = as_attachment
self.filename = filename
self._no_explicit_content_type = (
"content_type" not in kwargs or kwargs["content_type"] is None
)
super().__init__(*args, **kwargs)
def _set_streaming_content(self, value):
if not hasattr(value, "read"):
self.file_to_stream = None
return super()._set_streaming_content(value)
self.file_to_stream = filelike = value
if hasattr(filelike, "close"):
self._resource_closers.append(filelike.close)
value = iter(lambda: filelike.read(self.block_size), b"")
self.set_headers(filelike)
super()._set_streaming_content(value)
def set_headers(self, filelike):
"""
Set some common response headers (Content-Length, Content-Type, and
Content-Disposition) based on the `filelike` response content.
"""
filename = getattr(filelike, "name", "")
filename = filename if isinstance(filename, str) else ""
seekable = hasattr(filelike, "seek") and (
not hasattr(filelike, "seekable") or filelike.seekable()
)
if hasattr(filelike, "tell"):
if seekable:
initial_position = filelike.tell()
filelike.seek(0, io.SEEK_END)
self.headers["Content-Length"] = filelike.tell() - initial_position
filelike.seek(initial_position)
elif hasattr(filelike, "getbuffer"):
self.headers["Content-Length"] = (
filelike.getbuffer().nbytes - filelike.tell()
)
elif os.path.exists(filename):
self.headers["Content-Length"] = (
os.path.getsize(filename) - filelike.tell()
)
elif seekable:
self.headers["Content-Length"] = sum(
iter(lambda: len(filelike.read(self.block_size)), 0)
)
filelike.seek(-int(self.headers["Content-Length"]), io.SEEK_END)
filename = os.path.basename(self.filename or filename)
if self._no_explicit_content_type:
if filename:
content_type, encoding = mimetypes.guess_type(filename)
# Encoding isn't set to prevent browsers from automatically
# uncompressing files.
content_type = {
"bzip2": "application/x-bzip",
"gzip": "application/gzip",
"xz": "application/x-xz",
}.get(encoding, content_type)
self.headers["Content-Type"] = (
content_type or "application/octet-stream"
)
else:
self.headers["Content-Type"] = "application/octet-stream"
if filename:
disposition = "attachment" if self.as_attachment else "inline"
try:
filename.encode("ascii")
file_expr = 'filename="{}"'.format(filename)
except UnicodeEncodeError:
file_expr = "filename*=utf-8''{}".format(quote(filename))
self.headers["Content-Disposition"] = "{}; {}".format(
disposition, file_expr
)
elif self.as_attachment:
self.headers["Content-Disposition"] = "attachment"
class HttpResponseRedirectBase(HttpResponse):
allowed_schemes = ["http", "https", "ftp"]
def __init__(self, redirect_to, *args, **kwargs):
super().__init__(*args, **kwargs)
self["Location"] = iri_to_uri(redirect_to)
parsed = urlparse(str(redirect_to))
if parsed.scheme and parsed.scheme not in self.allowed_schemes:
raise DisallowedRedirect(
"Unsafe redirect to URL with protocol '%s'" % parsed.scheme
)
url = property(lambda self: self["Location"])
def __repr__(self):
return (
'<%(cls)s status_code=%(status_code)d%(content_type)s, url="%(url)s">'
% {
"cls": self.__class__.__name__,
"status_code": self.status_code,
"content_type": self._content_type_for_repr,
"url": self.url,
}
)
class HttpResponseRedirect(HttpResponseRedirectBase):
status_code = 302
class HttpResponsePermanentRedirect(HttpResponseRedirectBase):
status_code = 301
class HttpResponseNotModified(HttpResponse):
status_code = 304
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
del self["content-type"]
@HttpResponse.content.setter
def content(self, value):
if value:
raise AttributeError(
"You cannot set content to a 304 (Not Modified) response"
)
self._container = []
class HttpResponseBadRequest(HttpResponse):
status_code = 400
class HttpResponseNotFound(HttpResponse):
status_code = 404
class HttpResponseForbidden(HttpResponse):
status_code = 403
class HttpResponseNotAllowed(HttpResponse):
status_code = 405
def __init__(self, permitted_methods, *args, **kwargs):
super().__init__(*args, **kwargs)
self["Allow"] = ", ".join(permitted_methods)
def __repr__(self):
return "<%(cls)s [%(methods)s] status_code=%(status_code)d%(content_type)s>" % {
"cls": self.__class__.__name__,
"status_code": self.status_code,
"content_type": self._content_type_for_repr,
"methods": self["Allow"],
}
class HttpResponseGone(HttpResponse):
status_code = 410
class HttpResponseServerError(HttpResponse):
status_code = 500
class Http404(Exception):
pass
class JsonResponse(HttpResponse):
"""
An HTTP response class that consumes data to be serialized to JSON.
:param data: Data to be dumped into json. By default only ``dict`` objects
are allowed to be passed due to a security flaw before ECMAScript 5. See
the ``safe`` parameter for more information.
:param encoder: Should be a json encoder class. Defaults to
``django.core.serializers.json.DjangoJSONEncoder``.
:param safe: Controls if only ``dict`` objects may be serialized. Defaults
to ``True``.
:param json_dumps_params: A dictionary of kwargs passed to json.dumps().
"""
def __init__(
self,
data,
encoder=DjangoJSONEncoder,
safe=True,
json_dumps_params=None,
**kwargs,
):
if safe and not isinstance(data, dict):
raise TypeError(
"In order to allow non-dict objects to be serialized set the "
"safe parameter to False."
)
if json_dumps_params is None:
json_dumps_params = {}
kwargs.setdefault("content_type", "application/json")
data = json.dumps(data, cls=encoder, **json_dumps_params)
super().__init__(content=data, **kwargs)
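# A minimal usage sketch for JsonResponse (illustrative):
#
#     >>> JsonResponse({"status": "ok"})
#     >>> JsonResponse([1, 2, 3], safe=False)  # non-dict data requires safe=False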
f02957a7092ed98681f14809808c25bf17211da2a50cc9d1826f370512b4c875
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import operator
import warnings
from itertools import chain, islice
import django
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY,
IntegrityError,
NotSupportedError,
connections,
router,
transaction,
)
from django.db.models import AutoField, DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP, OnConflict
from django.db.models.deletion import Collector
from django.db.models.expressions import Case, F, Ref, Value, When
from django.db.models.functions import Cast, Trunc
from django.db.models.query_utils import FilteredRelation, Q
from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
from django.db.models.utils import create_namedtuple_class, resolve_callables
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property, partition
# The maximum number of results to fetch in a get() query.
MAX_GET_RESULTS = 21
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
class BaseIterable:
def __init__(
self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
self.queryset = queryset
self.chunked_fetch = chunked_fetch
self.chunk_size = chunk_size
class ModelIterable(BaseIterable):
"""Iterable that yields a model instance for each row."""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
)
select, klass_info, annotation_col_map = (
compiler.select,
compiler.klass_info,
compiler.annotation_col_map,
)
model_cls = klass_info["model"]
select_fields = klass_info["select_fields"]
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [
f[0].target.attname for f in select[model_fields_start:model_fields_end]
]
related_populators = get_related_populators(klass_info, select, db)
known_related_objects = [
(
field,
related_objs,
operator.attrgetter(
*[
field.attname
if from_field == "self"
else queryset.model._meta.get_field(from_field).attname
for from_field in field.from_fields
]
),
)
for field, related_objs in queryset._known_related_objects.items()
]
for row in compiler.results_iter(results):
obj = model_cls.from_db(
db, init_list, row[model_fields_start:model_fields_end]
)
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model.
for field, rel_objs, rel_getter in known_related_objects:
# Avoid overwriting objects loaded by, e.g., select_related().
if field.is_cached(obj):
continue
rel_obj_id = rel_getter(obj)
try:
rel_obj = rel_objs[rel_obj_id]
except KeyError:
pass # May happen in qs1 | qs2 scenarios.
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
indexes = range(len(names))
for row in compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
):
yield {names[i]: row[i] for i in indexes}
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if queryset._fields:
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
fields = [
*queryset._fields,
*(f for f in query.annotation_select if f not in queryset._fields),
]
if fields != names:
# Reorder according to fields.
index_map = {name: idx for idx, name in enumerate(names)}
rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
return map(
rowfactory,
compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
),
)
return compiler.results_iter(
tuple_expected=True,
chunked_fetch=self.chunked_fetch,
chunk_size=self.chunk_size,
)
class NamedValuesListIterable(ValuesListIterable):
"""
Iterable returned by QuerySet.values_list(named=True) that yields a
namedtuple for each row.
"""
def __iter__(self):
queryset = self.queryset
if queryset._fields:
names = queryset._fields
else:
query = queryset.query
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
tuple_class = create_namedtuple_class(*names)
new = tuple.__new__
for row in super().__iter__():
yield new(tuple_class, row)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that yields single
values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
):
yield row[0]
class QuerySet:
"""Represent a lazy database lookup for a set of objects."""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self._query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = ()
self._prefetch_done = False
self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
self._defer_next_filter = False
self._deferred_filter = None
@property
def query(self):
if self._deferred_filter:
negate, args, kwargs = self._deferred_filter
self._filter_or_exclude_inplace(negate, args, kwargs)
self._deferred_filter = None
return self._query
@query.setter
def query(self, value):
if value.values_select:
self._iterable_class = ValuesIterable
self._query = value
def as_manager(cls):
        # Address the circular dependency between `QuerySet` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == "_result_cache":
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__}
def __setstate__(self, state):
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
if pickled_version != django.__version__:
warnings.warn(
"Pickled queryset instance's Django version %s does not "
"match the current version %s."
% (pickled_version, django.__version__),
RuntimeWarning,
stacklevel=2,
)
else:
warnings.warn(
"Pickled queryset instance's Django version is not specified.",
RuntimeWarning,
stacklevel=2,
)
self.__dict__.update(state)
def __repr__(self):
data = list(self[: REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return "<%s %r>" % (self.__class__.__name__, data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler.execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql.compiler.results_iter()
               - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __getitem__(self, k):
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError(
"QuerySet indices must be integers or slices, not %s."
% type(k).__name__
)
if (isinstance(k, int) and k < 0) or (
isinstance(k, slice)
and (
(k.start is not None and k.start < 0)
or (k.stop is not None and k.stop < 0)
)
):
raise ValueError("Negative indexing is not supported.")
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._chain()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[:: k.step] if k.step else qs
qs = self._chain()
qs.query.set_limits(k, k + 1)
qs._fetch_all()
return qs._result_cache[0]
def __class_getitem__(cls, *args, **kwargs):
return cls
def __and__(self, other):
self._check_operator_queryset(other, "&")
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._chain()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._check_operator_queryset(other, "|")
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
query = (
self
if self.query.can_filter()
else self.model._base_manager.filter(pk__in=self.values("pk"))
)
combined = query._chain()
combined._merge_known_related_objects(other)
if not other.query.can_filter():
other = other.model._base_manager.filter(pk__in=other.values("pk"))
combined.query.combine(other.query, sql.OR)
return combined
def __xor__(self, other):
self._check_operator_queryset(other, "^")
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
query = (
self
if self.query.can_filter()
else self.model._base_manager.filter(pk__in=self.values("pk"))
)
combined = query._chain()
combined._merge_known_related_objects(other)
if not other.query.can_filter():
other = other.model._base_manager.filter(pk__in=other.values("pk"))
combined.query.combine(other.query, sql.XOR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def _iterator(self, use_chunked_fetch, chunk_size):
iterable = self._iterable_class(
self,
chunked_fetch=use_chunked_fetch,
chunk_size=chunk_size or 2000,
)
if not self._prefetch_related_lookups or chunk_size is None:
yield from iterable
return
iterator = iter(iterable)
while results := list(islice(iterator, chunk_size)):
prefetch_related_objects(results, *self._prefetch_related_lookups)
yield from results
def iterator(self, chunk_size=None):
"""
An iterator over the results from applying this QuerySet to the
database. chunk_size must be provided for QuerySets that prefetch
related objects. Otherwise, a default chunk_size of 2000 is supplied.
"""
if chunk_size is None:
if self._prefetch_related_lookups:
# When the deprecation ends, replace with:
# raise ValueError(
# 'chunk_size must be provided when using '
# 'QuerySet.iterator() after prefetch_related().'
# )
warnings.warn(
"Using QuerySet.iterator() after prefetch_related() "
"without specifying chunk_size is deprecated.",
category=RemovedInDjango50Warning,
stacklevel=2,
)
elif chunk_size <= 0:
raise ValueError("Chunk size must be strictly positive.")
use_chunked_fetch = not connections[self.db].settings_dict.get(
"DISABLE_SERVER_SIDE_CURSORS"
)
return self._iterator(use_chunked_fetch, chunk_size)
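    # A usage sketch for iterator() (illustrative; `Author` is a hypothetical
    # model):
    #
    #     >>> for author in Author.objects.all().iterator(chunk_size=500):
    #     ...     print(author)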
def aggregate(self, *args, **kwargs):
"""
Return a dictionary containing the calculations (aggregation)
over the current queryset.
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions(
(*args, *kwargs.values()), method_name="aggregate"
)
for arg in args:
# The default_alias property raises TypeError if default_alias
# can't be set automatically or AttributeError if it isn't an
# attribute.
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.chain()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
annotation = query.annotations[alias]
if not annotation.contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
for expr in annotation.get_source_expressions():
if (
expr.contains_aggregate
and isinstance(expr, Ref)
and expr.refs in kwargs
):
name = expr.refs
raise exceptions.FieldError(
"Cannot compute %s('%s'): '%s' is an aggregate"
% (annotation.name, name, name)
)
return query.get_aggregation(self.db, kwargs)
def count(self):
"""
Perform a SELECT COUNT() and return the number of records as an
integer.
If the QuerySet is already fully cached, return the length of the
cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Perform the query and return a single object matching the given
keyword arguments.
"""
if self.query.combinator and (args or kwargs):
raise NotSupportedError(
"Calling QuerySet.get(...) with filters after %s() is not "
"supported." % self.query.combinator
)
clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
limit = None
if (
not clone.query.select_for_update
or connections[clone.db].features.supports_select_for_update_with_limit
):
limit = MAX_GET_RESULTS
clone.query.set_limits(high=limit)
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." % self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!"
% (
self.model._meta.object_name,
num if not limit or num < limit else "more than %s" % (limit - 1),
)
)
def create(self, **kwargs):
"""
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _prepare_for_bulk_create(self, objs):
for obj in objs:
if obj.pk is None:
# Populate new PK values.
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
obj._prepare_related_fields_for_save(operation_name="bulk_create")
def _check_bulk_create_options(
self, ignore_conflicts, update_conflicts, update_fields, unique_fields
):
if ignore_conflicts and update_conflicts:
raise ValueError(
"ignore_conflicts and update_conflicts are mutually exclusive."
)
db_features = connections[self.db].features
if ignore_conflicts:
if not db_features.supports_ignore_conflicts:
raise NotSupportedError(
"This database backend does not support ignoring conflicts."
)
return OnConflict.IGNORE
elif update_conflicts:
if not db_features.supports_update_conflicts:
raise NotSupportedError(
"This database backend does not support updating conflicts."
)
if not update_fields:
raise ValueError(
"Fields that will be updated when a row insertion fails "
"on conflicts must be provided."
)
if unique_fields and not db_features.supports_update_conflicts_with_target:
raise NotSupportedError(
"This database backend does not support updating "
"conflicts with specifying unique fields that can trigger "
"the upsert."
)
if not unique_fields and db_features.supports_update_conflicts_with_target:
raise ValueError(
"Unique fields that can trigger the upsert must be provided."
)
# Updating primary keys and non-concrete fields is forbidden.
update_fields = [self.model._meta.get_field(name) for name in update_fields]
if any(not f.concrete or f.many_to_many for f in update_fields):
raise ValueError(
"bulk_create() can only be used with concrete fields in "
"update_fields."
)
if any(f.primary_key for f in update_fields):
raise ValueError(
"bulk_create() cannot be used with primary keys in "
"update_fields."
)
if unique_fields:
# Primary key is allowed in unique_fields.
unique_fields = [
self.model._meta.get_field(name)
for name in unique_fields
if name != "pk"
]
if any(not f.concrete or f.many_to_many for f in unique_fields):
raise ValueError(
"bulk_create() can only be used with concrete fields "
"in unique_fields."
)
return OnConflict.UPDATE
return None
def bulk_create(
self,
objs,
batch_size=None,
ignore_conflicts=False,
update_conflicts=False,
update_fields=None,
unique_fields=None,
):
"""
Insert each of the instances into the database. Do *not* call
save() on each of the instances, do not send any pre/post_save
signals, and do not set the primary key attribute if it is an
autoincrement field (except if features.can_return_rows_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_rows_from_bulk_insert=True), so
        # you can't insert into the child tables which reference this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
if batch_size is not None and batch_size <= 0:
raise ValueError("Batch size must be a positive integer.")
        # Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
on_conflict = self._check_bulk_create_options(
ignore_conflicts,
update_conflicts,
update_fields,
unique_fields,
)
self._for_write = True
opts = self.model._meta
fields = opts.concrete_fields
objs = list(objs)
self._prepare_for_bulk_create(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
returned_columns = self._batched_insert(
objs_with_pk,
fields,
batch_size,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
for obj_with_pk, results in zip(objs_with_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
if field != opts.pk:
setattr(obj_with_pk, field.attname, result)
for obj_with_pk in objs_with_pk:
obj_with_pk._state.adding = False
obj_with_pk._state.db = self.db
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
returned_columns = self._batched_insert(
objs_without_pk,
fields,
batch_size,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
connection = connections[self.db]
if (
connection.features.can_return_rows_from_bulk_insert
and on_conflict is None
):
assert len(returned_columns) == len(objs_without_pk)
for obj_without_pk, results in zip(objs_without_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
setattr(obj_without_pk, field.attname, result)
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
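    # A usage sketch for bulk_create() (illustrative; `Entry`, `headline`, and
    # `slug` are hypothetical):
    #
    #     >>> Entry.objects.bulk_create([Entry(headline="A"), Entry(headline="B")])
    #     >>> Entry.objects.bulk_create(  # upsert on databases that support it
    #     ...     objs,
    #     ...     update_conflicts=True,
    #     ...     update_fields=["headline"],
    #     ...     unique_fields=["slug"],
    #     ... )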
def bulk_update(self, objs, fields, batch_size=None):
"""
Update the given fields in each of the given objects in the database.
"""
        if batch_size is not None and batch_size <= 0:
raise ValueError("Batch size must be a positive integer.")
if not fields:
raise ValueError("Field names must be given to bulk_update().")
objs = tuple(objs)
if any(obj.pk is None for obj in objs):
raise ValueError("All bulk_update() objects must have a primary key set.")
fields = [self.model._meta.get_field(name) for name in fields]
if any(not f.concrete or f.many_to_many for f in fields):
raise ValueError("bulk_update() can only be used with concrete fields.")
if any(f.primary_key for f in fields):
raise ValueError("bulk_update() cannot be used with primary key fields.")
if not objs:
return 0
for obj in objs:
obj._prepare_related_fields_for_save(
operation_name="bulk_update", fields=fields
)
# PK is used twice in the resulting update query, once in the filter
# and once in the WHEN. Each field will also have one CAST.
self._for_write = True
connection = connections[self.db]
max_batch_size = connection.ops.bulk_batch_size(["pk", "pk"] + fields, objs)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
requires_casting = connection.features.requires_casted_case_in_updates
batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size))
updates = []
for batch_objs in batches:
update_kwargs = {}
for field in fields:
when_statements = []
for obj in batch_objs:
attr = getattr(obj, field.attname)
if not hasattr(attr, "resolve_expression"):
attr = Value(attr, output_field=field)
when_statements.append(When(pk=obj.pk, then=attr))
case_statement = Case(*when_statements, output_field=field)
if requires_casting:
case_statement = Cast(case_statement, output_field=field)
update_kwargs[field.attname] = case_statement
updates.append(([obj.pk for obj in batch_objs], update_kwargs))
rows_updated = 0
queryset = self.using(self.db)
with transaction.atomic(using=self.db, savepoint=False):
for pks, update_kwargs in updates:
rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs)
return rows_updated
bulk_update.alters_data = True
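    # A usage sketch for bulk_update() (illustrative; `Entry` and `rating` are
    # hypothetical):
    #
    #     >>> entries = list(Entry.objects.all())
    #     >>> for entry in entries:
    #     ...     entry.rating += 1
    #     >>> Entry.objects.bulk_update(entries, ["rating"], batch_size=100)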
def get_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, creating one if necessary.
Return a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
# Try to create an object using passed params.
try:
with transaction.atomic(using=self.db):
params = dict(resolve_callables(params))
return self.create(**params), True
except IntegrityError:
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
pass
raise
def update_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, updating one with defaults
if it exists, otherwise create a new one.
Return a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
self._for_write = True
with transaction.atomic(using=self.db):
# Lock the row so that a concurrent update is blocked until
# update_or_create() has performed its save.
obj, created = self.select_for_update().get_or_create(defaults, **kwargs)
if created:
return obj, created
for k, v in resolve_callables(defaults):
setattr(obj, k, v)
obj.save(using=self.db)
return obj, False
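    # A usage sketch for get_or_create()/update_or_create() (illustrative;
    # `Entry`, `slug`, and `headline` are hypothetical):
    #
    #     >>> obj, created = Entry.objects.get_or_create(
    #     ...     slug="intro", defaults={"headline": "Intro"}
    #     ... )
    #     >>> obj, created = Entry.objects.update_or_create(
    #     ...     slug="intro", defaults={"headline": "Updated"}
    #     ... )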
def _extract_model_params(self, defaults, **kwargs):
"""
Prepare `params` for creating a model instance based on the given
kwargs; for use by get_or_create().
"""
defaults = defaults or {}
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
property_names = self.model._meta._property_names
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
# It's okay to use a model's property if it has a setter.
if not (param in property_names and getattr(self.model, param).fset):
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'."
% (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
)
)
return params
def _earliest(self, *fields):
"""
Return the earliest object according to fields (if given) or by the
model's Meta.get_latest_by.
"""
if fields:
order_by = fields
else:
order_by = getattr(self.model._meta, "get_latest_by")
if order_by and not isinstance(order_by, (tuple, list)):
order_by = (order_by,)
if order_by is None:
raise ValueError(
"earliest() and latest() require either fields as positional "
"arguments or 'get_latest_by' in the model's Meta."
)
obj = self._chain()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force=True)
obj.query.add_ordering(*order_by)
return obj.get()
def earliest(self, *fields):
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
return self._earliest(*fields)
def latest(self, *fields):
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
return self.reverse()._earliest(*fields)
def first(self):
"""Return the first object of a query or None if no match is found."""
for obj in (self if self.ordered else self.order_by("pk"))[:1]:
return obj
def last(self):
"""Return the last object of a query or None if no match is found."""
for obj in (self.reverse() if self.ordered else self.order_by("-pk"))[:1]:
return obj
def in_bulk(self, id_list=None, *, field_name="pk"):
"""
Return a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
"""
if self.query.is_sliced:
raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().")
opts = self.model._meta
unique_fields = [
constraint.fields[0]
for constraint in opts.total_unique_constraints
if len(constraint.fields) == 1
]
if (
field_name != "pk"
and not opts.get_field(field_name).unique
and field_name not in unique_fields
and self.query.distinct_fields != (field_name,)
):
raise ValueError(
"in_bulk()'s field_name must be a unique field but %r isn't."
% field_name
)
if id_list is not None:
if not id_list:
return {}
filter_key = "{}__in".format(field_name)
batch_size = connections[self.db].features.max_query_params
id_list = tuple(id_list)
# If the database has a limit on the number of query parameters
# (e.g. SQLite), retrieve objects in batches if necessary.
if batch_size and batch_size < len(id_list):
qs = ()
for offset in range(0, len(id_list), batch_size):
batch = id_list[offset : offset + batch_size]
qs += tuple(self.filter(**{filter_key: batch}).order_by())
else:
qs = self.filter(**{filter_key: id_list}).order_by()
else:
qs = self._chain()
return {getattr(obj, field_name): obj for obj in qs}
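    # A usage sketch for in_bulk() (illustrative; `Entry` is hypothetical and
    # `slug` is assumed to be unique):
    #
    #     >>> Entry.objects.in_bulk([1, 2, 3])
    #     >>> Entry.objects.in_bulk(["intro"], field_name="slug")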
def delete(self):
"""Delete the records in the current QuerySet."""
self._not_support_combined_queries("delete")
if self.query.is_sliced:
raise TypeError("Cannot use 'limit' or 'offset' with delete().")
if self.query.distinct or self.query.distinct_fields:
raise TypeError("Cannot call delete() after .distinct().")
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._chain()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force=True)
collector = Collector(using=del_query.db, origin=self)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Delete objects found from the given queryset in single direct SQL
query. No signals are sent and there is no protection for cascades.
"""
query = self.query.clone()
query.__class__ = sql.DeleteQuery
cursor = query.get_compiler(using).execute_sql(CURSOR)
if cursor:
with cursor:
return cursor.rowcount
return 0
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Update all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
self._not_support_combined_queries("update")
if self.query.is_sliced:
raise TypeError("Cannot update a query once a slice has been taken.")
self._for_write = True
query = self.query.chain(sql.UpdateQuery)
query.add_update_values(kwargs)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
with transaction.mark_for_rollback_on_error(using=self.db):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update() that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
if self.query.is_sliced:
raise TypeError("Cannot update a query once a slice has been taken.")
query = self.query.chain(sql.UpdateQuery)
query.add_update_fields(values)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def contains(self, obj):
"""Return True if the queryset contains an object."""
self._not_support_combined_queries("contains")
if self._fields is not None:
raise TypeError(
"Cannot call QuerySet.contains() after .values() or .values_list()."
)
try:
if obj._meta.concrete_model != self.model._meta.concrete_model:
return False
except AttributeError:
raise TypeError("'obj' must be a model instance.")
if obj.pk is None:
raise ValueError("QuerySet.contains() cannot be used on unsaved objects.")
if self._result_cache is not None:
return obj in self._result_cache
return self.filter(pk=obj.pk).exists()
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def explain(self, *, format=None, **options):
return self.query.explain(using=self.db, format=format, **options)
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=(), translations=None, using=None):
if using is None:
using = self.db
qs = RawQuerySet(
raw_query,
model=self.model,
params=params,
translations=translations,
using=using,
)
qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
return qs
def _values(self, *fields, **expressions):
clone = self._chain()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, flat=False, named=False):
if flat and named:
raise TypeError("'flat' and 'named' can't be used together.")
if flat and len(fields) > 1:
raise TypeError(
"'flat' is not valid when values_list is called with more than one "
"field."
)
field_names = {f for f in fields if not hasattr(f, "resolve_expression")}
_fields = []
expressions = {}
counter = 1
for field in fields:
if hasattr(field, "resolve_expression"):
field_id_prefix = getattr(
field, "default_alias", field.__class__.__name__.lower()
)
while True:
field_id = field_id_prefix + str(counter)
counter += 1
if field_id not in field_names:
break
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = (
NamedValuesListIterable
if named
else FlatValuesListIterable
if flat
else ValuesListIterable
)
return clone
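    # A usage sketch for values()/values_list() (illustrative; `Entry` is a
    # hypothetical model):
    #
    #     >>> Entry.objects.values("id", "headline")         # dicts
    #     >>> Entry.objects.values_list("id", "headline")    # tuples
    #     >>> Entry.objects.values_list("id", flat=True)     # bare values
    #     >>> Entry.objects.values_list("id", named=True)    # namedtuples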
def dates(self, field_name, kind, order="ASC"):
"""
Return a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
if kind not in ("year", "month", "week", "day"):
raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.")
if order not in ("ASC", "DESC"):
raise ValueError("'order' must be either 'ASC' or 'DESC'.")
return (
self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name),
)
.values_list("datefield", flat=True)
.distinct()
.filter(plain_field__isnull=False)
.order_by(("-" if order == "DESC" else "") + "datefield")
)
# RemovedInDjango50Warning: when the deprecation ends, remove is_dst
# argument.
def datetimes(
self, field_name, kind, order="ASC", tzinfo=None, is_dst=timezone.NOT_PASSED
):
"""
Return a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
if kind not in ("year", "month", "week", "day", "hour", "minute", "second"):
raise ValueError(
"'kind' must be one of 'year', 'month', 'week', 'day', "
"'hour', 'minute', or 'second'."
)
if order not in ("ASC", "DESC"):
raise ValueError("'order' must be either 'ASC' or 'DESC'.")
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return (
self.annotate(
datetimefield=Trunc(
field_name,
kind,
output_field=DateTimeField(),
tzinfo=tzinfo,
is_dst=is_dst,
),
plain_field=F(field_name),
)
.values_list("datetimefield", flat=True)
.distinct()
.filter(plain_field__isnull=False)
.order_by(("-" if order == "DESC" else "") + "datetimefield")
)
def none(self):
"""Return an empty QuerySet."""
clone = self._chain()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Return a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._chain()
def filter(self, *args, **kwargs):
"""
Return a new QuerySet instance with the args ANDed to the existing
set.
"""
self._not_support_combined_queries("filter")
return self._filter_or_exclude(False, args, kwargs)
def exclude(self, *args, **kwargs):
"""
Return a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
self._not_support_combined_queries("exclude")
return self._filter_or_exclude(True, args, kwargs)
def _filter_or_exclude(self, negate, args, kwargs):
if (args or kwargs) and self.query.is_sliced:
raise TypeError("Cannot filter a query once a slice has been taken.")
clone = self._chain()
if self._defer_next_filter:
self._defer_next_filter = False
clone._deferred_filter = negate, args, kwargs
else:
clone._filter_or_exclude_inplace(negate, args, kwargs)
return clone
def _filter_or_exclude_inplace(self, negate, args, kwargs):
if negate:
self._query.add_q(~Q(*args, **kwargs))
else:
self._query.add_q(Q(*args, **kwargs))
def complex_filter(self, filter_obj):
"""
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object or a dictionary of keyword lookup
arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q):
clone = self._chain()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(False, args=(), kwargs=filter_obj)
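    # Illustrative usage (editorial sketch; ``Entry`` is hypothetical). Both a
    # Q object and a plain keyword-lookup dict are accepted:
    #
    #     Entry.objects.complex_filter(Q(headline__startswith="What") | Q(pk=1))
    #     Entry.objects.complex_filter({"headline__startswith": "What"})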
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._chain()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(force=True)
clone.query.clear_limits()
clone.query.combined_queries = (self.query,) + tuple(
qs.query for qs in other_qs
)
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
# If the query is an EmptyQuerySet, combine all nonempty querysets.
if isinstance(self, EmptyQuerySet):
qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
if not qs:
return self
if len(qs) == 1:
return qs[0]
return qs[0]._combinator_query("union", *qs[1:], all=all)
return self._combinator_query("union", *other_qs, all=all)
def intersection(self, *other_qs):
# If any query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
for other in other_qs:
if isinstance(other, EmptyQuerySet):
return other
return self._combinator_query("intersection", *other_qs)
def difference(self, *other_qs):
# If the query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
return self._combinator_query("difference", *other_qs)
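    # Illustrative usage of the combinator methods (editorial sketch;
    # ``Author`` is hypothetical). The resulting querysets refuse most further
    # modification, which is what _not_support_combined_queries() enforces:
    #
    #     qs1 = Author.objects.filter(name__startswith="A")
    #     qs2 = Author.objects.filter(name__endswith="z")
    #     qs1.union(qs2)            # SQL UNION (distinct rows)
    #     qs1.union(qs2, all=True)  # SQL UNION ALL
    #     qs1.intersection(qs2)     # SQL INTERSECT
    #     qs1.difference(qs2)       # SQL EXCEPT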
def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):
"""
Return a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError("The nowait option cannot be used with skip_locked.")
obj = self._chain()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
obj.query.select_for_update_of = of
obj.query.select_for_no_key_update = no_key
return obj
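    # Illustrative usage (editorial sketch; ``Entry`` is hypothetical). On
    # most backends the lock is only valid inside a transaction:
    #
    #     from django.db import transaction
    #     with transaction.atomic():
    #         for entry in Entry.objects.select_for_update(skip_locked=True):
    #             ...  # rows locked by others are skipped, not waited on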
def select_related(self, *fields):
"""
Return a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, clear the list.
"""
self._not_support_combined_queries("select_related")
if self._fields is not None:
raise TypeError(
"Cannot call select_related() after .values() or .values_list()"
)
obj = self._chain()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
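    # Illustrative usage (editorial sketch; ``Book`` is hypothetical):
    #
    #     Book.objects.select_related("author")             # JOIN author
    #     Book.objects.select_related("author__publisher")  # nested JOIN
    #     qs.select_related(None)                           # clear the list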
def prefetch_related(self, *lookups):
"""
Return a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, append to the list of
prefetch lookups. If prefetch_related(None) is called, clear the list.
"""
self._not_support_combined_queries("prefetch_related")
clone = self._chain()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
for lookup in lookups:
if isinstance(lookup, Prefetch):
lookup = lookup.prefetch_to
lookup = lookup.split(LOOKUP_SEP, 1)[0]
if lookup in self.query._filtered_relations:
raise ValueError(
"prefetch_related() is not supported with FilteredRelation."
)
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
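    # Illustrative usage (editorial sketch; ``Author``/``Book`` are
    # hypothetical):
    #
    #     Author.objects.prefetch_related("books")
    #     Author.objects.prefetch_related(
    #         Prefetch("books", queryset=Book.objects.filter(year__gt=2000))
    #     )
    #     Author.objects.prefetch_related(None)  # clear the lookup list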
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
self._not_support_combined_queries("annotate")
return self._annotate(args, kwargs, select=True)
def alias(self, *args, **kwargs):
"""
Return a query set with added aliases for extra data or aggregations.
"""
self._not_support_combined_queries("alias")
return self._annotate(args, kwargs, select=False)
def _annotate(self, args, kwargs, select=True):
self._validate_values_are_expressions(
args + tuple(kwargs.values()), method_name="annotate"
)
annotations = {}
for arg in args:
# The default_alias property may raise a TypeError.
try:
if arg.default_alias in kwargs:
raise ValueError(
"The named annotation '%s' conflicts with the "
"default name for another annotation." % arg.default_alias
)
except TypeError:
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._chain()
names = self._fields
if names is None:
names = set(
chain.from_iterable(
(field.name, field.attname)
if hasattr(field, "attname")
else (field.name,)
for field in self.model._meta.get_fields()
)
)
for alias, annotation in annotations.items():
if alias in names:
raise ValueError(
"The annotation '%s' conflicts with a field on "
"the model." % alias
)
if isinstance(annotation, FilteredRelation):
clone.query.add_filtered_relation(annotation, alias)
else:
clone.query.add_annotation(
annotation,
alias,
is_summary=False,
select=select,
)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
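    # Illustrative usage (editorial sketch; ``Author`` is hypothetical).
    # annotate() selects the computed value; alias() only makes it available
    # for later filtering or ordering:
    #
    #     from django.db.models import Count
    #     Author.objects.annotate(num_books=Count("books"))
    #     Author.objects.alias(num_books=Count("books")).filter(num_books__gt=3)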
def order_by(self, *field_names):
"""Return a new QuerySet instance with the ordering changed."""
if self.query.is_sliced:
raise TypeError("Cannot reorder a query once a slice has been taken.")
obj = self._chain()
obj.query.clear_ordering(force=True, clear_default=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Return a new QuerySet instance that will select only distinct results.
"""
self._not_support_combined_queries("distinct")
if self.query.is_sliced:
raise TypeError(
"Cannot create distinct fields once a slice has been taken."
)
obj = self._chain()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(
self,
select=None,
where=None,
params=None,
tables=None,
order_by=None,
select_params=None,
):
"""Add extra SQL fragments to the query."""
self._not_support_combined_queries("extra")
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
clone = self._chain()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""Reverse the ordering of the QuerySet."""
if self.query.is_sliced:
raise TypeError("Cannot reverse a query once a slice has been taken.")
clone = self._chain()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defer the loading of data for certain fields until they are accessed.
Add the set of deferred fields to any existing set of deferred fields.
The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed.
"""
self._not_support_combined_queries("defer")
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._chain()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer(). Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
self._not_support_combined_queries("only")
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
for field in fields:
field = field.split(LOOKUP_SEP, 1)[0]
if field in self.query._filtered_relations:
raise ValueError("only() is not supported with FilteredRelation.")
clone = self._chain()
clone.query.add_immediate_loading(fields)
return clone
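    # Illustrative usage (editorial sketch; ``Entry`` is hypothetical):
    #
    #     Entry.objects.defer("body")              # load everything but body
    #     Entry.objects.only("headline")           # load only headline (+ pk)
    #     Entry.objects.defer("body").defer(None)  # clear all deferrals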
def using(self, alias):
"""Select which database this QuerySet should execute against."""
clone = self._chain()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Return True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model (or is empty).
"""
if isinstance(self, EmptyQuerySet):
return True
if self.query.extra_order_by or self.query.order_by:
return True
elif (
self.query.default_ordering
and self.query.get_meta().ordering
and
# A default ordering doesn't affect GROUP BY queries.
not self.query.group_by
):
return True
else:
return False
@property
def db(self):
"""Return the database used if this query is executed now."""
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(
self,
objs,
fields,
returning_fields=None,
raw=False,
using=None,
on_conflict=None,
update_fields=None,
unique_fields=None,
):
"""
Insert a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(
self.model,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(returning_fields)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(
self,
objs,
fields,
batch_size,
on_conflict=None,
update_fields=None,
unique_fields=None,
):
"""
Helper method for bulk_create() to insert objs one batch at a time.
"""
connection = connections[self.db]
ops = connection.ops
max_batch_size = max(ops.bulk_batch_size(fields, objs), 1)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
inserted_rows = []
bulk_return = connection.features.can_return_rows_from_bulk_insert
for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]:
if bulk_return and on_conflict is None:
inserted_rows.extend(
self._insert(
item,
fields=fields,
using=self.db,
returning_fields=self.model._meta.db_returning_fields,
)
)
else:
self._insert(
item,
fields=fields,
using=self.db,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
return inserted_rows
def _chain(self):
"""
Return a copy of the current QuerySet that's ready for another
operation.
"""
obj = self._clone()
if obj._sticky_filter:
obj.query.filter_is_sticky = True
obj._sticky_filter = False
return obj
def _clone(self):
"""
Return a copy of the current QuerySet. A lightweight alternative
to deepcopy().
"""
c = self.__class__(
model=self.model,
query=self.query.chain(),
using=self._db,
hints=self._hints,
)
c._sticky_filter = self._sticky_filter
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c._known_related_objects = self._known_related_objects
c._iterable_class = self._iterable_class
c._fields = self._fields
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicate that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""Check that two QuerySet classes may be merged."""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select)
or set(self.query.extra_select) != set(other.query.extra_select)
or set(self.query.annotation_select) != set(other.query.annotation_select)
):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def resolve_expression(self, *args, **kwargs):
if self._fields and len(self._fields) > 1:
            # A values() queryset can only be used as a nested query
            # if it's set up to select only a single field.
raise TypeError("Cannot use multi-field values as a filter value.")
query = self.query.resolve_expression(*args, **kwargs)
query._db = self._db
return query
resolve_expression.queryset_only = True
def _add_hints(self, **hints):
"""
Update hinting information for use by routers. Add new key/values or
overwrite existing key/values.
"""
self._hints.update(hints)
def _has_filters(self):
"""
Check if this QuerySet has any filtering going on. This isn't
        equivalent to checking if all objects are present in results, for
example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
@staticmethod
def _validate_values_are_expressions(values, method_name):
invalid_args = sorted(
str(arg) for arg in values if not hasattr(arg, "resolve_expression")
)
if invalid_args:
raise TypeError(
"QuerySet.%s() received non-expression(s): %s."
% (
method_name,
", ".join(invalid_args),
)
)
def _not_support_combined_queries(self, operation_name):
if self.query.combinator:
raise NotSupportedError(
"Calling QuerySet.%s() after %s() is not supported."
% (operation_name, self.query.combinator)
)
def _check_operator_queryset(self, other, operator_):
if self.query.combinator or other.query.combinator:
raise TypeError(f"Cannot use {operator_} operator with combined queryset.")
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
    Marker class for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
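# Illustrative usage (editorial sketch; ``Entry`` is hypothetical):
#
#     isinstance(Entry.objects.none(), EmptyQuerySet)        # True
#     isinstance(Entry.objects.filter(pk=1), EmptyQuerySet)  # False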
class RawQuerySet:
"""
Provide an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(
self,
raw_query,
model=None,
query=None,
params=(),
translations=None,
using=None,
hints=None,
):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params
self.translations = translations or {}
self._result_cache = None
self._prefetch_related_lookups = ()
self._prefetch_done = False
def resolve_model_init_order(self):
"""Resolve the init field names and value positions."""
converter = connections[self.db].introspection.identifier_converter
model_init_fields = [
f for f in self.model._meta.fields if converter(f.column) in self.columns
]
annotation_fields = [
(column, pos)
for pos, column in enumerate(self.columns)
if column not in self.model_fields
]
model_init_order = [
self.columns.index(converter(f.column)) for f in model_init_fields
]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def prefetch_related(self, *lookups):
"""Same as QuerySet.prefetch_related()"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def _prefetch_related_objects(self):
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def _clone(self):
"""Same as QuerySet._clone()"""
c = self.__class__(
self.raw_query,
model=self.model,
query=self.query,
params=self.params,
translations=self.translations,
using=self._db,
hints=self._hints,
)
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __iter__(self):
self._fetch_all()
return iter(self._result_cache)
def iterator(self):
# Cache some things for performance reasons outside the loop.
db = self.db
connection = connections[db]
compiler = connection.ops.compiler("SQLCompiler")(self.query, connection, db)
query = iter(self.query)
try:
(
model_init_names,
model_init_pos,
annotation_fields,
) = self.resolve_model_init_order()
if self.model._meta.pk.attname not in model_init_names:
raise exceptions.FieldDoesNotExist(
"Raw query must include the primary key"
)
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters(
[f.get_col(f.model._meta.db_table) if f else None for f in fields]
)
if converters:
query = compiler.apply_converters(query, converters)
for values in query:
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, "cursor") and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.query)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"""Return the database used if this query is executed now."""
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""Select the database this RawQuerySet should execute against."""
return RawQuerySet(
self.raw_query,
model=self.model,
query=self.query.chain(using=alias),
params=self.params,
translations=self.translations,
using=alias,
)
@cached_property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
# Ignore translations for nonexistent column names
try:
index = columns.index(query_name)
except ValueError:
pass
else:
columns[index] = model_name
return columns
@cached_property
def model_fields(self):
"""A dict mapping column names to model field names."""
converter = connections[self.db].introspection.identifier_converter
model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
model_fields[converter(column)] = field
return model_fields
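# Illustrative usage of RawQuerySet via Manager.raw() (editorial sketch;
# ``Person`` is hypothetical). ``translations`` renames query columns to
# model field names:
#
#     Person.objects.raw(
#         "SELECT id, full_name FROM people",
#         translations={"full_name": "name"},
#     )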
class Prefetch:
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and (
isinstance(queryset, RawQuerySet)
or (
hasattr(queryset, "_iterable_class")
and not issubclass(queryset._iterable_class, ModelIterable)
)
):
raise ValueError(
"Prefetch querysets cannot use raw(), values(), and values_list()."
)
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(
lookup.split(LOOKUP_SEP)[:-1] + [to_attr]
)
self.queryset = queryset
self.to_attr = to_attr
def __getstate__(self):
obj_dict = self.__dict__.copy()
if self.queryset is not None:
queryset = self.queryset._chain()
# Prevent the QuerySet from being evaluated
queryset._result_cache = []
queryset._prefetch_done = True
obj_dict["queryset"] = queryset
return obj_dict
def add_prefix(self, prefix):
self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[: level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if not isinstance(other, Prefetch):
return NotImplemented
return self.prefetch_to == other.prefetch_to
def __hash__(self):
return hash((self.__class__, self.prefetch_to))
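# Illustrative usage (editorial sketch; ``Author``/``Book`` are hypothetical).
# With to_attr, results are stored in a plain list attribute rather than in
# the related manager's cache:
#
#     recent = Prefetch(
#         "books", queryset=Book.objects.order_by("-year"), to_attr="recent_books"
#     )
#     for author in Author.objects.prefetch_related(recent):
#         author.recent_books  # a list, not a manager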
def normalize_prefetch_lookups(lookups, prefix=None):
"""Normalize lookups into Prefetch objects."""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if not model_instances:
return # nothing to do
# We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
while all_lookups:
lookup = all_lookups.pop()
if lookup.prefetch_to in done_queries:
if lookup.queryset is not None:
raise ValueError(
"'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups."
% lookup.prefetch_to
)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if not obj_list:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, "_prefetched_objects_cache"):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to the first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(
first_obj, through_attr, to_attr
)
if not attr_found:
raise AttributeError(
"Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()"
% (
through_attr,
first_obj.__class__.__name__,
lookup.prefetch_through,
)
)
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError(
"'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through
)
obj_to_fetch = None
if prefetcher is not None:
obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)]
if obj_to_fetch:
obj_list, additional_lookups = prefetch_one_level(
obj_to_fetch,
prefetcher,
lookup,
level,
)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (
prefetch_to in done_queries
and lookup in auto_lookups
and descriptor in followed_descriptors
):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(
reversed(additional_lookups), prefetch_to
)
auto_lookups.update(new_lookups)
all_lookups.extend(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
if through_attr in getattr(obj, "_prefetched_objects_cache", ()):
# If related objects have been prefetched, use the
# cache rather than the object's through_attr.
new_obj = list(obj._prefetched_objects_cache.get(through_attr))
else:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, find
an object that has a get_prefetch_queryset().
    Return a 4-tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a function that takes an instance and returns a boolean that is True if
the attribute has already been fetched for that instance)
"""
def has_to_attr_attribute(instance):
return hasattr(instance, to_attr)
prefetcher = None
is_fetched = has_to_attr_attribute
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, "get_prefetch_queryset"):
prefetcher = rel_obj_descriptor
is_fetched = rel_obj_descriptor.is_cached
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, "get_prefetch_queryset"):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(
getattr(instance.__class__, to_attr, None), cached_property
):
def has_cached_property(instance):
return to_attr in instance.__dict__
is_fetched = has_cached_property
else:
def in_prefetched_cache(instance):
return through_attr in instance._prefetched_objects_cache
is_fetched = in_prefetched_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects().
Run prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
Return the prefetched objects along with any additional prefetches that
must be done due to prefetch_related lookups found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache or field name to assign to,
# boolean that is True when the previous argument is a cache name vs a field name).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
(
rel_qs,
rel_obj_attr,
instance_attr,
single,
cache_name,
is_descriptor,
) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup)
for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ())
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = ()
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
        # of prefetch_related), so what applies to the first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = "to_attr={} conflicts with a field on the {} model."
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
if as_attr:
# A to_attr has been given for the prefetch.
setattr(obj, to_attr, val)
elif is_descriptor:
# cache_name points to a field name in obj.
# This field is a descriptor for a related object.
setattr(obj, cache_name, val)
else:
# No to_attr has been given for this prefetch operation and the
# cache_name does not point to a descriptor. Store the value of
# the field in the object's field cache.
obj._state.fields_cache[cache_name] = val
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
qs = manager._apply_rel_filters(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
    The actual creation of the objects is done in the populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - local_setter, remote_setter: Methods to set cached values on
# the object being populated and on the remote object. Usually
# these are Field.set_cached_value() methods.
select_fields = klass_info["select_fields"]
from_parent = klass_info["from_parent"]
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start : self.cols_end]
]
self.reorder_for_init = None
else:
attname_indexes = {
select[idx][0].target.attname: idx for idx in select_fields
}
model_init_attnames = (
f.attname for f in klass_info["model"]._meta.concrete_fields
)
self.init_list = [
attname for attname in model_init_attnames if attname in attname_indexes
]
self.reorder_for_init = operator.itemgetter(
*[attname_indexes[attname] for attname in self.init_list]
)
self.model_cls = klass_info["model"]
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
self.local_setter = klass_info["local_setter"]
self.remote_setter = klass_info["remote_setter"]
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start : self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
self.local_setter(from_obj, obj)
if obj is not None:
self.remote_setter(obj, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get("related_klass_infos", [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
|
576aa8b11bd240a9abe390616ff13cfe86bfe218b1a02553c029c353deeeaff7 | """
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
import copy
import functools
import inspect
from collections import namedtuple
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple(
"PathInfo",
"from_opts to_opts target_fields join_field m2m direct filtered_relation",
)
def subclasses(cls):
yield cls
for subclass in cls.__subclasses__():
yield from subclasses(subclass)
class Q(tree.Node):
"""
Encapsulate filters as objects that can then be combined logically (using
`&` and `|`).
"""
# Connection types
AND = "AND"
OR = "OR"
XOR = "XOR"
default = AND
conditional = True
def __init__(self, *args, _connector=None, _negated=False, **kwargs):
super().__init__(
children=[*args, *sorted(kwargs.items())],
connector=_connector,
negated=_negated,
)
def _combine(self, other, conn):
if not (isinstance(other, Q) or getattr(other, "conditional", False) is True):
raise TypeError(other)
if not self:
return other.copy() if hasattr(other, "copy") else copy.copy(other)
elif isinstance(other, Q) and not other:
_, args, kwargs = self.deconstruct()
return type(self)(*args, **kwargs)
obj = type(self)()
obj.connector = conn
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __xor__(self, other):
return self._combine(other, self.XOR)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
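    # Illustrative usage (editorial sketch; ``Entry`` is hypothetical):
    #
    #     q = Q(headline__startswith="What") & ~Q(pub_date__year=2005)
    #     Entry.objects.filter(q | Q(pk=1))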
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(
self,
reuse,
allow_joins=allow_joins,
split_subq=False,
check_filterable=False,
)
query.promote_joins(joins)
return clause
def deconstruct(self):
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.query_utils"):
path = path.replace("django.db.models.query_utils", "django.db.models")
args = tuple(self.children)
kwargs = {}
if self.connector != self.default:
kwargs["_connector"] = self.connector
if self.negated:
kwargs["_negated"] = True
return path, args, kwargs
class DeferredAttribute:
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, cls=None):
"""
        Retrieve and cache the value from the datastore on the first lookup.
Return the cached value.
"""
if instance is None:
return self
data = instance.__dict__
field_name = self.field.attname
if field_name not in data:
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance)
if val is None:
instance.refresh_from_db(fields=[field_name])
else:
data[field_name] = val
return data[field_name]
def _check_parent_chain(self, instance):
"""
Check if the field value can be fetched from a parent field already
        loaded in the instance. This can be done if the to-be-fetched
field is a primary key field.
"""
opts = instance._meta
link_field = opts.get_ancestor_link(self.field.model)
if self.field.primary_key and self.field != link_field:
return getattr(instance, link_field.attname)
return None
class RegisterLookupMixin:
@classmethod
def _get_lookup(cls, lookup_name):
return cls.get_lookups().get(lookup_name, None)
@classmethod
@functools.lru_cache(maxsize=None)
def get_lookups(cls):
class_lookups = [
parent.__dict__.get("class_lookups", {}) for parent in inspect.getmro(cls)
]
return cls.merge_dicts(class_lookups)
def get_lookup(self, lookup_name):
from django.db.models.lookups import Lookup
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, "output_field"):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
from django.db.models.lookups import Transform
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, "output_field"):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@staticmethod
def merge_dicts(dicts):
"""
        Merge dicts in reverse to give precedence to the order of the original
        list, e.g. merge_dicts([a, b]) will prefer the keys in 'a' over those
        in 'b'.
"""
merged = {}
for d in reversed(dicts):
merged.update(d)
return merged
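    # Illustrative result (editorial sketch): later dicts are applied first,
    # so earlier ones win on key conflicts:
    #
    #     merge_dicts([{"x": 1}, {"x": 2, "y": 3}])  # -> {"x": 1, "y": 3}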
@classmethod
def _clear_cached_lookups(cls):
for subclass in subclasses(cls):
subclass.get_lookups.cache_clear()
@classmethod
def register_lookup(cls, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
if "class_lookups" not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup_name] = lookup
cls._clear_cached_lookups()
return lookup
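    # Illustrative usage (editorial sketch), mirroring the documented pattern
    # of registering the Length transform on CharField:
    #
    #     from django.db.models import CharField
    #     from django.db.models.functions import Length
    #     CharField.register_lookup(Length)
    #     # enables e.g. Author.objects.filter(name__length__gt=7)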
@classmethod
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
        Remove the given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name]
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
"""
Return True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_klass_info()).
Arguments:
* field - the field to be checked
     * restricted - a boolean indicating whether the field list has been
       manually restricted using a requested clause
* requested - The select_related() dictionary.
* load_fields - the set of fields to be loaded on this model
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.remote_field:
return False
if field.remote_field.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if load_fields:
if field.attname not in load_fields:
if restricted and field.name in requested:
msg = (
"Field %s.%s cannot be both deferred and traversed using "
"select_related at the same time."
) % (field.model._meta.object_name, field.name)
raise FieldError(msg)
return True
def refs_expression(lookup_parts, annotations):
"""
    Check if the lookup_parts contain references to the given annotations set.
Because the LOOKUP_SEP is contained in the default annotation names, check
each prefix of the lookup_parts for a match.
"""
for n in range(1, len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in annotations and annotations[level_n_lookup]:
return annotations[level_n_lookup], lookup_parts[n:]
return False, ()
def check_rel_lookup_compatibility(model, target_opts, field):
"""
Check that self.model is compatible with target_opts. Compatibility
is OK if:
1) model and opts match (where proxy inheritance is removed)
2) model is parent of opts' model or the other way around
"""
def check(opts):
return (
model._meta.concrete_model == opts.concrete_model
or opts.concrete_model in model._meta.get_parent_list()
or model in opts.get_parent_list()
)
# If the field is a primary key, then doing a query against the field's
# model is ok, too. Consider the case:
# class Restaurant(models.Model):
# place = OneToOneField(Place, primary_key=True):
# Restaurant.objects.filter(pk__in=Restaurant.objects.all()).
# If we didn't have the primary key check, then pk__in (== place__in) would
# give Place's opts as the target opts, but Restaurant isn't compatible
# with that. This logic applies only to primary keys, as when doing __in=qs,
# we are going to turn this into __in=qs.values('pk') later on.
return check(target_opts) or (
getattr(field, "primary_key", False) and check(field.model._meta)
)
class FilteredRelation:
"""Specify custom filtering in the ON clause of SQL joins."""
def __init__(self, relation_name, *, condition=Q()):
if not relation_name:
raise ValueError("relation_name cannot be empty.")
self.relation_name = relation_name
self.alias = None
if not isinstance(condition, Q):
raise ValueError("condition argument must be a Q() instance.")
self.condition = condition
self.path = []
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.relation_name == other.relation_name
and self.alias == other.alias
and self.condition == other.condition
)
def clone(self):
clone = FilteredRelation(self.relation_name, condition=self.condition)
clone.alias = self.alias
clone.path = self.path[:]
return clone
def resolve_expression(self, *args, **kwargs):
"""
QuerySet.annotate() only accepts expression-like arguments
(with a resolve_expression() method).
"""
raise NotImplementedError("FilteredRelation.resolve_expression() is unused.")
def as_sql(self, compiler, connection):
# Resolve the condition in Join.filtered_relation.
query = compiler.query
where = query.build_filtered_relation_q(self.condition, reuse=set(self.path))
return compiler.compile(where)
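    # Illustrative usage (editorial sketch; ``Restaurant``/``pizzas`` are
    # hypothetical). The condition is applied in the JOIN's ON clause rather
    # than in WHERE:
    #
    #     Restaurant.objects.annotate(
    #         vegetarian_pizzas=FilteredRelation(
    #             "pizzas", condition=Q(pizzas__vegetarian=True)
    #         ),
    #     ).filter(vegetarian_pizzas__name__icontains="mozzarella")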
|
4a0c90242e92ec6a7730e4a1d5a90af7625940603f71779146259066a2506170 | import copy
import datetime
import functools
import inspect
from decimal import Decimal
from uuid import UUID
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q
from django.utils.deconstruct import deconstructible
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class SQLiteNumericMixin:
"""
Some expressions with output_field=DecimalField() must be cast to
numeric to be properly filtered.
"""
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
try:
if self.output_field.get_internal_type() == "DecimalField":
sql = "CAST(%s AS NUMERIC)" % sql
except FieldError:
pass
return sql, params
class Combinable:
"""
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = "+"
SUB = "-"
MUL = "*"
DIV = "/"
POW = "^"
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = "%%"
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = "&"
BITOR = "|"
BITLEFTSHIFT = "<<"
BITRIGHTSHIFT = ">>"
BITXOR = "#"
def _combine(self, other, connector, reversed):
if not hasattr(other, "resolve_expression"):
# everything must be resolvable to an expression
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __neg__(self):
return self._combine(-1, self.MUL, False)
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) & Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __xor__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) ^ Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitxor(self, other):
return self._combine(other, self.BITXOR, False)
def __or__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) | Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __rxor__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
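    # Illustrative usage (editorial sketch; ``Product`` and its fields are
    # hypothetical). Arithmetic on F() expressions builds CombinedExpression
    # trees that are evaluated by the database:
    #
    #     from decimal import Decimal
    #     Product.objects.update(price=F("price") * Decimal("1.1"))
    #     Product.objects.annotate(total=F("price") + F("shipping"))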
class BaseExpression:
"""Base class for all query expressions."""
empty_result_set_value = NotImplemented
# aggregate specific fields
is_summary = False
_output_field_resolved_to_none = False
# Can the expression be used in a WHERE clause?
filterable = True
    # Can the expression be used as a source expression in Window?
window_compatible = False
def __init__(self, output_field=None):
if output_field is not None:
self.output_field = output_field
def __getstate__(self):
state = self.__dict__.copy()
state.pop("convert_value", None)
return state
def get_db_converters(self, connection):
return (
[]
if self.convert_value is self._convert_value_noop
else [self.convert_value]
) + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert not exprs
def _parse_expressions(self, *expressions):
return [
arg
if hasattr(arg, "resolve_expression")
else (F(arg) if isinstance(arg, str) else Value(arg))
for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Return: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
return any(
expr and expr.contains_aggregate for expr in self.get_source_expressions()
)
@cached_property
def contains_over_clause(self):
return any(
expr and expr.contains_over_clause for expr in self.get_source_expressions()
)
@cached_property
def contains_column_references(self):
return any(
expr and expr.contains_column_references
for expr in self.get_source_expressions()
)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
"""
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
         * for_save: whether this expression is about to be used in a save or update
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions(
[
expr.resolve_expression(query, allow_joins, reuse, summarize)
if expr
else None
for expr in c.get_source_expressions()
]
)
return c
@property
def conditional(self):
return isinstance(self.output_field, fields.BooleanField)
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""Return the output type of this expressions."""
output_field = self._resolve_output_field()
if output_field is None:
self._output_field_resolved_to_none = True
raise FieldError("Cannot resolve expression type, unknown output_field")
return output_field
@cached_property
def _output_field_or_none(self):
"""
Return the output field of this expression, or None if
_resolve_output_field() didn't return an output type.
"""
try:
return self.output_field
except FieldError:
if not self._output_field_resolved_to_none:
raise
def _resolve_output_field(self):
"""
Attempt to infer the output type of the expression. If the output
        fields of all source fields match, then simply infer the same type
here. This isn't always correct, but it makes sense most of the time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field with more complex computations.
If a source's output field resolves to None, exclude it from this check.
If all sources are None, then an error is raised higher up the stack in
the output_field property.
"""
sources_iter = (
source for source in self.get_source_fields() if source is not None
)
for output_field in sources_iter:
for source in sources_iter:
if not isinstance(output_field, source.__class__):
raise FieldError(
"Expression contains mixed types: %s, %s. You must "
"set output_field."
% (
output_field.__class__.__name__,
source.__class__.__name__,
)
)
return output_field
@staticmethod
def _convert_value_noop(value, expression, connection):
return value
@cached_property
def convert_value(self):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if internal_type == "FloatField":
return (
lambda value, expression, connection: None
if value is None
else float(value)
)
elif internal_type.endswith("IntegerField"):
return (
lambda value, expression, connection: None
if value is None
else int(value)
)
elif internal_type == "DecimalField":
return (
lambda value, expression, connection: None
if value is None
else Decimal(value)
)
return self._convert_value_noop
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[
e.relabeled_clone(change_map) if e is not None else None
for e in self.get_source_expressions()
]
)
return clone
def copy(self):
return copy.copy(self)
def get_group_by_cols(self, alias=None):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
if hasattr(expr, "flatten"):
yield from expr.flatten()
else:
yield expr
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, EXISTS expressions need
to be wrapped in CASE WHEN on Oracle.
"""
if hasattr(self.output_field, "select_format"):
return self.output_field.select_format(compiler, sql, params)
return sql, params
@deconstructible
class Expression(BaseExpression, Combinable):
"""An expression that can be combined with other expressions."""
@cached_property
def identity(self):
constructor_signature = inspect.signature(self.__init__)
args, kwargs = self._constructor_args
signature = constructor_signature.bind_partial(*args, **kwargs)
signature.apply_defaults()
arguments = signature.arguments.items()
identity = [self.__class__]
for arg, value in arguments:
if isinstance(value, fields.Field):
if value.name and value.model:
value = (value.model._meta.label, value.name)
else:
value = type(value)
else:
value = make_hashable(value)
identity.append((arg, value))
return tuple(identity)
def __eq__(self, other):
if not isinstance(other, Expression):
return NotImplemented
return other.identity == self.identity
def __hash__(self):
return hash(self.identity)
_connector_combinators = {
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
(fields.IntegerField, fields.DecimalField, fields.DecimalField),
(fields.DecimalField, fields.IntegerField, fields.DecimalField),
(fields.IntegerField, fields.FloatField, fields.FloatField),
(fields.FloatField, fields.IntegerField, fields.FloatField),
]
for connector in (Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV)
}
@functools.lru_cache(maxsize=128)
def _resolve_combined_type(connector, lhs_type, rhs_type):
combinators = _connector_combinators.get(connector, ())
for combinator_lhs_type, combinator_rhs_type, combined_type in combinators:
if issubclass(lhs_type, combinator_lhs_type) and issubclass(
rhs_type, combinator_rhs_type
):
return combined_type
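# Illustrative sketch (not part of Django's source): thanks to the
# combinator table above, IntegerField arithmetic with DecimalField or
# FloatField resolves without an explicit output_field. Assumes a
# hypothetical LineItem model with an IntegerField `qty` and a
# DecimalField `unit_price`.
#
#   >>> from django.db.models import F
#   >>> LineItem.objects.annotate(total=F("qty") * F("unit_price"))
#   # `total` resolves to a DecimalField via _resolve_combined_type().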
class CombinedExpression(SQLiteNumericMixin, Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def _resolve_output_field(self):
try:
return super()._resolve_output_field()
except FieldError:
combined_type = _resolve_combined_type(
self.connector,
type(self.lhs.output_field),
type(self.rhs.output_field),
)
if combined_type is None:
raise
return combined_type()
def as_sql(self, compiler, connection):
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
lhs = self.lhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
rhs = self.rhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
if not isinstance(self, (DurationExpression, TemporalSubtraction)):
try:
lhs_type = lhs.output_field.get_internal_type()
except (AttributeError, FieldError):
lhs_type = None
try:
rhs_type = rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
rhs_type = None
if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type:
return DurationExpression(
self.lhs, self.connector, self.rhs
).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
datetime_fields = {"DateField", "DateTimeField", "TimeField"}
if (
self.connector == self.SUB
and lhs_type in datetime_fields
and lhs_type == rhs_type
):
return TemporalSubtraction(self.lhs, self.rhs).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
c = self.copy()
c.is_summary = summarize
c.lhs = lhs
c.rhs = rhs
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == "DurationField":
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
if self.connector in {Combinable.MUL, Combinable.DIV}:
try:
lhs_type = self.lhs.output_field.get_internal_type()
rhs_type = self.rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
pass
else:
allowed_fields = {
"DecimalField",
"DurationField",
"FloatField",
"IntegerField",
}
if lhs_type not in allowed_fields or rhs_type not in allowed_fields:
raise DatabaseError(
f"Invalid arguments for operator {self.connector}."
)
return sql, params
class TemporalSubtraction(CombinedExpression):
output_field = fields.DurationField()
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs)
rhs = compiler.compile(self.rhs)
return connection.ops.subtract_temporals(
self.lhs.output_field.get_internal_type(), lhs, rhs
)
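# Illustrative sketch (not part of Django's source): subtracting two
# temporal fields of the same type goes through TemporalSubtraction and
# yields a DurationField. Assumes a hypothetical Shift model with
# DateTimeFields `starts_at` and `ends_at`.
#
#   >>> from django.db.models import F
#   >>> Shift.objects.annotate(worked=F("ends_at") - F("starts_at"))
#   # `worked` is a datetime.timedelta on most backends.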
@deconstructible(path="django.db.models.F")
class F(Combinable):
"""An object capable of resolving references to existing query objects."""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
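# Illustrative sketch (not part of Django's source): F() lets filters and
# updates reference another column instead of a Python value. Assumes a
# hypothetical Entry model with integer `comments`, `pingbacks`, and
# `views` columns.
#
#   >>> from django.db.models import F
#   >>> Entry.objects.filter(comments__gt=F("pingbacks"))
#   >>> Entry.objects.update(views=F("views") + 1)  # atomic, in the DB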
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
contains_aggregate = False
def as_sql(self, *args, **kwargs):
raise ValueError(
"This queryset contains a reference to an outer query and may "
"only be used in a subquery."
)
def resolve_expression(self, *args, **kwargs):
col = super().resolve_expression(*args, **kwargs)
# FIXME: Rename possibly_multivalued to multivalued and fix detection
# for non-multivalued JOINs (e.g. foreign key fields). This should take
# into account only many-to-many and one-to-many relationships.
col.possibly_multivalued = LOOKUP_SEP in self.name
return col
def relabeled_clone(self, relabels):
return self
def get_group_by_cols(self, alias=None):
return []
class OuterRef(F):
contains_aggregate = False
def resolve_expression(self, *args, **kwargs):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def relabeled_clone(self, relabels):
return self
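# Illustrative sketch (not part of Django's source): OuterRef() defers
# resolution until the queryset is embedded via Subquery() or Exists().
# Assumes hypothetical Post and Comment models with a `post` foreign key.
#
#   >>> newest = Comment.objects.filter(post=OuterRef("pk")).order_by(
#   ...     "-created"
#   ... )
#   >>> Post.objects.annotate(
#   ...     newest_email=Subquery(newest.values("email")[:1])
#   ... )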
@deconstructible(path="django.db.models.Func")
class Func(SQLiteNumericMixin, Expression):
"""An SQL function call."""
function = None
template = "%(function)s(%(expressions)s)"
arg_joiner = ", "
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)"
% (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = {**self.extra, **self._get_repr_options()}
if extra:
extra = ", ".join(
str(key) + "=" + str(val) for key, val in sorted(extra.items())
)
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def _get_repr_options(self):
"""Return a dict of extra __init__() options to include in the repr."""
return {}
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(
self,
compiler,
connection,
function=None,
template=None,
arg_joiner=None,
**extra_context,
):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
try:
arg_sql, arg_params = compiler.compile(arg)
except EmptyResultSet:
empty_result_set_value = getattr(
arg, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
raise
arg_sql, arg_params = compiler.compile(Value(empty_result_set_value))
sql_parts.append(arg_sql)
params.extend(arg_params)
data = {**self.extra, **extra_context}
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data["function"] = function
else:
data.setdefault("function", self.function)
template = template or data.get("template", self.template)
arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner)
data["expressions"] = data["field"] = arg_joiner.join(sql_parts)
return template % data, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
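# Illustrative sketch (not part of Django's source): Func is the base for
# SQL function calls; subclasses usually just set `function` (and
# optionally `arity`). The Lower class below is hypothetical; Django
# ships one in django.db.models.functions.
#
#   >>> class Lower(Func):
#   ...     function = "LOWER"
#   ...     arity = 1
#   >>> Author.objects.annotate(name_lower=Lower("name"))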
@deconstructible(path="django.db.models.Value")
class Value(SQLiteNumericMixin, Expression):
"""Represent a wrapped value as a node within an expression."""
# Provide a default value for `for_save` in order to allow unresolved
# instances to be compiled until a decision is taken in #25425.
for_save = False
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return f"{self.__class__.__name__}({self.value!r})"
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
output_field = self._output_field_or_none
if output_field is not None:
if self.for_save:
val = output_field.get_db_prep_save(val, connection=connection)
else:
val = output_field.get_db_prep_value(val, connection=connection)
if hasattr(output_field, "get_placeholder"):
return output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return "NULL", []
return "%s", [val]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self, alias=None):
return []
def _resolve_output_field(self):
if isinstance(self.value, str):
return fields.CharField()
if isinstance(self.value, bool):
return fields.BooleanField()
if isinstance(self.value, int):
return fields.IntegerField()
if isinstance(self.value, float):
return fields.FloatField()
if isinstance(self.value, datetime.datetime):
return fields.DateTimeField()
if isinstance(self.value, datetime.date):
return fields.DateField()
if isinstance(self.value, datetime.time):
return fields.TimeField()
if isinstance(self.value, datetime.timedelta):
return fields.DurationField()
if isinstance(self.value, Decimal):
return fields.DecimalField()
if isinstance(self.value, bytes):
return fields.BinaryField()
if isinstance(self.value, UUID):
return fields.UUIDField()
@property
def empty_result_set_value(self):
return self.value
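# Illustrative sketch (not part of Django's source): Value() wraps a plain
# Python value as an expression node; _resolve_output_field() above infers
# the field type from the value's Python type.
#
#   >>> Value("fiction")  # output_field inferred as CharField
#   >>> Book.objects.annotate(category=Value("fiction"))  # hypothetical Book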
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return "(%s)" % self.sql, self.params
def get_group_by_cols(self, alias=None):
return [self]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
        # Resolve parent fields used in raw SQL.
for parent in query.model._meta.get_parent_list():
for parent_field in parent._meta.local_fields:
_, column_name = parent_field.get_attname_column()
if column_name.lower() in self.sql.lower():
query.resolve_ref(parent_field.name, allow_joins, reuse, summarize)
break
return super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return "*", []
class Col(Expression):
contains_column_references = True
possibly_multivalued = False
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
alias, target = self.alias, self.target
identifiers = (alias, str(target)) if alias else (str(target),)
return "{}({})".format(self.__class__.__name__, ", ".join(identifiers))
def as_sql(self, compiler, connection):
alias, column = self.alias, self.target.column
identifiers = (alias, column) if alias else (column,)
sql = ".".join(map(compiler.quote_name_unless_alias, identifiers))
return sql, []
def relabeled_clone(self, relabels):
if self.alias is None:
return self
return self.__class__(
relabels.get(self.alias, self.alias), self.target, self.output_field
)
def get_group_by_cols(self, alias=None):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return self.output_field.get_db_converters(
connection
) + self.target.get_db_converters(connection)
class Ref(Expression):
"""
    Reference to a column alias of the query. For example, Ref('sum_cost') in
    the qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
(self.source,) = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return connection.ops.quote_name(self.refs), []
def get_group_by_cols(self, alias=None):
return [self]
class ExpressionList(Func):
"""
An expression containing multiple expressions. Can be used to provide a
list of expressions as an argument to another expression, like a partition
clause.
"""
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if not expressions:
raise ValueError(
"%s requires at least one expression." % self.__class__.__name__
)
super().__init__(*expressions, **extra)
def __str__(self):
return self.arg_joiner.join(str(arg) for arg in self.source_expressions)
def as_sqlite(self, compiler, connection, **extra_context):
# Casting to numeric is unnecessary.
return self.as_sql(compiler, connection, **extra_context)
class OrderByList(Func):
template = "ORDER BY %(expressions)s"
def __init__(self, *expressions, **extra):
expressions = (
(
OrderBy(F(expr[1:]), descending=True)
if isinstance(expr, str) and expr[0] == "-"
else expr
)
for expr in expressions
)
super().__init__(*expressions, **extra)
def as_sql(self, *args, **kwargs):
if not self.source_expressions:
return "", ()
return super().as_sql(*args, **kwargs)
@deconstructible(path="django.db.models.ExpressionWrapper")
class ExpressionWrapper(SQLiteNumericMixin, Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def get_group_by_cols(self, alias=None):
if isinstance(self.expression, Expression):
expression = self.expression.copy()
expression.output_field = self.output_field
return expression.get_group_by_cols(alias=alias)
# For non-expressions e.g. an SQL WHERE clause, the entire
# `expression` must be included in the GROUP BY clause.
return super().get_group_by_cols()
def as_sql(self, compiler, connection):
return compiler.compile(self.expression)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
@deconstructible(path="django.db.models.When")
class When(Expression):
template = "WHEN %(condition)s THEN %(result)s"
    # This isn't a complete conditional expression; it must be used in Case().
conditional = False
def __init__(self, condition=None, then=None, **lookups):
if lookups:
if condition is None:
condition, lookups = Q(**lookups), None
elif getattr(condition, "conditional", False):
condition, lookups = Q(condition, **lookups), None
if condition is None or not getattr(condition, "conditional", False) or lookups:
raise TypeError(
"When() supports a Q object, a boolean expression, or lookups "
"as a condition."
)
if isinstance(condition, Q) and not condition:
raise ValueError("An empty Q() can't be used as a When() condition.")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, "resolve_expression"):
c.condition = c.condition.resolve_expression(
query, allow_joins, reuse, summarize, False
)
c.result = c.result.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params["condition"] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params["result"] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self, alias=None):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
@deconstructible(path="django.db.models.Case")
class Case(SQLiteNumericMixin, Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = "CASE %(cases)s ELSE %(default)s END"
case_joiner = " "
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (
", ".join(str(c) for c in self.cases),
self.default,
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
*self.cases, self.default = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
c.default = c.default.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(
self, compiler, connection, template=None, case_joiner=None, **extra_context
):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = {**self.extra, **extra_context}
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params["cases"] = case_joiner.join(case_parts)
template_params["default"] = default_sql
sql_params.extend(default_params)
template = template or template_params.get("template", self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def get_group_by_cols(self, alias=None):
if not self.cases:
return self.default.get_group_by_cols(alias)
return super().get_group_by_cols(alias)
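# Illustrative sketch (not part of Django's source): Case() chains When()
# clauses into a searched CASE expression. Assumes a hypothetical Client
# model with a CharField `account_type`.
#
#   >>> from decimal import Decimal
#   >>> Client.objects.annotate(
#   ...     discount=Case(
#   ...         When(account_type="gold", then=Value(Decimal("0.10"))),
#   ...         When(account_type="platinum", then=Value(Decimal("0.20"))),
#   ...         default=Value(Decimal("0.00")),
#   ...     )
#   ... )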
class Subquery(BaseExpression, Combinable):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = "(%(subquery)s)"
contains_aggregate = False
empty_result_set_value = None
def __init__(self, queryset, output_field=None, **extra):
# Allow the usage of both QuerySet and sql.Query objects.
self.query = getattr(queryset, "query", queryset).clone()
self.query.subquery = True
self.extra = extra
super().__init__(output_field)
def get_source_expressions(self):
return [self.query]
def set_source_expressions(self, exprs):
self.query = exprs[0]
def _resolve_output_field(self):
return self.query.output_field
def copy(self):
clone = super().copy()
clone.query = clone.query.clone()
return clone
@property
def external_aliases(self):
return self.query.external_aliases
def get_external_cols(self):
return self.query.get_external_cols()
def as_sql(self, compiler, connection, template=None, query=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
query = query or self.query
subquery_sql, sql_params = query.as_sql(compiler, connection)
template_params["subquery"] = subquery_sql[1:-1]
template = template or template_params.get("template", self.template)
sql = template % template_params
return sql, sql_params
def get_group_by_cols(self, alias=None):
# If this expression is referenced by an alias for an explicit GROUP BY
# through values() a reference to this expression and not the
# underlying .query must be returned to ensure external column
# references are not grouped against as well.
if alias:
return [Ref(alias, self)]
return self.query.get_group_by_cols()
class Exists(Subquery):
template = "EXISTS(%(subquery)s)"
output_field = fields.BooleanField()
def __init__(self, queryset, negated=False, **kwargs):
self.negated = negated
super().__init__(queryset, **kwargs)
def __invert__(self):
clone = self.copy()
clone.negated = not self.negated
return clone
def as_sql(self, compiler, connection, template=None, **extra_context):
query = self.query.exists(using=connection.alias)
try:
sql, params = super().as_sql(
compiler,
connection,
template=template,
query=query,
**extra_context,
)
except EmptyResultSet:
if self.negated:
features = compiler.connection.features
if not features.supports_boolean_expr_in_select_clause:
return "1=1", ()
return compiler.compile(Value(True))
raise
if self.negated:
sql = "NOT {}".format(sql)
return sql, params
def select_format(self, compiler, sql, params):
# Wrap EXISTS() with a CASE WHEN expression if a database backend
        # (e.g. Oracle) doesn't support boolean expressions in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql)
return sql, params
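# Illustrative sketch (not part of Django's source): Exists() is usually
# cheaper than a correlated Subquery when only presence matters, and it
# supports negation via __invert__(). Assumes the hypothetical Post and
# Comment models from the OuterRef example above.
#
#   >>> recent = Comment.objects.filter(post=OuterRef("pk"))
#   >>> Post.objects.annotate(has_comment=Exists(recent))
#   >>> Post.objects.filter(~Exists(recent))  # posts with no comments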
@deconstructible(path="django.db.models.OrderBy")
class OrderBy(Expression):
template = "%(expression)s %(ordering)s"
conditional = False
def __init__(
self, expression, descending=False, nulls_first=False, nulls_last=False
):
if nulls_first and nulls_last:
raise ValueError("nulls_first and nulls_last are mutually exclusive")
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, "resolve_expression"):
raise ValueError("expression must be an expression type")
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending
)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
template = template or self.template
if connection.features.supports_order_by_nulls_modifier:
if self.nulls_last:
template = "%s NULLS LAST" % template
elif self.nulls_first:
template = "%s NULLS FIRST" % template
else:
if self.nulls_last and not (
self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NULL, %s" % template
elif self.nulls_first and not (
not self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NOT NULL, %s" % template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
"expression": expression_sql,
"ordering": "DESC" if self.descending else "ASC",
**extra_context,
}
params *= template.count("%(expression)s")
return (template % placeholders).rstrip(), params
def as_oracle(self, compiler, connection):
# Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped
# in a CASE WHEN.
if connection.ops.conditional_expression_supported_in_where_clause(
self.expression
):
copy = self.copy()
copy.expression = Case(
When(self.expression, then=True),
default=False,
)
return copy.as_sql(compiler, connection)
return self.as_sql(compiler, connection)
def get_group_by_cols(self, alias=None):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
if self.nulls_first or self.nulls_last:
self.nulls_first = not self.nulls_first
self.nulls_last = not self.nulls_last
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
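# Illustrative sketch (not part of Django's source): OrderBy is normally
# reached through F().asc()/F().desc(); as_sql() above emulates NULLS
# FIRST/LAST with an extra "IS (NOT) NULL" ordering term on backends that
# lack the modifier.
#
#   >>> from django.db.models import F
#   >>> Book.objects.order_by(F("rating").desc(nulls_last=True))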
class Window(SQLiteNumericMixin, Expression):
template = "%(expression)s OVER (%(window)s)"
# Although the main expression may either be an aggregate or an
# expression with an aggregate function, the GROUP BY that will
# be introduced in the query as a result is not desired.
contains_aggregate = False
contains_over_clause = True
filterable = False
def __init__(
self,
expression,
partition_by=None,
order_by=None,
frame=None,
output_field=None,
):
self.partition_by = partition_by
self.order_by = order_by
self.frame = frame
if not getattr(expression, "window_compatible", False):
raise ValueError(
"Expression '%s' isn't compatible with OVER clauses."
% expression.__class__.__name__
)
if self.partition_by is not None:
if not isinstance(self.partition_by, (tuple, list)):
self.partition_by = (self.partition_by,)
self.partition_by = ExpressionList(*self.partition_by)
if self.order_by is not None:
if isinstance(self.order_by, (list, tuple)):
self.order_by = OrderByList(*self.order_by)
elif isinstance(self.order_by, (BaseExpression, str)):
self.order_by = OrderByList(self.order_by)
else:
raise ValueError(
"Window.order_by must be either a string reference to a "
"field, an expression, or a list or tuple of them."
)
super().__init__(output_field=output_field)
self.source_expression = self._parse_expressions(expression)[0]
def _resolve_output_field(self):
return self.source_expression.output_field
def get_source_expressions(self):
return [self.source_expression, self.partition_by, self.order_by, self.frame]
def set_source_expressions(self, exprs):
self.source_expression, self.partition_by, self.order_by, self.frame = exprs
def as_sql(self, compiler, connection, template=None):
connection.ops.check_expression_support(self)
if not connection.features.supports_over_clause:
raise NotSupportedError("This backend does not support window expressions.")
expr_sql, params = compiler.compile(self.source_expression)
window_sql, window_params = [], []
if self.partition_by is not None:
sql_expr, sql_params = self.partition_by.as_sql(
compiler=compiler,
connection=connection,
template="PARTITION BY %(expressions)s",
)
window_sql.append(sql_expr)
window_params.extend(sql_params)
if self.order_by is not None:
order_sql, order_params = compiler.compile(self.order_by)
window_sql.append(order_sql)
window_params.extend(order_params)
if self.frame:
frame_sql, frame_params = compiler.compile(self.frame)
window_sql.append(frame_sql)
window_params.extend(frame_params)
params.extend(window_params)
template = template or self.template
return (
template % {"expression": expr_sql, "window": " ".join(window_sql).strip()},
params,
)
def as_sqlite(self, compiler, connection):
if isinstance(self.output_field, fields.DecimalField):
# Casting to numeric must be outside of the window expression.
copy = self.copy()
source_expressions = copy.get_source_expressions()
source_expressions[0].output_field = fields.FloatField()
copy.set_source_expressions(source_expressions)
return super(Window, copy).as_sqlite(compiler, connection)
return self.as_sql(compiler, connection)
def __str__(self):
return "{} OVER ({}{}{})".format(
str(self.source_expression),
"PARTITION BY " + str(self.partition_by) if self.partition_by else "",
str(self.order_by or ""),
str(self.frame or ""),
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
return []
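# Illustrative sketch (not part of Django's source): a Window pairs a
# window-compatible expression with optional PARTITION BY, ORDER BY, and
# frame clauses. Assumes a hypothetical Movie model with `genre`,
# `rating`, and `released` fields.
#
#   >>> from django.db.models import Avg, F, Window
#   >>> Movie.objects.annotate(
#   ...     genre_avg=Window(
#   ...         expression=Avg("rating"),
#   ...         partition_by=F("genre"),
#   ...         order_by="-released",
#   ...     )
#   ... )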
class WindowFrame(Expression):
"""
    Model the frame clause in window expressions. There are two types of frame
    clause, implemented as the subclasses below; however, all processing and
    validation (by no means intended to be complete) is done here. Thus,
    providing an end for a frame is optional (the default is UNBOUNDED
    FOLLOWING, which is the last row in the frame).
"""
template = "%(frame_type)s BETWEEN %(start)s AND %(end)s"
def __init__(self, start=None, end=None):
self.start = Value(start)
self.end = Value(end)
def set_source_expressions(self, exprs):
self.start, self.end = exprs
def get_source_expressions(self):
return [self.start, self.end]
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
start, end = self.window_frame_start_end(
connection, self.start.value, self.end.value
)
return (
self.template
% {
"frame_type": self.frame_type,
"start": start,
"end": end,
},
[],
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
return []
def __str__(self):
if self.start.value is not None and self.start.value < 0:
start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING)
elif self.start.value is not None and self.start.value == 0:
start = connection.ops.CURRENT_ROW
else:
start = connection.ops.UNBOUNDED_PRECEDING
if self.end.value is not None and self.end.value > 0:
end = "%d %s" % (self.end.value, connection.ops.FOLLOWING)
elif self.end.value is not None and self.end.value == 0:
end = connection.ops.CURRENT_ROW
else:
end = connection.ops.UNBOUNDED_FOLLOWING
return self.template % {
"frame_type": self.frame_type,
"start": start,
"end": end,
}
def window_frame_start_end(self, connection, start, end):
raise NotImplementedError("Subclasses must implement window_frame_start_end().")
class RowRange(WindowFrame):
frame_type = "ROWS"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_rows_start_end(start, end)
class ValueRange(WindowFrame):
frame_type = "RANGE"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_range_start_end(start, end)
|
ac25e901c30123d7fa2005ae6ddf3997b78d73e8519d717d08ccbd83fba78449 | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import difflib
import functools
import sys
from collections import Counter, namedtuple
from collections.abc import Iterator, Mapping
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import (
BaseExpression,
Col,
Exists,
F,
OuterRef,
Ref,
ResolvedOuterRef,
)
from django.db.models.fields import Field
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q,
check_rel_lookup_compatibility,
refs_expression,
)
from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE
from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin
from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode
from django.utils.functional import cached_property
from django.utils.tree import Node
__all__ = ["Query", "RawQuery"]
def get_field_names_from_opts(opts):
return set(
chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields()
)
)
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
JoinInfo = namedtuple(
"JoinInfo",
("final_field", "targets", "opts", "joins", "path", "transform_function"),
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=()):
self.params = params
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.identifier_converter
return [converter(column_meta[0]) for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
if self.params is None:
return None
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
if self.params_type is None:
return self.sql
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
elif params_type is None:
params = None
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
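# Illustrative sketch (not part of Django's source): RawQuery is the
# engine behind Manager.raw(), the usual public entry point. Assumes a
# hypothetical Person model in an app named "myapp".
#
#   >>> Person.objects.raw(
#   ...     "SELECT * FROM myapp_person WHERE last_name = %s", ["Lennon"]
#   ... )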
ExplainInfo = namedtuple("ExplainInfo", ("format", "options"))
class Query(BaseExpression):
"""A single SQL query."""
alias_prefix = "T"
empty_result_set_value = None
subq_aliases = frozenset([alias_prefix])
compiler = "SQLCompiler"
base_table_class = BaseTable
join_class = Join
default_cols = True
default_ordering = True
standard_ordering = True
filter_is_sticky = False
subquery = False
# SQL-related attributes.
# Select and related select clauses are expressions to use in the SELECT
# clause of the query. The select is used for cases where we want to set up
# the select clause to contain other than default fields (values(),
# subqueries...). Note that annotations go to annotations dictionary.
select = ()
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
group_by = None
order_by = ()
low_mark = 0 # Used for offset/limit.
high_mark = None # Used for offset/limit.
distinct = False
distinct_fields = ()
select_for_update = False
select_for_update_nowait = False
select_for_update_skip_locked = False
select_for_update_of = ()
select_for_no_key_update = False
select_related = False
    # Arbitrary limit for select_related to prevent infinite recursion.
max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
values_select = ()
# SQL annotation-related attributes.
annotation_select_mask = None
_annotation_select_cache = None
# Set combination attributes.
combinator = None
combinator_all = False
combined_queries = ()
# These are for extensions. The contents are more or less appended verbatim
# to the appropriate clause.
extra_select_mask = None
_extra_select_cache = None
extra_tables = ()
extra_order_by = ()
# A tuple that is a set of model field names and either True, if these are
# the fields to defer, or False if these are the only fields to load.
deferred_loading = (frozenset(), True)
explain_info = None
def __init__(self, model, alias_cols=True):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Whether to provide alias to columns during reference resolving.
self.alias_cols = alias_cols
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
# Map external tables to whether they are aliased.
self.external_aliases = {}
self.table_map = {} # Maps table names to list of aliases.
self.used_aliases = set()
self.where = WhereNode()
# Maps alias -> Annotation Expression.
self.annotations = {}
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = {} # Maps col_alias -> (col_sql, params).
self._filtered_relations = {}
@property
def output_field(self):
if len(self.select) == 1:
select = self.select[0]
return getattr(select, "target", None) or select.field
elif len(self.annotation_select) == 1:
return next(iter(self.annotation_select.values())).output_field
@property
def has_select_fields(self):
return bool(
self.select or self.annotation_select_mask or self.extra_select_mask
)
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def get_compiler(self, using=None, connection=None, elide_empty=True):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(
self, connection, using, elide_empty
)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self):
"""
        Return a copy of the current Query. A lightweight alternative to
        deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj.annotations = self.annotations.copy()
if self.annotation_select_mask is not None:
obj.annotation_select_mask = self.annotation_select_mask.copy()
if self.combined_queries:
obj.combined_queries = tuple(
[query.clone() for query in self.combined_queries]
)
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.extra = self.extra.copy()
if self.extra_select_mask is not None:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is not None:
obj._extra_select_cache = self._extra_select_cache.copy()
if self.select_related is not False:
# Use deepcopy because select_related stores fields in nested
# dicts.
obj.select_related = copy.deepcopy(obj.select_related)
if "subq_aliases" in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
obj._filtered_relations = self._filtered_relations.copy()
# Clear the cached_property, if it exists.
obj.__dict__.pop("base_table", None)
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, "_setup_query"):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def _get_col(self, target, field, alias):
if not self.alias_cols:
alias = None
return target.get_col(alias, field)
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
        # of a related model, then that column might not be part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
                # It's already a Ref to a subquery (see resolve_ref() for
                # details).
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
else:
# Reuse aliases of expressions already selected in subquery.
for col_alias, selected_annotation in self.annotation_select.items():
if selected_annotation is expr:
new_expr = Ref(col_alias, expr)
break
else:
                    # An expression that is not selected in the subquery.
if isinstance(expr, Col) or (
expr.contains_aggregate and not expr.is_summary
):
# Reference column or another aggregate. Select it
# under a non-conflicting alias.
col_cnt += 1
col_alias = "__col%d" % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_expr = Ref(col_alias, expr)
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
existing_annotations = [
annotation
for alias, annotation in self.annotations.items()
if alias not in added_aggregate_names
]
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (
isinstance(self.group_by, tuple)
or self.is_sliced
or existing_annotations
or self.distinct
or self.combinator
):
from django.db.models.sql.subqueries import AggregateQuery
inner_query = self.clone()
inner_query.subquery = True
outer_query = AggregateQuery(self.model, inner_query)
inner_query.select_for_update = False
inner_query.select_related = False
inner_query.set_annotation_mask(self.annotation_select)
# Queries with distinct_fields need ordering and when a limit is
# applied we must take the slice from the ordered query. Otherwise
# no need for ordering.
inner_query.clear_ordering(force=False)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
has_existing_aggregate_annotations = any(
annotation
for annotation in existing_annotations
if getattr(annotation, "contains_aggregate", True)
)
if inner_query.default_cols and has_existing_aggregate_annotations:
inner_query.group_by = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
inner_query.default_cols = False
relabels = {t: "subquery" for t in inner_query.alias_map}
relabels[None] = "subquery"
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
annotation_select_mask = inner_query.annotation_select_mask
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(
relabels
)
del inner_query.annotations[alias]
annotation_select_mask.remove(alias)
            # Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if (
inner_query.select == ()
and not inner_query.default_cols
and not inner_query.annotation_select_mask
):
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
else:
outer_query = self
self.select = ()
self.default_cols = False
self.extra = {}
empty_set_result = [
expression.empty_result_set_value
for expression in outer_query.annotation_select.values()
]
elide_empty = not any(result is NotImplemented for result in empty_set_result)
outer_query.clear_ordering(force=True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
result = compiler.execute_sql(SINGLE)
if result is None:
result = empty_set_result
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return dict(zip(outer_query.annotation_select, result))
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count("*"), alias="__count", is_summary=True)
return obj.get_aggregation(using, ["__count"])["__count"]
def has_filters(self):
return self.where
def exists(self, using, limit=True):
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
q.set_group_by(allow_aliases=False)
q.clear_select_clause()
if q.combined_queries and q.combinator == "union":
limit_combined = connections[
using
].features.supports_slicing_ordering_in_compound
q.combined_queries = tuple(
combined_query.exists(using, limit=limit_combined)
for combined_query in q.combined_queries
)
q.clear_ordering(force=True)
if limit:
q.set_limits(high=1)
q.add_extra({"a": 1}, None, None, None, None, None)
q.set_extra_mask(["a"])
return q
def has_results(self, using):
q = self.exists(using)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def explain(self, using, format=None, **options):
q = self.clone()
q.explain_info = ExplainInfo(format, options)
compiler = q.get_compiler(using=using)
return "\n".join(compiler.explain_query())
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
if self.model != rhs.model:
raise TypeError("Cannot combine queries on two different base models.")
if self.is_sliced:
raise TypeError("Cannot combine queries once a slice has been taken.")
if self.distinct != rhs.distinct:
raise TypeError("Cannot combine a unique query with a non-unique query.")
if self.distinct_fields != rhs.distinct_fields:
raise TypeError("Cannot combine queries with different distinct fields.")
        # If lhs and rhs share the same alias prefix, it is possible to have
# conflicting alias changes like T4 -> T5, T5 -> T6, which might end up
# as T4 -> T6 while combining two querysets. To prevent this, change an
# alias prefix of the rhs and update current aliases accordingly,
# except if the alias is the base table since it must be present in the
# query on both sides.
initial_alias = self.get_initial_alias()
rhs.bump_prefix(self, exclude={initial_alias})
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = connector == AND
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER
)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
        # Combine subquery aliases to ensure alias relabelling properly
        # handles subqueries when combining where and select clauses.
self.subq_aliases |= rhs.subq_aliases
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError(
"When merging querysets using 'or', you cannot have "
"extra(select=...) on both sides."
)
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
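    # Editor's sketch (illustrative, not part of the original source): the ORM
    # reaches combine() when two querysets over the same model are merged with
    # the | or & operators; Blog/Entry below are hypothetical models:
    #
    #   q1 = Blog.objects.filter(entry__headline__contains="x").query
    #   q2 = Blog.objects.filter(entry__rating__gt=3).query
    #   q1.combine(q2, OR)   # joins may be reused; rhs ordering wins
    #   q1.combine(q2, AND)  # m2m joins are recreated rather than reused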
def deferred_to_data(self, target, callback):
"""
        Convert the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
        The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
if name in self._filtered_relations:
name = self._filtered_relations[name].relation_name
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.items():
for field in model._meta.local_fields:
if field not in values:
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in must_include.items():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.items():
callback(target, model, values)
else:
for model, values in must_include.items():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
seen.setdefault(model, set())
for model, values in seen.items():
callback(target, model, values)
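    # Editor's sketch (illustrative, not part of the original source):
    # deferred_to_data() backs both defer() and only() -- the "defer" flag in
    # self.deferred_loading picks the branch above. With a hypothetical Entry:
    #
    #   Entry.objects.defer("body")     # load everything except body
    #   Entry.objects.only("headline")  # load only headline (plus pk)
    #
    # The callback then receives, per model, the set of fields that will
    # actually be selected from the database.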
def table_alias(self, table_name, create=False, filtered_relation=None):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = (
filtered_relation.alias if filtered_relation is not None else table_name
)
self.table_map[table_name] = [alias]
self.alias_refcount[alias] = 1
return alias, True
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
        Promote recursively the join type of given aliases and its children to
        an outer join. A join is only promoted if it is nullable or the parent
        join is an outer join.
        The children promotion is done to avoid join chains that contain a
        LOUTER b INNER c. So, if we currently have a INNER b INNER c and a->b
        is promoted, then we must also promote b->c automatically, or otherwise
        the promotion of a->b doesn't actually change anything in the query
        results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = (
parent_alias and self.alias_map[parent_alias].join_type == LOUTER
)
already_louter = self.alias_map[alias].join_type == LOUTER
if (self.alias_map[alias].nullable or parent_louter) and not already_louter:
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join
for join in self.alias_map
if self.alias_map[join].parent_alias == alias
and join not in aliases
)
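    # Editor's sketch (illustrative, not part of the original source): join
    # promotion typically results from OR-ed filters across a nullable
    # relation; with a hypothetical nullable FK Entry.blog:
    #
    #   Entry.objects.filter(Q(blog__name="a") | Q(headline="b"))
    #
    # the blog join must become LEFT OUTER, or rows with blog IS NULL that
    # match headline="b" would be dropped by an INNER join.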
def demote_joins(self, aliases):
"""
        Change join type from LOUTER to INNER for all joins in aliases.
        Similarly to promote_joins(), this method must ensure no join chains
        containing first an outer, then an inner join are generated. If we
        are demoting the b->c join in the chain a LOUTER b LOUTER c, then we
        must demote a->b automatically, or otherwise the demotion of b->c
        doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
# If keys and values of change_map were to intersect, an alias might be
# updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending
# on their order in change_map.
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple(
[col.relabeled_clone(change_map) for col in self.group_by]
)
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
self.annotations = self.annotations and {
key: col.relabeled_clone(change_map)
for key, col in self.annotations.items()
}
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {
# Table is aliased or it's being changed and thus is aliased.
change_map.get(alias, alias): (aliased or alias in change_map)
for alias, aliased in self.external_aliases.items()
}
def bump_prefix(self, other_query, exclude=None):
"""
Change the alias prefix to the next letter in the alphabet in a way
that the other query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call. To prevent changing aliases use the exclude parameter.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet
for s in product(seq, repeat=n):
yield "".join(s)
prefix = None
if self.alias_prefix != other_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
# Explicitly avoid infinite loop. The constant divider is based on how
# much depth recursive subquery references add to the stack. This value
# might need to be adjusted when adding or removing function calls from
# the code path in charge of performing these operations.
local_recursion_limit = sys.getrecursionlimit() // 16
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RecursionError(
"Maximum recursion depth exceeded: too many subqueries."
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases)
if exclude is None:
exclude = {}
self.change_aliases(
{
alias: "%s%d" % (self.alias_prefix, pos)
for pos, alias in enumerate(self.alias_map)
if alias not in exclude
}
)
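    # Editor's sketch (illustrative, not part of the original source):
    # bump_prefix() keeps subquery aliases distinct from the outer query's.
    # If both queries use the default "T" prefix, this query's aliases are
    # renamed to the next free prefix ("U...") so that a nested filter like
    #
    #   Entry.objects.filter(blog__in=Blog.objects.filter(name="x"))
    #
    # cannot produce clashing aliases between the inner and outer query.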
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
else:
alias = self.join(self.base_table_class(self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a base_table_class or
join_class.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
reuse_aliases = [
a
for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j.equals(join)
]
if reuse_aliases:
if join.table_alias in reuse_aliases:
reuse_alias = join.table_alias
else:
# Reuse the most recent alias of the joined table
# (a many-to-many relation may be joined multiple times).
reuse_alias = reuse_aliases[-1]
self.ref_alias(reuse_alias)
return reuse_alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(
join.table_name, create=True, filtered_relation=join.filtered_relation
)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
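    # Editor's sketch (illustrative, not part of the original source): two
    # conditions on the same single-valued relation share one join alias,
    # while passing reuse=set() (as combine() does for AND) forces new joins:
    #
    #   Entry.objects.filter(blog__name="a", blog__rating__gt=1)
    #   # -> a single JOIN on blog; both filters use the same alias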
def join_parent_model(self, opts, model, alias, seen):
"""
Make sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
            # Proxy models have elements in their base chain with no parents;
            # assign the new options object and skip to the next base in that
            # case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False, select=True):
"""Add a single annotation expression to the Query."""
annotation = annotation.resolve_expression(
self, allow_joins=True, reuse=None, summarize=is_summary
)
if select:
self.append_annotation_mask([alias])
else:
self.set_annotation_mask(set(self.annotation_select).difference({alias}))
self.annotations[alias] = annotation
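    # Editor's sketch (illustrative, not part of the original source):
    # QuerySet.annotate() funnels into add_annotation() with select=True,
    # while aggregate() resolves with is_summary=True; e.g.:
    #
    #   Blog.objects.annotate(n=Count("entry"))   # selected annotation "n"
    #   Blog.objects.aggregate(n=Count("entry"))  # summary annotation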
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
clone.where.resolve_expression(query, *args, **kwargs)
# Resolve combined queries.
if clone.combinator:
clone.combined_queries = tuple(
[
combined_query.resolve_expression(query, *args, **kwargs)
for combined_query in clone.combined_queries
]
)
for key, value in clone.annotations.items():
resolved = value.resolve_expression(query, *args, **kwargs)
if hasattr(resolved, "external_aliases"):
resolved.external_aliases.update(clone.external_aliases)
clone.annotations[key] = resolved
# Outer query's aliases are considered external.
for alias, table in query.alias_map.items():
clone.external_aliases[alias] = (
isinstance(table, Join)
and table.join_field.related_model._meta.db_table != alias
) or (
isinstance(table, BaseTable) and table.table_name != table.table_alias
)
return clone
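    # Editor's sketch (illustrative, not part of the original source): a Query
    # used as a subquery expression is resolved against the outer query, which
    # is what makes OuterRef references work, e.g.:
    #
    #   newest = Comment.objects.filter(post=OuterRef("pk")).order_by("-created")
    #   Post.objects.annotate(newest_email=Subquery(newest.values("email")[:1]))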
def get_external_cols(self):
exprs = chain(self.annotations.values(), self.where.children)
return [
col
for col in self._gen_cols(exprs, include_external=True)
if col.alias in self.external_aliases
]
def get_group_by_cols(self, alias=None):
if alias:
return [Ref(alias, self)]
external_cols = self.get_external_cols()
if any(col.possibly_multivalued for col in external_cols):
return [self]
return external_cols
def as_sql(self, compiler, connection):
        # Some backends (e.g. Oracle) raise an error when a subquery contains
        # an unnecessary ORDER BY clause.
if (
self.subquery
and not connection.features.ignores_unnecessary_order_by_in_subqueries
):
self.clear_ordering(force=False)
sql, params = self.get_compiler(connection=connection).as_sql()
if self.subquery:
sql = "(%s)" % sql
return sql, params
def resolve_lookup_value(self, value, can_reuse, allow_joins):
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self,
reuse=can_reuse,
allow_joins=allow_joins,
)
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
values = (
self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
for sub_value in value
)
type_ = type(value)
if hasattr(type_, "_make"): # namedtuple
return type_(*values)
return type_(values)
return value
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self.annotations:
expression, expression_lookups = refs_expression(
lookup_splitted, self.annotations
)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
                'Invalid lookup "%s" for model "%s".'
% (lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, "_meta"):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.'
% (value, opts.object_name)
)
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (
isinstance(value, Query)
and not value.has_select_fields
and not check_rel_lookup_compatibility(value.model, opts, field)
):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".'
% (value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, "_meta"):
self.check_query_object_type(value, opts, field)
elif hasattr(value, "__iter__"):
for v in value:
self.check_query_object_type(v, opts, field)
def check_filterable(self, expression):
"""Raise an error if expression cannot be used in a WHERE clause."""
if hasattr(expression, "resolve_expression") and not getattr(
expression, "filterable", True
):
raise NotSupportedError(
expression.__class__.__name__ + " is disallowed in the filter "
"clause."
)
if hasattr(expression, "get_source_expressions"):
for expr in expression.get_source_expressions():
self.check_filterable(expr)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
The lookups is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
*transforms, lookup_name = lookups or ["exact"]
for name in transforms:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
if lhs.field.is_relation:
raise FieldError(
"Related Field got invalid lookup: {}".format(lookup_name)
)
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookup_name)
lookup_name = "exact"
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value unless the lookup supports it.
if lookup.rhs is None and not lookup.can_use_none_as_rhs:
if lookup_name not in ("exact", "iexact"):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup("isnull")(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (
lookup_name == "exact"
and lookup.rhs == ""
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
):
return lhs.get_lookup("isnull")(lhs, True)
return lookup
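    # Editor's sketch (illustrative, not part of the original source): how
    # lookup strings map onto build_lookup() arguments:
    #
    #   .filter(headline="x")              # lookups=["exact"]
    #   .filter(pub_date__year__gte=2008)  # transforms=["year"], lookup "gte"
    #   .filter(blog=None)                 # rewritten above to blog IS NULL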
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
output_field = lhs.output_field.__class__
suggested_lookups = difflib.get_close_matches(
name, output_field.get_lookups()
)
if suggested_lookups:
suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups)
else:
suggestion = "."
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted%s" % (name, output_field.__name__, suggestion)
)
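    # Editor's sketch (illustrative, not part of the original source): a typo
    # such as .filter(headline__icontain="x") ends up here, and the difflib
    # suggestions produce an error along the lines of:
    #   Unsupported lookup 'icontain' for CharField or join on the field not
    #   permitted, perhaps you meant icontains or contains?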
def build_filter(
self,
filter_expr,
branch_negated=False,
current_negated=False,
can_reuse=None,
allow_joins=True,
split_subq=True,
check_filterable=True,
):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
        Note that add_filter will not do any negating itself; that is done
        higher up in the call chain by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
if isinstance(filter_expr, Q):
return self._add_q(
filter_expr,
branch_negated=branch_negated,
current_negated=current_negated,
used_aliases=can_reuse,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
)
if hasattr(filter_expr, "resolve_expression"):
if not getattr(filter_expr, "conditional", False):
raise TypeError("Cannot filter against a non-conditional expression.")
condition = filter_expr.resolve_expression(self, allow_joins=allow_joins)
if not isinstance(condition, Lookup):
condition = self.build_lookup(["exact"], condition, True)
return WhereNode([condition], connector=AND), []
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if check_filterable:
self.check_filterable(reffed_expression)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
pre_joins = self.alias_refcount.copy()
value = self.resolve_lookup_value(value, can_reuse, allow_joins)
used_joins = {
k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)
}
if check_filterable:
self.check_filterable(value)
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
return WhereNode([condition], connector=AND), []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(
parts,
opts,
alias,
can_reuse=can_reuse,
allow_many=allow_many,
)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError(
"Related Field got invalid lookup: {}".format(lookups[0])
)
if len(targets) == 1:
col = self._get_col(targets[0], join_info.final_field, alias)
else:
col = MultiColSource(
alias, targets, join_info.targets, join_info.final_field
)
else:
col = self._get_col(targets[0], join_info.final_field, alias)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause = WhereNode([condition], connector=AND)
require_outer = (
lookup_type == "isnull" and condition.rhs is True and not current_negated
)
if (
current_negated
and (lookup_type != "isnull" or condition.rhs is False)
and condition.rhs is not None
):
require_outer = True
if lookup_type != "isnull":
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
if (
self.is_nullable(targets[0])
or self.alias_map[join_list[-1]].join_type == LOUTER
):
lookup_class = targets[0].get_lookup("isnull")
col = self._get_col(targets[0], join_info.targets[0], alias)
clause.add(lookup_class(col, False), AND)
# If someval is a nullable column, someval IS NOT NULL is
# added.
if isinstance(value, Col) and self.is_nullable(value.target):
lookup_class = value.target.get_lookup("isnull")
clause.add(lookup_class(value, False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_lhs, filter_rhs):
self.add_q(Q((filter_lhs, filter_rhs)))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
# So, demotion is OK.
existing_inner = {
a for a in self.alias_map if self.alias_map[a].join_type == INNER
}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
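    # Editor's sketch (illustrative, not part of the original source): chained
    # filter() calls each pass through add_q(), which ANDs the new condition
    # onto the existing WHERE; per the join-promotion note above, a query like
    #
    #   qs.filter(Q(blog__name="a") | Q(headline="b")).filter(blog__rating=5)
    #
    # may demote a previously promoted LOUTER join on blog back to INNER,
    # since rows with blog IS NULL cannot satisfy blog__rating=5 anyway.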
def build_where(self, filter_expr):
return self.build_filter(filter_expr, allow_joins=False)[0]
def clear_where(self):
self.where = WhereNode()
def _add_q(
self,
q_object,
used_aliases,
branch_negated=False,
current_negated=False,
allow_joins=True,
split_subq=True,
check_filterable=True,
):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
joinpromoter = JoinPromoter(
q_object.connector, len(q_object.children), current_negated
)
for child in q_object.children:
child_clause, needed_inner = self.build_filter(
child,
can_reuse=used_aliases,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def build_filtered_relation_q(
self, q_object, reuse, branch_negated=False, current_negated=False
):
"""Add a FilteredRelation object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause = self.build_filtered_relation_q(
child,
reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
)
else:
child_clause, _ = self.build_filter(
child,
can_reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=True,
split_subq=False,
)
target_clause.add(child_clause, connector)
return target_clause
def add_filtered_relation(self, filtered_relation, alias):
filtered_relation.alias = alias
lookups = dict(get_children_from_q(filtered_relation.condition))
relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(
filtered_relation.relation_name
)
if relation_lookup_parts:
raise ValueError(
"FilteredRelation's relation_name cannot contain lookups "
"(got %r)." % filtered_relation.relation_name
)
for lookup in chain(lookups):
lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
shift = 2 if not lookup_parts else 1
lookup_field_path = lookup_field_parts[:-shift]
for idx, lookup_field_part in enumerate(lookup_field_path):
if len(relation_field_parts) > idx:
if relation_field_parts[idx] != lookup_field_part:
raise ValueError(
"FilteredRelation's condition doesn't support "
"relations outside the %r (got %r)."
% (filtered_relation.relation_name, lookup)
)
else:
raise ValueError(
"FilteredRelation's condition doesn't support nested "
"relations deeper than the relation_name (got %r for "
"%r)." % (lookup, filtered_relation.relation_name)
)
self._filtered_relations[filtered_relation.alias] = filtered_relation
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
        Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == "pk":
name = opts.pk.name
field = None
filtered_relation = None
try:
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif name in self._filtered_relations and pos == 0:
filtered_relation = self._filtered_relations[name]
if LOOKUP_SEP in filtered_relation.relation_name:
parts = filtered_relation.relation_name.split(LOOKUP_SEP)
filtered_relation_path, field, _, _ = self.names_to_path(
parts,
opts,
allow_many,
fail_on_missing,
)
path.extend(filtered_relation_path[:-1])
else:
field = opts.get_field(filtered_relation.relation_name)
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
available = sorted(
[
*get_field_names_from_opts(opts),
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available))
)
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, "path_infos"):
if filtered_relation:
pathinfos = field.get_path_info(filtered_relation)
else:
pathinfos = field.path_infos
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name)
)
break
return path, final_field, targets, names[pos + 1 :]
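    # Editor's sketch (illustrative, not part of the original source): for a
    # lookup such as "blog__author__name__icontains" on a hypothetical Entry
    # model, names_to_path() returns the PathInfos for blog and author, the
    # final field (name), its target field(s), and the unresolved trailing
    # names (["icontains"]) that build_lookup() later interprets.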
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins, the
field path traveled to generate the joins, and a transform function
that takes a field and alias and is equivalent to `field.get_col(alias)`
in the simple case but wraps field transforms if they were included in
names.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# The transform can't be applied yet, as joins must be trimmed later.
# To avoid making every caller of this method look up transforms
# directly, compute transforms here and create a partial that converts
# fields to the appropriate wrapped version.
def final_transformer(field, alias):
if not self.alias_cols:
alias = None
return field.get_col(alias)
# Try resolving all the names as fields first. If there's an error,
# treat trailing names as lookups until a field can be resolved.
last_field_exception = None
for pivot in range(len(names), 0, -1):
try:
path, final_field, targets, rest = self.names_to_path(
names[:pivot],
opts,
allow_many,
fail_on_missing=True,
)
except FieldError as exc:
if pivot == 1:
# The first item cannot be a lookup, so it's safe
# to raise the field error here.
raise
else:
last_field_exception = exc
else:
# The transforms are the remaining items that couldn't be
# resolved into fields.
transforms = names[pivot:]
break
for name in transforms:
def transform(field, alias, *, name, previous):
try:
wrapped = previous(field, alias)
return self.try_transform(wrapped, name)
except FieldError:
# FieldError is raised if the transform doesn't exist.
if isinstance(final_field, Field) and last_field_exception:
raise last_field_exception
else:
raise
final_transformer = functools.partial(
transform, name=name, previous=final_transformer
)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
if join.filtered_relation:
filtered_relation = join.filtered_relation.clone()
table_alias = filtered_relation.alias
else:
filtered_relation = None
table_alias = None
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = self.join_class(
opts.db_table,
alias,
table_alias,
INNER,
join.join_field,
nullable,
filtered_relation=filtered_relation,
)
reuse = can_reuse if join.m2m else None
alias = self.join(connection, reuse=reuse)
joins.append(alias)
if filtered_relation:
filtered_relation.path = joins[:]
return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
def trim_joins(self, targets, joins, path):
"""
        The 'targets' parameter contains the final fields being joined to,
        'joins' is the full list of join aliases. The 'path' contains the
        PathInfos used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
if info.filtered_relation:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {
r[1].column: r[0]
for r in info.join_field.related_fields
if r[1].column in cur_targets
}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
@classmethod
def _gen_cols(cls, exprs, include_external=False):
for expr in exprs:
if isinstance(expr, Col):
yield expr
elif include_external and callable(
getattr(expr, "get_external_cols", None)
):
yield from expr.get_external_cols()
elif hasattr(expr, "get_source_expressions"):
yield from cls._gen_cols(
expr.get_source_expressions(),
include_external=include_external,
)
@classmethod
def _gen_col_aliases(cls, exprs):
yield from (expr.alias for expr in cls._gen_cols(exprs))
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
annotation = self.annotations.get(name)
if annotation is not None:
if not allow_joins:
for alias in self._gen_col_aliases([annotation]):
if isinstance(self.alias_map[alias], Join):
raise FieldError(
"Joined field references are not permitted in this query"
)
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
if name not in self.annotation_select:
raise FieldError(
"Cannot aggregate over the '%s' alias. Use annotate() "
"to promote it." % name
)
return Ref(name, self.annotation_select[name])
else:
return annotation
else:
field_list = name.split(LOOKUP_SEP)
annotation = self.annotations.get(field_list[0])
if annotation is not None:
for transform in field_list[1:]:
annotation = self.try_transform(annotation, transform)
return annotation
join_info = self.setup_joins(
field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse
)
targets, final_alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if not allow_joins and len(join_list) > 1:
raise FieldError(
"Joined field references are not permitted in this query"
)
if len(targets) > 1:
raise FieldError(
"Referencing multicolumn fields with F() objects isn't supported"
)
# Verify that the last lookup in name is a field or a transform:
# transform_function() raises FieldError if not.
transform = join_info.transform_function(targets[0], final_alias)
if reuse is not None:
reuse.update(join_list)
return transform
def split_exclude(self, filter_expr, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
        For example, if the original filter is ~Q(child__name='foo'), filter_expr
is ('child__name', 'foo') and can_reuse is a set of joins usable for
filters in the original query.
We will turn this into equivalent of:
WHERE NOT EXISTS(
SELECT 1
FROM child
WHERE name = 'foo' AND child.parent_id = parent.id
LIMIT 1
)
"""
# Generate the inner query.
query = self.__class__(self.model)
query._filtered_relations = self._filtered_relations
filter_lhs, filter_rhs = filter_expr
if isinstance(filter_rhs, OuterRef):
filter_rhs = OuterRef(filter_rhs)
elif isinstance(filter_rhs, F):
filter_rhs = OuterRef(filter_rhs.name)
query.add_filter(filter_lhs, filter_rhs)
query.clear_ordering(force=True)
        # Try to keep the subquery as simple as possible by trimming leading
        # joins from it.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
col = query.select[0]
select_field = col.target
alias = col.alias
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup("exact")
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases[alias] = True
lookup_class = select_field.get_lookup("exact")
lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix))
query.where.add(lookup, AND)
condition, needed_inner = self.build_filter(Exists(query))
if contains_louter:
or_null_condition, _ = self.build_filter(
("%s__isnull" % trimmed_prefix, True),
current_negated=True,
branch_negated=True,
can_reuse=can_reuse,
)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
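    # Editor's sketch (illustrative, not part of the original source): the SQL
    # shape produced for an exclude() across a multi-valued relation, using
    # the parent/child example from the docstring above:
    #
    #   Parent.objects.exclude(child__name="foo")
    #   # WHERE NOT EXISTS (SELECT 1 FROM child
    #   #                   WHERE name = 'foo' AND child.parent_id = parent.id)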
def set_empty(self):
self.where.add(NothingNode(), AND)
for query in self.combined_queries:
query.set_empty()
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
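    # Editor's sketch (illustrative, not part of the original source): queryset
    # slicing maps onto set_limits(), and the marks accumulate across slices:
    #
    #   qs[5:10]      # set_limits(5, 10)  -> low_mark=5, high_mark=10
    #   qs[5:10][:2]  # then set_limits(None, 2) -> high_mark=min(10, 5+2)=7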
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
@property
def is_sliced(self):
return self.low_mark != 0 or self.high_mark is not None
def has_limit_one(self):
return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.is_sliced
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def add_select_col(self, col, name):
self.select += (col,)
self.values_select += (name,)
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
                # Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use outer join.
join_info = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m
)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(join_info.transform_function(target, final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
elif name in self.annotations:
raise FieldError(
"Cannot select the '%s' alias. Use annotate() to promote "
"it." % name
)
else:
names = sorted(
[
*get_field_names_from_opts(opts),
*self.extra,
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names))
)
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if isinstance(item, str):
if item == "?":
continue
if item.startswith("-"):
item = item[1:]
if item in self.annotations:
continue
if self.extra and item in self.extra:
continue
                # names_to_path() validates the lookup. A descriptive
                # FieldError will be raised if it's not.
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
elif not hasattr(item, "resolve_expression"):
errors.append(item)
if getattr(item, "contains_aggregate", False):
raise FieldError(
"Using an aggregate in order_by() without also including "
"it in annotate() is not allowed: %s" % item
)
if errors:
raise FieldError("Invalid order_by arguments: %s" % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force=False, clear_default=True):
"""
        Remove any ordering settings if the current query allows it without
        side effects; set 'force' to True to clear the ordering regardless.
If 'clear_default' is True, there will be no ordering in the resulting
query (not even the model's default).
"""
if not force and (
self.is_sliced or self.distinct_fields or self.select_for_update
):
return
self.order_by = ()
self.extra_order_by = ()
if clear_default:
self.default_ordering = False
def set_group_by(self, allow_aliases=True):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
# Column names from JOINs to check collisions with aliases.
if allow_aliases:
column_names = set()
seen_models = set()
for join in list(self.alias_map.values())[1:]: # Skip base table.
model = join.join_field.related_model
if model not in seen_models:
column_names.update(
{field.column for field in model._meta.local_concrete_fields}
)
seen_models.add(model)
group_by = list(self.select)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
if not allow_aliases or alias in column_names:
alias = None
group_by_cols = annotation.get_group_by_cols(alias=alias)
group_by.extend(group_by_cols)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = {}
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = str(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != "%":
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
if new_existing := existing.difference(field_names):
self.deferred_loading = new_existing, False
else:
self.clear_deferred_loading()
if new_only := set(field_names).difference(existing):
self.deferred_loading = new_only, True
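    # Editor's sketch (illustrative, not part of the original source): chained
    # defer() calls accumulate, and defer() after only() subtracts from the
    # immediate-load set, e.g.:
    #
    #   Entry.objects.defer("body").defer("rating")  # defers both fields
    #   Entry.objects.only("headline", "body").defer("body")
    #   # -> immediate loading of headline (plus pk) only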
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if "pk" in field_names:
field_names.remove("pk")
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, return a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, return an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
    def get_loaded_field_names_cb(self, target, model, fields):
        """Callback used by get_loaded_field_names()."""
        target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(self.annotation_select_mask.union(names))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self.extra and not self.annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
selected = frozenset(field_names + extra_names + annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
selected = frozenset(field_names)
# Selected annotations must be known before setting the GROUP BY
# clause.
if self.group_by is True:
self.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
self.set_group_by(allow_aliases=False)
self.clear_select_fields()
elif self.group_by:
# Resolve GROUP BY annotation references if they are not part of
# the selected fields anymore.
group_by = []
for expr in self.group_by:
if isinstance(expr, Ref) and expr.refs not in selected:
expr = self.annotations[expr.refs]
group_by.append(expr)
self.group_by = tuple(group_by)
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
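    # Editor's sketch (illustrative, not part of the original source):
    # values()/values_list() land here, and the requested names are split
    # across the three buckets handled above:
    #
    #   Blog.objects.annotate(n=Count("entry")).values("name", "n")
    #   # -> field_names=["name"], annotation_names=["n"], extra_names=[]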
@property
def annotation_select(self):
"""
        Return the dictionary of annotations that are not masked and should be
        used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self.annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = {
k: v
for k, v in self.annotations.items()
if k in self.annotation_select_mask
}
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self.extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = {
k: v for k, v in self.extra.items() if k in self.extra_select_mask
}
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
# The path.join_field is a Rel; let's get the other side's field.
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
# Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for:
# - LEFT JOINs because we would miss those rows that have nothing on
# the outer side,
# - INNER JOINs from filtered relations because we would miss their
# filters.
first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
if first_join.join_type != LOUTER and not first_join.filtered_relation:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
None, lookup_tables[trimmed_paths + 1]
)
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
# values in select_fields. Let's punt on this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a join_class instead of a
# base_table_class reference. But the first entry in the query's FROM
# clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = self.base_table_class(
self.alias_map[table].table_name,
table,
)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
return field.null or (
field.empty_strings_allowed
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
)
def get_order_dir(field, default="ASC"):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == "-":
return field[1:], dirn[1]
return field, dirn[0]
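# Examples (illustrative; assumes the module-level ORDER_DIR mapping is
# {"ASC": ("ASC", "DESC"), "DESC": ("DESC", "ASC")}):
#   get_order_dir("-foo")        -> ("foo", "DESC")
#   get_order_dir("foo")         -> ("foo", "ASC")
#   get_order_dir("foo", "DESC") -> ("foo", "DESC")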
def add_to_dict(data, key, value):
"""
Add "value" to the set of values for "key", whether or not "key" already
exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
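# Usage sketch (illustrative): starting from data = {},
#   add_to_dict(data, "a", 1); add_to_dict(data, "a", 2)
# leaves data == {"a": {1, 2}}. This is equivalent to
# data.setdefault(key, set()).add(value).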
def is_reverse_o2o(field):
"""
Check if the given field is reverse-o2o. The field is expected to be some
sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
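# Note (added for clarity): flipping the connector when negated follows De
# Morgan's law: NOT (a AND b) behaves like (NOT a) OR (NOT b) for join
# promotion purposes, so an .exclude() with several conditions votes as OR.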
def __repr__(self):
return (
f"{self.__class__.__qualname__}(connector={self.connector!r}, "
f"num_children={self.num_children!r}, negated={self.negated!r})"
)
def add_votes(self, votes):
"""
Add a single vote per item to self.votes. The parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
# if the rel_a join doesn't produce any results (for example, a
# reverse foreign key with no matching rows, or a null value in a
# direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == "OR" and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == "AND" or (
self.effective_connector == "OR" and votes == self.num_children
):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
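# Illustrative ORM-level sketch (model and field names hypothetical):
#   Model.objects.filter(Q(rel_a__col=1) | Q(rel_b__col=2), rel_a__col__gte=0)
# The OR branch promotes rel_a and rel_b to LOUTER (each received one of two
# votes); the AND-ed rel_a__col__gte=0 condition then votes INNER for rel_a,
# so it is demoted back, exactly as described in the comments above.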
|
efc9f818a9c081341adb1727d500f20eb108427bd01aa0e93f09fd5e84a44494 | """
Code to manage the creation and SQL rendering of 'where' constraints.
"""
import operator
from functools import reduce
from django.core.exceptions import EmptyResultSet
from django.db.models.expressions import Case, When
from django.db.models.lookups import Exact
from django.utils import tree
from django.utils.functional import cached_property
# Connection types
AND = "AND"
OR = "OR"
XOR = "XOR"
class WhereNode(tree.Node):
"""
An SQL WHERE clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
A child is usually an expression producing boolean values. Most likely the
expression is a Lookup instance.
However, a child could also be any class with an as_sql() method, a
contains_aggregate attribute, and either a relabeled_clone() method or
both relabel_aliases() and clone() methods.
"""
default = AND
resolved = False
conditional = True
def split_having(self, negated=False):
"""
Return two possibly None nodes: one for those parts of self that
should be included in the WHERE clause and one for those parts of
self that must be included in the HAVING clause.
"""
if not self.contains_aggregate:
return self, None
in_negated = negated ^ self.negated
# If the effective connector is OR or XOR and this node contains an
# aggregate, then we need to push the whole branch to the HAVING clause.
may_need_split = (
(in_negated and self.connector == AND)
or (not in_negated and self.connector == OR)
or self.connector == XOR
)
if may_need_split and self.contains_aggregate:
return None, self
where_parts = []
having_parts = []
for c in self.children:
if hasattr(c, "split_having"):
where_part, having_part = c.split_having(in_negated)
if where_part is not None:
where_parts.append(where_part)
if having_part is not None:
having_parts.append(having_part)
elif c.contains_aggregate:
having_parts.append(c)
else:
where_parts.append(c)
having_node = (
self.__class__(having_parts, self.connector, self.negated)
if having_parts
else None
)
where_node = (
self.__class__(where_parts, self.connector, self.negated)
if where_parts
else None
)
return where_node, having_node
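# Example (illustrative): for qs.annotate(n=Count("x")).filter(n__gt=1,
# name="a"), split_having() returns a WHERE node for the name comparison and
# a HAVING node for the aggregate comparison, since only the latter contains
# an aggregate.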
def as_sql(self, compiler, connection):
"""
Return the SQL version of the where clause and the value to be
substituted in. Return '', [] if this node matches everything,
None, [] if this node is empty, and raise EmptyResultSet if this
node can't match anything.
"""
result = []
result_params = []
if self.connector == AND:
full_needed, empty_needed = len(self.children), 1
else:
full_needed, empty_needed = 1, len(self.children)
if self.connector == XOR and not connection.features.supports_logical_xor:
# Convert if the database doesn't support XOR:
# a XOR b XOR c XOR ...
# to:
# (a OR b OR c OR ...) AND (a + b + c + ...) == 1
lhs = self.__class__(self.children, OR)
rhs_sum = reduce(
operator.add,
(Case(When(c, then=1), default=0) for c in self.children),
)
rhs = Exact(1, rhs_sum)
return self.__class__([lhs, rhs], AND, self.negated).as_sql(
compiler, connection
)
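# Illustrative note (added): for three children the fallback emits SQL
# shaped like:
#   (a OR b OR c) AND ((CASE WHEN a THEN 1 ELSE 0 END
#                       + CASE WHEN b THEN 1 ELSE 0 END
#                       + CASE WHEN c THEN 1 ELSE 0 END) = 1)
# built from the Case/When sum above.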
for child in self.children:
try:
sql, params = compiler.compile(child)
except EmptyResultSet:
empty_needed -= 1
else:
if sql:
result.append(sql)
result_params.extend(params)
else:
full_needed -= 1
# Check if this node matches nothing or everything, using the
# full/empty counts accumulated above.
if empty_needed == 0:
if self.negated:
return "", []
else:
raise EmptyResultSet
if full_needed == 0:
if self.negated:
raise EmptyResultSet
else:
return "", []
conn = " %s " % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
# Some backends (Oracle at least) need parentheses
# around the inner SQL in the negated case, even if the
# inner SQL contains just a single expression.
sql_string = "NOT (%s)" % sql_string
elif len(result) > 1 or self.resolved:
sql_string = "(%s)" % sql_string
return sql_string, result_params
def get_group_by_cols(self, alias=None):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())
return cols
def get_source_expressions(self):
return self.children[:]
def set_source_expressions(self, children):
assert len(children) == len(self.children)
self.children = children
def relabel_aliases(self, change_map):
"""
Relabel the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
for pos, child in enumerate(self.children):
if hasattr(child, "relabel_aliases"):
# For example, another WhereNode.
child.relabel_aliases(change_map)
elif hasattr(child, "relabeled_clone"):
self.children[pos] = child.relabeled_clone(change_map)
def clone(self):
"""
Create a clone of the tree. Must only be called on root nodes (nodes
with empty subtree_parents). Children must be either (Constraint,
lookup, value) tuples, or objects supporting .clone().
"""
clone = self.__class__._new_instance(
children=None,
connector=self.connector,
negated=self.negated,
)
for child in self.children:
if hasattr(child, "clone"):
clone.children.append(child.clone())
else:
clone.children.append(child)
return clone
def relabeled_clone(self, change_map):
clone = self.clone()
clone.relabel_aliases(change_map)
return clone
def copy(self):
return self.clone()
@classmethod
def _contains_aggregate(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_aggregate(c) for c in obj.children)
return obj.contains_aggregate
@cached_property
def contains_aggregate(self):
return self._contains_aggregate(self)
@classmethod
def _contains_over_clause(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_over_clause(c) for c in obj.children)
return obj.contains_over_clause
@cached_property
def contains_over_clause(self):
return self._contains_over_clause(self)
@staticmethod
def _resolve_leaf(expr, query, *args, **kwargs):
if hasattr(expr, "resolve_expression"):
expr = expr.resolve_expression(query, *args, **kwargs)
return expr
@classmethod
def _resolve_node(cls, node, query, *args, **kwargs):
if hasattr(node, "children"):
for child in node.children:
cls._resolve_node(child, query, *args, **kwargs)
if hasattr(node, "lhs"):
node.lhs = cls._resolve_leaf(node.lhs, query, *args, **kwargs)
if hasattr(node, "rhs"):
node.rhs = cls._resolve_leaf(node.rhs, query, *args, **kwargs)
def resolve_expression(self, *args, **kwargs):
clone = self.clone()
clone._resolve_node(clone, *args, **kwargs)
clone.resolved = True
return clone
@cached_property
def output_field(self):
from django.db.models import BooleanField
return BooleanField()
def select_format(self, compiler, sql, params):
# Wrap filters with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
return sql, params
def get_db_converters(self, connection):
return self.output_field.get_db_converters(connection)
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
class NothingNode:
"""A node that matches nothing."""
contains_aggregate = False
def as_sql(self, compiler=None, connection=None):
raise EmptyResultSet
class ExtraWhere:
# The contents are a black box - assume no aggregates are used.
contains_aggregate = False
def __init__(self, sqls, params):
self.sqls = sqls
self.params = params
def as_sql(self, compiler=None, connection=None):
sqls = ["(%s)" % sql for sql in self.sqls]
return " AND ".join(sqls), list(self.params or ())
class SubqueryConstraint:
# Even if aggregates would be used in a subquery, the outer query isn't
# interested in those.
contains_aggregate = False
def __init__(self, alias, columns, targets, query_object):
self.alias = alias
self.columns = columns
self.targets = targets
query_object.clear_ordering(clear_default=True)
self.query_object = query_object
def as_sql(self, compiler, connection):
query = self.query_object
query.set_values(self.targets)
query_compiler = query.get_compiler(connection=connection)
return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
|
0466751929f4ddd4ce3bc3d8c7abc5d7e22613783591e66c43be001c9a10c264 | from django.db.models.sql.query import * # NOQA
from django.db.models.sql.query import Query
from django.db.models.sql.subqueries import * # NOQA
from django.db.models.sql.where import AND, OR, XOR
__all__ = ["Query", "AND", "OR", "XOR"]
|
fd13fdce1dc10ff295a907a082de1bc0c5b61cdd2489f8fe027ed0d6d01b8e2d | from django.db import ProgrammingError
from django.utils.functional import cached_property
class BaseDatabaseFeatures:
# An optional tuple indicating the minimum supported database version.
minimum_database_version = None
gis_enabled = False
# Oracle can't group by LOB (large object) data types.
allows_group_by_lob = True
allows_group_by_pk = False
allows_group_by_selected_pks = False
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate NULL rows in a nullable
# unique field? All core backends implement this correctly, but other
# databases such as SQL Server do not.
supports_nullable_unique_constraints = True
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists and some fields are nullable but not all of them?
supports_partially_nullable_unique_constraints = True
# Does the backend support initially deferrable unique constraints?
supports_deferrable_unique_constraints = False
can_use_chunked_reads = True
can_return_columns_from_insert = False
can_return_rows_from_bulk_insert = False
has_bulk_insert = True
uses_savepoints = True
can_release_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries_with_in = True
has_select_for_update = False
has_select_for_update_nowait = False
has_select_for_update_skip_locked = False
has_select_for_update_of = False
has_select_for_no_key_update = False
# Does the database's SELECT FOR UPDATE OF syntax require a column rather
# than a table?
select_for_update_of_column = False
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does the backend truncate names properly when they are too long?
truncates_names = False
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
# Does the backend ignore unnecessary ORDER BY clauses in subqueries?
ignores_unnecessary_order_by_in_subqueries = True
# Is there a true datatype for uuid?
has_native_uuid_field = False
# Is there a true datatype for timedeltas?
has_native_duration_field = False
# Does the database driver support subtraction of temporal data of the
# same type, returning the type used to store a duration field?
supports_temporal_subtraction = False
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Does the backend order NULL values as largest or smallest?
nulls_order_largest = False
# Does the backend support NULLS FIRST and NULLS LAST in ORDER BY?
supports_order_by_nulls_modifier = True
# Does the backend order NULLS FIRST by default?
order_by_nulls_first = False
# The database's limit on the number of query parameters.
max_query_params = None
# Can an object have an autoincrement primary key of 0?
allows_auto_pk_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred?
can_defer_constraint_checks = False
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Can the backend introspect the default value of a column?
can_introspect_default = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Map fields which some backends may not be able to differentiate to the
# field it's introspected as.
introspected_field_types = {
"AutoField": "AutoField",
"BigAutoField": "BigAutoField",
"BigIntegerField": "BigIntegerField",
"BinaryField": "BinaryField",
"BooleanField": "BooleanField",
"CharField": "CharField",
"DurationField": "DurationField",
"GenericIPAddressField": "GenericIPAddressField",
"IntegerField": "IntegerField",
"PositiveBigIntegerField": "PositiveBigIntegerField",
"PositiveIntegerField": "PositiveIntegerField",
"PositiveSmallIntegerField": "PositiveSmallIntegerField",
"SmallAutoField": "SmallAutoField",
"SmallIntegerField": "SmallIntegerField",
"TimeField": "TimeField",
}
# Can the backend introspect the column order (ASC/DESC) for indexes?
supports_index_column_ordering = True
# Does the backend support introspection of materialized views?
can_introspect_materialized_views = False
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
# Does it support operations requiring references rename in a transaction?
supports_atomic_references_rename = True
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# Does it support foreign keys?
supports_foreign_keys = True
# Can it create foreign key constraints inline when adding columns?
can_create_inline_fk = True
# Does it automatically index foreign keys?
indexes_foreign_keys = True
# Does it support CHECK constraints?
supports_column_check_constraints = True
supports_table_check_constraints = True
# Does the backend support introspection of CHECK constraints?
can_introspect_check_constraints = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
# Does the backend require literal defaults, rather than parameterized ones?
requires_literal_defaults = False
# Does the backend require a connection reset after each material schema change?
connection_persists_old_columns = False
# What kind of error does the backend throw when accessing closed cursor?
closed_cursor_error_class = ProgrammingError
# Does 'a' LIKE 'A' match?
has_case_insensitive_like = False
# Suffix for backends that don't support "SELECT xxx;" queries.
bare_select_suffix = ""
# Is NULL implied on columns when it isn't explicitly specified?
implied_column_null = False
# Does the backend support "select for update" queries with limit (and offset)?
supports_select_for_update_with_limit = True
# Does the backend ignore null expressions in GREATEST and LEAST queries unless
# every expression is null?
greatest_least_ignores_nulls = False
# Can the backend clone databases for parallel test execution?
# Defaults to False to allow third-party backends to opt-in.
can_clone_databases = False
# Does the backend consider table names with different casing to
# be equal?
ignores_table_name_case = False
# Place FOR UPDATE right after FROM clause. Used on MSSQL.
for_update_after_from = False
# Combinatorial flags
supports_select_union = True
supports_select_intersection = True
supports_select_difference = True
supports_slicing_ordering_in_compound = False
supports_parentheses_in_compound = True
# Does the database support SQL 2003 FILTER (WHERE ...) in aggregate
# expressions?
supports_aggregate_filter_clause = False
# Does the backend support indexing a TextField?
supports_index_on_text_field = True
# Does the backend support window expressions (expression OVER (...))?
supports_over_clause = False
supports_frame_range_fixed_distance = False
only_supports_unbounded_with_preceding_and_following = False
# Does the backend support CAST with precision?
supports_cast_with_precision = True
# How many second decimals does the database return when casting a value to
# a type with time?
time_cast_precision = 6
# SQL to create a procedure for use by the Django test suite. The
# functionality of the procedure isn't important.
create_test_procedure_without_params_sql = None
create_test_procedure_with_int_param_sql = None
# Does the backend support keyword parameters for cursor.callproc()?
supports_callproc_kwargs = False
# What formats does the backend EXPLAIN syntax support?
supported_explain_formats = set()
# Does DatabaseOperations.explain_query_prefix() raise ValueError if
# unknown kwargs are passed to QuerySet.explain()?
validates_explain_options = True
# Does the backend support the default parameter in lead() and lag()?
supports_default_in_lead_lag = True
# Does the backend support ignoring constraint or uniqueness errors during
# INSERT?
supports_ignore_conflicts = True
# Does the backend support updating rows on constraint or uniqueness errors
# during INSERT?
supports_update_conflicts = False
supports_update_conflicts_with_target = False
# Does this backend require casting the results of CASE expressions used
# in UPDATE statements to ensure the expression has the correct type?
requires_casted_case_in_updates = False
# Does the backend support partial indexes (CREATE INDEX ... WHERE ...)?
supports_partial_indexes = True
supports_functions_in_partial_indexes = True
# Does the backend support covering indexes (CREATE INDEX ... INCLUDE ...)?
supports_covering_indexes = False
# Does the backend support indexes on expressions?
supports_expression_indexes = True
# Does the backend treat COLLATE as an indexed expression?
collate_as_index_expression = False
# Does the database allow more than one constraint or index on the same
# field(s)?
allows_multiple_constraints_on_same_fields = True
# Does the backend support boolean expressions in SELECT and GROUP BY
# clauses?
supports_boolean_expr_in_select_clause = True
# Does the backend support JSONField?
supports_json_field = True
# Can the backend introspect a JSONField?
can_introspect_json_field = True
# Does the backend support primitives in JSONField?
supports_primitives_in_json_field = True
# Is there a true datatype for JSON?
has_native_json_field = False
# Does the backend use PostgreSQL-style JSON operators like '->'?
has_json_operators = False
# Does the backend support __contains and __contained_by lookups for
# a JSONField?
supports_json_field_contains = True
# Does value__d__contains={'f': 'g'} (without a list around the dict) match
# {'d': [{'f': 'g'}]}?
json_key_contains_list_matching_requires_list = False
# Does the backend support JSONObject() database function?
has_json_object_function = True
# Does the backend support column collations?
supports_collation_on_charfield = True
supports_collation_on_textfield = True
# Does the backend support non-deterministic collations?
supports_non_deterministic_collations = True
# Does the backend support the logical XOR operator?
supports_logical_xor = False
# Collation names for use by the Django test suite.
test_collations = {
"ci": None, # Case-insensitive.
"cs": None, # Case-sensitive.
"non_default": None, # Non-default.
"swedish_ci": None, # Swedish case-insensitive.
}
# SQL template override for tests.aggregation.tests.NowUTC
test_now_utc_template = None
# A set of dotted paths to tests in Django's test suite that are expected
# to fail on this database.
django_test_expected_failures = set()
# A map of reasons to sets of dotted paths to tests in Django's test suite
# that should be skipped for this database.
django_test_skips = {}
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_explaining_query_execution(self):
"""Does this backend support explaining query execution?"""
return self.connection.ops.explain_prefix is not None
@cached_property
def supports_transactions(self):
"""Confirm support for transactions."""
with self.connection.cursor() as cursor:
cursor.execute("CREATE TABLE ROLLBACK_TEST (X INT)")
self.connection.set_autocommit(False)
cursor.execute("INSERT INTO ROLLBACK_TEST (X) VALUES (8)")
self.connection.rollback()
self.connection.set_autocommit(True)
cursor.execute("SELECT COUNT(X) FROM ROLLBACK_TEST")
(count,) = cursor.fetchone()
cursor.execute("DROP TABLE ROLLBACK_TEST")
return count == 0
def allows_group_by_selected_pks_on_model(self, model):
if not self.allows_group_by_selected_pks:
return False
return model._meta.managed
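# Usage sketch (illustrative, not part of this module): feature flags are
# normally consulted through a connection instance, e.g.:
#   from django.db import connection
#   if connection.features.supports_json_field:
#       ...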
|
2a606957d3588262b9b05da68cd52e19571cdf1a32413680aa9bffa01aa97506 | import os
import sys
from io import StringIO
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.db import router
from django.db.transaction import atomic
from django.utils.module_loading import import_string
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = "test_"
class BaseDatabaseCreation:
"""
Encapsulate backend-specific differences pertaining to creation and
destruction of the test database.
"""
def __init__(self, connection):
self.connection = connection
def _nodb_cursor(self):
return self.connection._nodb_cursor()
def log(self, msg):
sys.stderr.write(msg + os.linesep)
def create_test_db(
self, verbosity=1, autoclobber=False, serialize=True, keepdb=False
):
"""
Create a test database, prompting the user for confirmation if the
database already exists. Return the name of the test database created.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
action = "Creating"
if keepdb:
action = "Using existing"
self.log(
"%s test database for alias %s..."
% (
action,
self._get_database_display_str(verbosity, test_database_name),
)
)
# We could skip this call if keepdb is True, but we instead
# give it the keepdb param. This is to handle the case
# where the test DB doesn't exist, in which case we need to
# create it, then just not destroy it. If we instead skip
# this, we will get an exception.
self._create_test_db(verbosity, autoclobber, keepdb)
self.connection.close()
settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
self.connection.settings_dict["NAME"] = test_database_name
try:
if self.connection.settings_dict["TEST"]["MIGRATE"] is False:
# Disable migrations for all apps.
old_migration_modules = settings.MIGRATION_MODULES
settings.MIGRATION_MODULES = {
app.label: None for app in apps.get_app_configs()
}
# We report migrate messages at one level lower than that
# requested. This ensures we don't get flooded with messages during
# testing (unless you really ask to be flooded).
call_command(
"migrate",
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
run_syncdb=True,
)
finally:
if self.connection.settings_dict["TEST"]["MIGRATE"] is False:
settings.MIGRATION_MODULES = old_migration_modules
# We then serialize the current state of the database into a string
# and store it on the connection. This slightly horrific process is so people
# who are testing on databases without transactions or who are using
# a TransactionTestCase still get a clean database on every test run.
if serialize:
self.connection._test_serialized_contents = self.serialize_db_to_string()
call_command("createcachetable", database=self.connection.alias)
# Ensure a connection for the side effect of initializing the test database.
self.connection.ensure_connection()
if os.environ.get("RUNNING_DJANGOS_TEST_SUITE") == "true":
self.mark_expected_failures_and_skips()
return test_database_name
def set_as_test_mirror(self, primary_settings_dict):
"""
Set this database up to be used in testing as a mirror of a primary
database whose settings are given.
"""
self.connection.settings_dict["NAME"] = primary_settings_dict["NAME"]
def serialize_db_to_string(self):
"""
Serialize all data in the database into a JSON string.
Designed only for test runner usage; will not handle large
amounts of data.
"""
# Iteratively return every object for all models to serialize.
def get_objects():
from django.db.migrations.loader import MigrationLoader
loader = MigrationLoader(self.connection)
for app_config in apps.get_app_configs():
if (
app_config.models_module is not None
and app_config.label in loader.migrated_apps
and app_config.name not in settings.TEST_NON_SERIALIZED_APPS
):
for model in app_config.get_models():
if model._meta.can_migrate(
self.connection
) and router.allow_migrate_model(self.connection.alias, model):
queryset = model._base_manager.using(
self.connection.alias,
).order_by(model._meta.pk.name)
yield from queryset.iterator()
# Serialize to a string
out = StringIO()
serializers.serialize("json", get_objects(), indent=None, stream=out)
return out.getvalue()
def deserialize_db_from_string(self, data):
"""
Reload the database with data from a string generated by
the serialize_db_to_string() method.
"""
data = StringIO(data)
table_names = set()
# Load data in a transaction to handle forward references and cycles.
with atomic(using=self.connection.alias):
# Disable constraint checks, because some databases (MySQL) don't
# support deferred checks.
with self.connection.constraint_checks_disabled():
for obj in serializers.deserialize(
"json", data, using=self.connection.alias
):
obj.save()
table_names.add(obj.object.__class__._meta.db_table)
# Manually check for any invalid keys that might have been added,
# because constraint checks were disabled.
self.connection.check_constraints(table_names=table_names)
def _get_database_display_str(self, verbosity, database_name):
"""
Return display string for a database for use in various actions.
"""
return "'%s'%s" % (
self.connection.alias,
(" ('%s')" % database_name) if verbosity >= 2 else "",
)
def _get_test_db_name(self):
"""
Internal implementation - return the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
settings.
"""
if self.connection.settings_dict["TEST"]["NAME"]:
return self.connection.settings_dict["TEST"]["NAME"]
return TEST_DATABASE_PREFIX + self.connection.settings_dict["NAME"]
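# Example (illustrative): with NAME == "mydb" and no TEST["NAME"] override,
# this returns "test_mydb" (TEST_DATABASE_PREFIX + "mydb").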
def _execute_create_test_db(self, cursor, parameters, keepdb=False):
cursor.execute("CREATE DATABASE %(dbname)s %(suffix)s" % parameters)
def _create_test_db(self, verbosity, autoclobber, keepdb=False):
"""
Internal implementation - create the test db tables.
"""
test_database_name = self._get_test_db_name()
test_db_params = {
"dbname": self.connection.ops.quote_name(test_database_name),
"suffix": self.sql_table_creation_suffix(),
}
# Create the test database and connect to it.
with self._nodb_cursor() as cursor:
try:
self._execute_create_test_db(cursor, test_db_params, keepdb)
except Exception as e:
# If we want to keep the db, there's no need to do any of the below;
# just return and skip it all.
if keepdb:
return test_database_name
self.log("Got an error creating the test database: %s" % e)
if not autoclobber:
confirm = input(
"Type 'yes' if you would like to try deleting the test "
"database '%s', or 'no' to cancel: " % test_database_name
)
if autoclobber or confirm == "yes":
try:
if verbosity >= 1:
self.log(
"Destroying old test database for alias %s..."
% (
self._get_database_display_str(
verbosity, test_database_name
),
)
)
cursor.execute("DROP DATABASE %(dbname)s" % test_db_params)
self._execute_create_test_db(cursor, test_db_params, keepdb)
except Exception as e:
self.log("Got an error recreating the test database: %s" % e)
sys.exit(2)
else:
self.log("Tests cancelled.")
sys.exit(1)
return test_database_name
def clone_test_db(self, suffix, verbosity=1, autoclobber=False, keepdb=False):
"""
Clone a test database.
"""
source_database_name = self.connection.settings_dict["NAME"]
if verbosity >= 1:
action = "Cloning test database"
if keepdb:
action = "Using existing clone"
self.log(
"%s for alias %s..."
% (
action,
self._get_database_display_str(verbosity, source_database_name),
)
)
# We could skip this call if keepdb is True, but we instead
# give it the keepdb param. See create_test_db for details.
self._clone_test_db(suffix, verbosity, keepdb)
def get_test_db_clone_settings(self, suffix):
"""
Return a modified connection settings dict for the n-th clone of a DB.
"""
# When this function is called, the test database has been created
# already and its name has been copied to settings_dict['NAME'] so
# we don't need to call _get_test_db_name.
orig_settings_dict = self.connection.settings_dict
return {
**orig_settings_dict,
"NAME": "{}_{}".format(orig_settings_dict["NAME"], suffix),
}
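# Example (illustrative): if the test database NAME is "test_mydb", then
# get_test_db_clone_settings("1") returns a settings dict whose NAME is
# "test_mydb_1"; parallel test workers rely on this via
# setup_worker_connection() below.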
def _clone_test_db(self, suffix, verbosity, keepdb=False):
"""
Internal implementation - duplicate the test db tables.
"""
raise NotImplementedError(
"The database backend doesn't support cloning databases. "
"Disable the option to run tests in parallel processes."
)
def destroy_test_db(
self, old_database_name=None, verbosity=1, keepdb=False, suffix=None
):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists.
"""
self.connection.close()
if suffix is None:
test_database_name = self.connection.settings_dict["NAME"]
else:
test_database_name = self.get_test_db_clone_settings(suffix)["NAME"]
if verbosity >= 1:
action = "Destroying"
if keepdb:
action = "Preserving"
self.log(
"%s test database for alias %s..."
% (
action,
self._get_database_display_str(verbosity, test_database_name),
)
)
# If we want to preserve the database, skip the actual destruction.
if not keepdb:
self._destroy_test_db(test_database_name, verbosity)
# Restore the original database name
if old_database_name is not None:
settings.DATABASES[self.connection.alias]["NAME"] = old_database_name
self.connection.settings_dict["NAME"] = old_database_name
def _destroy_test_db(self, test_database_name, verbosity):
"""
Internal implementation - remove the test db tables.
"""
# Remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
with self._nodb_cursor() as cursor:
cursor.execute(
"DROP DATABASE %s" % self.connection.ops.quote_name(test_database_name)
)
def mark_expected_failures_and_skips(self):
"""
Mark tests in Django's test suite which are expected failures on this
database and tests which should be skipped on this database.
"""
# Only load unittest if we're actually testing.
from unittest import expectedFailure, skip
for test_name in self.connection.features.django_test_expected_failures:
test_case_name, _, test_method_name = test_name.rpartition(".")
test_app = test_name.split(".")[0]
# Importing a test app that isn't installed raises RuntimeError.
if test_app in settings.INSTALLED_APPS:
test_case = import_string(test_case_name)
test_method = getattr(test_case, test_method_name)
setattr(test_case, test_method_name, expectedFailure(test_method))
for reason, tests in self.connection.features.django_test_skips.items():
for test_name in tests:
test_case_name, _, test_method_name = test_name.rpartition(".")
test_app = test_name.split(".")[0]
# Importing a test app that isn't installed raises RuntimeError.
if test_app in settings.INSTALLED_APPS:
test_case = import_string(test_case_name)
test_method = getattr(test_case, test_method_name)
setattr(test_case, test_method_name, skip(reason)(test_method))
def sql_table_creation_suffix(self):
"""
SQL to append to the end of the test table creation statements.
"""
return ""
def test_db_signature(self):
"""
Return a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
according to the RDBMS particularities.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict["HOST"],
settings_dict["PORT"],
settings_dict["ENGINE"],
self._get_test_db_name(),
)
def setup_worker_connection(self, _worker_id):
settings_dict = self.get_test_db_clone_settings(str(_worker_id))
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
self.connection.settings_dict.update(settings_dict)
self.connection.close()
|
a1f545af96c125729585c136f4a717aae45f4d034167a1471bebe894688d1d18 | import operator
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
allows_group_by_pk = True
related_fields_match_type = True
# MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME.
allow_sliced_subqueries_with_in = False
has_select_for_update = True
supports_forward_references = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
can_release_savepoints = True
atomic_transactions = False
can_clone_databases = True
supports_temporal_subtraction = True
supports_select_intersection = False
supports_select_difference = False
supports_slicing_ordering_in_compound = True
supports_index_on_text_field = False
supports_update_conflicts = True
create_test_procedure_without_params_sql = """
CREATE PROCEDURE test_procedure ()
BEGIN
DECLARE V_I INTEGER;
SET V_I = 1;
END;
"""
create_test_procedure_with_int_param_sql = """
CREATE PROCEDURE test_procedure (P_I INTEGER)
BEGIN
DECLARE V_I INTEGER;
SET V_I = P_I;
END;
"""
# Neither MySQL nor MariaDB supports partial indexes.
supports_partial_indexes = False
# COLLATE must be wrapped in parentheses because MySQL treats COLLATE as an
# indexed expression.
collate_as_index_expression = True
supports_order_by_nulls_modifier = False
order_by_nulls_first = True
supports_logical_xor = True
@cached_property
def minimum_database_version(self):
if self.connection.mysql_is_mariadb:
return (10, 2)
else:
return (5, 7)
@cached_property
def test_collations(self):
charset = "utf8"
if self.connection.mysql_is_mariadb and self.connection.mysql_version >= (
10,
6,
):
# utf8 is an alias for utf8mb3 in MariaDB 10.6+.
charset = "utf8mb3"
return {
"ci": f"{charset}_general_ci",
"non_default": f"{charset}_esperanto_ci",
"swedish_ci": f"{charset}_swedish_ci",
}
test_now_utc_template = "UTC_TIMESTAMP"
@cached_property
def django_test_skips(self):
skips = {
"This doesn't work on MySQL.": {
"db_functions.comparison.test_greatest.GreatestTests."
"test_coalesce_workaround",
"db_functions.comparison.test_least.LeastTests."
"test_coalesce_workaround",
},
"Running on MySQL requires utf8mb4 encoding (#18392).": {
"model_fields.test_textfield.TextFieldTests.test_emoji",
"model_fields.test_charfield.TestCharField.test_emoji",
},
"MySQL doesn't support functional indexes on a function that "
"returns JSON": {
"schema.tests.SchemaTests.test_func_index_json_key_transform",
},
"MySQL supports multiplying and dividing DurationFields by a "
"scalar value but it's not implemented (#25287).": {
"expressions.tests.FTimeDeltaTests.test_durationfield_multiply_divide",
},
}
if "ONLY_FULL_GROUP_BY" in self.connection.sql_mode:
skips.update(
{
"GROUP BY optimization does not work properly when "
"ONLY_FULL_GROUP_BY mode is enabled on MySQL, see #31331.": {
"aggregation.tests.AggregateTestCase."
"test_aggregation_subquery_annotation_multivalued",
"annotations.tests.NonAggregateAnnotationTestCase."
"test_annotation_aggregate_with_m2o",
},
}
)
if not self.connection.mysql_is_mariadb and self.connection.mysql_version < (
8,
):
skips.update(
{
"Casting to datetime/time is not supported by MySQL < 8.0. "
"(#30224)": {
"aggregation.tests.AggregateTestCase."
"test_aggregation_default_using_time_from_python",
"aggregation.tests.AggregateTestCase."
"test_aggregation_default_using_datetime_from_python",
},
"MySQL < 8.0 returns string type instead of datetime/time. "
"(#30224)": {
"aggregation.tests.AggregateTestCase."
"test_aggregation_default_using_time_from_database",
"aggregation.tests.AggregateTestCase."
"test_aggregation_default_using_datetime_from_database",
},
}
)
if self.connection.mysql_is_mariadb and (
10,
4,
3,
) < self.connection.mysql_version < (10, 5, 2):
skips.update(
{
"https://jira.mariadb.org/browse/MDEV-19598": {
"schema.tests.SchemaTests."
"test_alter_not_unique_field_to_primary_key",
},
}
)
if self.connection.mysql_is_mariadb and (
10,
4,
12,
) < self.connection.mysql_version < (10, 5):
skips.update(
{
"https://jira.mariadb.org/browse/MDEV-22775": {
"schema.tests.SchemaTests."
"test_alter_pk_with_self_referential_field",
},
}
)
if not self.supports_explain_analyze:
skips.update(
{
"MariaDB and MySQL >= 8.0.18 specific.": {
"queries.test_explain.ExplainTests.test_mysql_analyze",
},
}
)
return skips
@cached_property
def _mysql_storage_engine(self):
"Internal method used in Django tests. Don't rely on this from your code"
return self.connection.mysql_server_data["default_storage_engine"]
@cached_property
def allows_auto_pk_0(self):
"""
Autoincrement primary key can be set to 0 if it doesn't generate new
autoincrement values.
"""
return "NO_AUTO_VALUE_ON_ZERO" in self.connection.sql_mode
@cached_property
def update_can_self_select(self):
return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (
10,
3,
2,
)
@cached_property
def can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
return self._mysql_storage_engine != "MyISAM"
@cached_property
def introspected_field_types(self):
return {
**super().introspected_field_types,
"BinaryField": "TextField",
"BooleanField": "IntegerField",
"DurationField": "BigIntegerField",
"GenericIPAddressField": "CharField",
}
@cached_property
def can_return_columns_from_insert(self):
return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (
10,
5,
0,
)
can_return_rows_from_bulk_insert = property(
operator.attrgetter("can_return_columns_from_insert")
)
@cached_property
def has_zoneinfo_database(self):
return self.connection.mysql_server_data["has_zoneinfo_database"]
@cached_property
def is_sql_auto_is_null_enabled(self):
return self.connection.mysql_server_data["sql_auto_is_null"]
@cached_property
def supports_over_clause(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 2)
supports_frame_range_fixed_distance = property(
operator.attrgetter("supports_over_clause")
)
@cached_property
def supports_column_check_constraints(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 16)
supports_table_check_constraints = property(
operator.attrgetter("supports_column_check_constraints")
)
@cached_property
def can_introspect_check_constraints(self):
if self.connection.mysql_is_mariadb:
version = self.connection.mysql_version
return version >= (10, 3, 10)
return self.connection.mysql_version >= (8, 0, 16)
@cached_property
def has_select_for_update_skip_locked(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 6)
return self.connection.mysql_version >= (8, 0, 1)
@cached_property
def has_select_for_update_nowait(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 1)
@cached_property
def has_select_for_update_of(self):
return (
not self.connection.mysql_is_mariadb
and self.connection.mysql_version >= (8, 0, 1)
)
@cached_property
def supports_explain_analyze(self):
return self.connection.mysql_is_mariadb or self.connection.mysql_version >= (
8,
0,
18,
)
@cached_property
def supported_explain_formats(self):
# Alias MySQL's TRADITIONAL to TEXT for consistency with other
# backends.
formats = {"JSON", "TEXT", "TRADITIONAL"}
if not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (
8,
0,
16,
):
formats.add("TREE")
return formats
@cached_property
def supports_transactions(self):
"""
All storage engines except MyISAM support transactions.
"""
return self._mysql_storage_engine != "MyISAM"
@cached_property
def ignores_table_name_case(self):
return self.connection.mysql_server_data["lower_case_table_names"]
@cached_property
def supports_default_in_lead_lag(self):
# To be added in https://jira.mariadb.org/browse/MDEV-12981.
return not self.connection.mysql_is_mariadb
@cached_property
def supports_json_field(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (5, 7, 8)
@cached_property
def can_introspect_json_field(self):
if self.connection.mysql_is_mariadb:
return self.supports_json_field and self.can_introspect_check_constraints
return self.supports_json_field
@cached_property
def supports_index_column_ordering(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 8)
return self.connection.mysql_version >= (8, 0, 1)
@cached_property
def supports_expression_indexes(self):
return (
not self.connection.mysql_is_mariadb
and self.connection.mysql_version >= (8, 0, 13)
)
|
c44f2263f1868e74e1e9367772d6ed45d13839a1cc9d5fb34e300288ad461945 | """
Implementations of SQL functions for SQLite.
"""
import functools
import random
import statistics
from datetime import timedelta
from hashlib import sha1, sha224, sha256, sha384, sha512
from math import (
acos,
asin,
atan,
atan2,
ceil,
cos,
degrees,
exp,
floor,
fmod,
log,
pi,
radians,
sin,
sqrt,
tan,
)
from re import search as re_search
from django.db.backends.base.base import timezone_constructor
from django.db.backends.utils import (
split_tzname_delta,
typecast_time,
typecast_timestamp,
)
from django.utils import timezone
from django.utils.crypto import md5
from django.utils.duration import duration_microseconds
def register(connection):
create_deterministic_function = functools.partial(
connection.create_function,
deterministic=True,
)
create_deterministic_function("django_date_extract", 2, _sqlite_datetime_extract)
create_deterministic_function("django_date_trunc", 4, _sqlite_date_trunc)
create_deterministic_function(
"django_datetime_cast_date", 3, _sqlite_datetime_cast_date
)
create_deterministic_function(
"django_datetime_cast_time", 3, _sqlite_datetime_cast_time
)
create_deterministic_function(
"django_datetime_extract", 4, _sqlite_datetime_extract
)
create_deterministic_function("django_datetime_trunc", 4, _sqlite_datetime_trunc)
create_deterministic_function("django_time_extract", 2, _sqlite_time_extract)
create_deterministic_function("django_time_trunc", 4, _sqlite_time_trunc)
create_deterministic_function("django_time_diff", 2, _sqlite_time_diff)
create_deterministic_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
create_deterministic_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
create_deterministic_function("regexp", 2, _sqlite_regexp)
create_deterministic_function("BITXOR", 2, _sqlite_bitxor)
create_deterministic_function("COT", 1, _sqlite_cot)
create_deterministic_function("LPAD", 3, _sqlite_lpad)
create_deterministic_function("MD5", 1, _sqlite_md5)
create_deterministic_function("REPEAT", 2, _sqlite_repeat)
create_deterministic_function("REVERSE", 1, _sqlite_reverse)
create_deterministic_function("RPAD", 3, _sqlite_rpad)
create_deterministic_function("SHA1", 1, _sqlite_sha1)
create_deterministic_function("SHA224", 1, _sqlite_sha224)
create_deterministic_function("SHA256", 1, _sqlite_sha256)
create_deterministic_function("SHA384", 1, _sqlite_sha384)
create_deterministic_function("SHA512", 1, _sqlite_sha512)
create_deterministic_function("SIGN", 1, _sqlite_sign)
# Don't use the built-in RANDOM() function because it returns a value
# in the range [-1 * 2^63, 2^63 - 1] instead of [0, 1).
connection.create_function("RAND", 0, random.random)
connection.create_aggregate("STDDEV_POP", 1, StdDevPop)
connection.create_aggregate("STDDEV_SAMP", 1, StdDevSamp)
connection.create_aggregate("VAR_POP", 1, VarPop)
connection.create_aggregate("VAR_SAMP", 1, VarSamp)
# Some math functions are enabled by default in SQLite 3.35+.
sql = "select sqlite_compileoption_used('ENABLE_MATH_FUNCTIONS')"
if not connection.execute(sql).fetchone()[0]:
create_deterministic_function("ACOS", 1, _sqlite_acos)
create_deterministic_function("ASIN", 1, _sqlite_asin)
create_deterministic_function("ATAN", 1, _sqlite_atan)
create_deterministic_function("ATAN2", 2, _sqlite_atan2)
create_deterministic_function("CEILING", 1, _sqlite_ceiling)
create_deterministic_function("COS", 1, _sqlite_cos)
create_deterministic_function("DEGREES", 1, _sqlite_degrees)
create_deterministic_function("EXP", 1, _sqlite_exp)
create_deterministic_function("FLOOR", 1, _sqlite_floor)
create_deterministic_function("LN", 1, _sqlite_ln)
create_deterministic_function("LOG", 2, _sqlite_log)
create_deterministic_function("MOD", 2, _sqlite_mod)
create_deterministic_function("PI", 0, _sqlite_pi)
create_deterministic_function("POWER", 2, _sqlite_power)
create_deterministic_function("RADIANS", 1, _sqlite_radians)
create_deterministic_function("SIN", 1, _sqlite_sin)
create_deterministic_function("SQRT", 1, _sqlite_sqrt)
create_deterministic_function("TAN", 1, _sqlite_tan)
def _sqlite_datetime_parse(dt, tzname=None, conn_tzname=None):
if dt is None:
return None
try:
dt = typecast_timestamp(dt)
except (TypeError, ValueError):
return None
if conn_tzname:
dt = dt.replace(tzinfo=timezone_constructor(conn_tzname))
if tzname is not None and tzname != conn_tzname:
tzname, sign, offset = split_tzname_delta(tzname)
if offset:
hours, minutes = offset.split(":")
offset_delta = timedelta(hours=int(hours), minutes=int(minutes))
dt += offset_delta if sign == "+" else -offset_delta
dt = timezone.localtime(dt, timezone_constructor(tzname))
return dt
def _sqlite_date_trunc(lookup_type, dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
if lookup_type == "year":
return f"{dt.year:04d}-01-01"
elif lookup_type == "quarter":
month_in_quarter = dt.month - (dt.month - 1) % 3
return f"{dt.year:04d}-{month_in_quarter:02d}-01"
elif lookup_type == "month":
return f"{dt.year:04d}-{dt.month:02d}-01"
elif lookup_type == "week":
dt = dt - timedelta(days=dt.weekday())
return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d}"
elif lookup_type == "day":
return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d}"
raise ValueError(f"Unsupported lookup type: {lookup_type!r}")
def _sqlite_time_trunc(lookup_type, dt, tzname, conn_tzname):
if dt is None:
return None
dt_parsed = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt_parsed is None:
try:
dt = typecast_time(dt)
except (ValueError, TypeError):
return None
else:
dt = dt_parsed
if lookup_type == "hour":
return f"{dt.hour:02d}:00:00"
elif lookup_type == "minute":
return f"{dt.hour:02d}:{dt.minute:02d}:00"
elif lookup_type == "second":
return f"{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}"
raise ValueError(f"Unsupported lookup type: {lookup_type!r}")
def _sqlite_datetime_cast_date(dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
return dt.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
return dt.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname=None, conn_tzname=None):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
if lookup_type == "week_day":
return (dt.isoweekday() % 7) + 1
elif lookup_type == "iso_week_day":
return dt.isoweekday()
elif lookup_type == "week":
return dt.isocalendar()[1]
elif lookup_type == "quarter":
return ceil(dt.month / 3)
elif lookup_type == "iso_year":
return dt.isocalendar()[0]
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname, conn_tzname):
dt = _sqlite_datetime_parse(dt, tzname, conn_tzname)
if dt is None:
return None
if lookup_type == "year":
return f"{dt.year:04d}-01-01 00:00:00"
elif lookup_type == "quarter":
month_in_quarter = dt.month - (dt.month - 1) % 3
return f"{dt.year:04d}-{month_in_quarter:02d}-01 00:00:00"
elif lookup_type == "month":
return f"{dt.year:04d}-{dt.month:02d}-01 00:00:00"
elif lookup_type == "week":
dt = dt - timedelta(days=dt.weekday())
return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} 00:00:00"
elif lookup_type == "day":
return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} 00:00:00"
elif lookup_type == "hour":
return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} {dt.hour:02d}:00:00"
elif lookup_type == "minute":
return (
f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} "
f"{dt.hour:02d}:{dt.minute:02d}:00"
)
elif lookup_type == "second":
return (
f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} "
f"{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}"
)
raise ValueError(f"Unsupported lookup type: {lookup_type!r}")
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
def _sqlite_prepare_dtdelta_param(connector, param):
    # For datetime arithmetic ("+"/"-"), integer params are microsecond
    # counts and strings are timestamps; other connectors pass through.
    if connector in ["+", "-"]:
        if isinstance(param, int):
            return timedelta(0, 0, param)
        else:
            return typecast_timestamp(param)
    return param
def _sqlite_format_dtdelta(connector, lhs, rhs):
"""
LHS and RHS can be either:
- An integer number of microseconds
- A string representing a datetime
- A scalar value, e.g. float
"""
if connector is None or lhs is None or rhs is None:
return None
connector = connector.strip()
try:
real_lhs = _sqlite_prepare_dtdelta_param(connector, lhs)
real_rhs = _sqlite_prepare_dtdelta_param(connector, rhs)
except (ValueError, TypeError):
return None
if connector == "+":
# typecast_timestamp() returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
out = str(real_lhs + real_rhs)
elif connector == "-":
out = str(real_lhs - real_rhs)
elif connector == "*":
out = real_lhs * real_rhs
else:
out = real_lhs / real_rhs
return out
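# Illustrative sketch (hypothetical values): adding a duration stored as
# microseconds to a timestamp string:
#   _sqlite_format_dtdelta("+", "2022-01-01 00:00:00", 3600000000)
# parses the string with typecast_timestamp(), converts the integer to
# timedelta(microseconds=3600000000) (one hour), and returns the string
# "2022-01-01 01:00:00".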
def _sqlite_time_diff(lhs, rhs):
if lhs is None or rhs is None:
return None
left = typecast_time(lhs)
right = typecast_time(rhs)
return (
(left.hour * 60 * 60 * 1000000)
+ (left.minute * 60 * 1000000)
+ (left.second * 1000000)
+ (left.microsecond)
- (right.hour * 60 * 60 * 1000000)
- (right.minute * 60 * 1000000)
- (right.second * 1000000)
- (right.microsecond)
)
def _sqlite_timestamp_diff(lhs, rhs):
if lhs is None or rhs is None:
return None
left = typecast_timestamp(lhs)
right = typecast_timestamp(rhs)
return duration_microseconds(left - right)
def _sqlite_regexp(pattern, string):
if pattern is None or string is None:
return None
if not isinstance(string, str):
string = str(string)
return bool(re_search(pattern, string))
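# Hedged usage note: SQLite rewrites `X REGEXP Y` as the two-argument call
# regexp(Y, X), so a query such as
#   SELECT * FROM app_model WHERE name REGEXP '^foo';
# (table name illustrative) ends up calling _sqlite_regexp('^foo', name)
# via re.search(), which backs Django's regex lookups on this backend.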
def _sqlite_acos(x):
if x is None:
return None
return acos(x)
def _sqlite_asin(x):
if x is None:
return None
return asin(x)
def _sqlite_atan(x):
if x is None:
return None
return atan(x)
def _sqlite_atan2(y, x):
if y is None or x is None:
return None
return atan2(y, x)
def _sqlite_bitxor(x, y):
if x is None or y is None:
return None
return x ^ y
def _sqlite_ceiling(x):
if x is None:
return None
return ceil(x)
def _sqlite_cos(x):
if x is None:
return None
return cos(x)
def _sqlite_cot(x):
if x is None:
return None
return 1 / tan(x)
def _sqlite_degrees(x):
if x is None:
return None
return degrees(x)
def _sqlite_exp(x):
if x is None:
return None
return exp(x)
def _sqlite_floor(x):
if x is None:
return None
return floor(x)
def _sqlite_ln(x):
if x is None:
return None
return log(x)
def _sqlite_log(base, x):
if base is None or x is None:
return None
# Arguments reversed to match SQL standard.
return log(x, base)
def _sqlite_lpad(text, length, fill_text):
if text is None or length is None or fill_text is None:
return None
delta = length - len(text)
if delta <= 0:
return text[:length]
return (fill_text * length)[:delta] + text
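# Examples (illustrative): _sqlite_lpad("42", 5, "0") -> "00042", while a
# target length shorter than the text truncates instead:
# _sqlite_lpad("hello", 3, "*") -> "hel", matching SQL LPAD() semantics.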
def _sqlite_md5(text):
if text is None:
return None
return md5(text.encode()).hexdigest()
def _sqlite_mod(x, y):
if x is None or y is None:
return None
return fmod(x, y)
def _sqlite_pi():
return pi
def _sqlite_power(x, y):
if x is None or y is None:
return None
return x**y
def _sqlite_radians(x):
if x is None:
return None
return radians(x)
def _sqlite_repeat(text, count):
if text is None or count is None:
return None
return text * count
def _sqlite_reverse(text):
if text is None:
return None
return text[::-1]
def _sqlite_rpad(text, length, fill_text):
if text is None or length is None or fill_text is None:
return None
return (text + fill_text * length)[:length]
def _sqlite_sha1(text):
if text is None:
return None
return sha1(text.encode()).hexdigest()
def _sqlite_sha224(text):
if text is None:
return None
return sha224(text.encode()).hexdigest()
def _sqlite_sha256(text):
if text is None:
return None
return sha256(text.encode()).hexdigest()
def _sqlite_sha384(text):
if text is None:
return None
return sha384(text.encode()).hexdigest()
def _sqlite_sha512(text):
if text is None:
return None
return sha512(text.encode()).hexdigest()
def _sqlite_sign(x):
if x is None:
return None
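    # Branchless sign: (x > 0) - (x < 0) evaluates to 1, -1, or 0.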
return (x > 0) - (x < 0)
def _sqlite_sin(x):
if x is None:
return None
return sin(x)
def _sqlite_sqrt(x):
if x is None:
return None
return sqrt(x)
def _sqlite_tan(x):
if x is None:
return None
return tan(x)
class ListAggregate(list):
step = list.append
class StdDevPop(ListAggregate):
finalize = statistics.pstdev
class StdDevSamp(ListAggregate):
finalize = statistics.stdev
class VarPop(ListAggregate):
finalize = statistics.pvariance
class VarSamp(ListAggregate):
finalize = statistics.variance
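# Note on the classes above: they lean on Python's descriptor protocol.
# `step = list.append` makes self.step(value) append to the instance (a
# list subclass), and assigning a plain function such as statistics.pstdev
# to `finalize` makes self.finalize() call it with the collected values --
# exactly the step/finalize interface sqlite3's create_aggregate() expects.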
|
8f2c6d960bd5674d9f97e2fb3db1a74db2a63222b4a6afcd6cf3cc6c67f5cd17 | import warnings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.db.models import Exists, OuterRef, Q
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.inspect import func_supports_parameter
UserModel = get_user_model()
class BaseBackend:
def authenticate(self, request, **kwargs):
return None
def get_user(self, user_id):
return None
def get_user_permissions(self, user_obj, obj=None):
return set()
def get_group_permissions(self, user_obj, obj=None):
return set()
def get_all_permissions(self, user_obj, obj=None):
return {
*self.get_user_permissions(user_obj, obj=obj),
*self.get_group_permissions(user_obj, obj=obj),
}
def has_perm(self, user_obj, perm, obj=None):
return perm in self.get_all_permissions(user_obj, obj=obj)
class ModelBackend(BaseBackend):
"""
Authenticates against settings.AUTH_USER_MODEL.
"""
def authenticate(self, request, username=None, password=None, **kwargs):
if username is None:
username = kwargs.get(UserModel.USERNAME_FIELD)
if username is None or password is None:
return
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
# Run the default password hasher once to reduce the timing
# difference between an existing and a nonexistent user (#20760).
UserModel().set_password(password)
else:
if user.check_password(password) and self.user_can_authenticate(user):
return user
def user_can_authenticate(self, user):
"""
Reject users with is_active=False. Custom user models that don't have
that attribute are allowed.
"""
is_active = getattr(user, "is_active", None)
return is_active or is_active is None
def _get_user_permissions(self, user_obj):
return user_obj.user_permissions.all()
def _get_group_permissions(self, user_obj):
user_groups_field = get_user_model()._meta.get_field("groups")
user_groups_query = "group__%s" % user_groups_field.related_query_name()
return Permission.objects.filter(**{user_groups_query: user_obj})
def _get_permissions(self, user_obj, obj, from_name):
"""
Return the permissions of `user_obj` from `from_name`. `from_name` can
be either "group" or "user" to return permissions from
`_get_group_permissions` or `_get_user_permissions` respectively.
"""
if not user_obj.is_active or user_obj.is_anonymous or obj is not None:
return set()
perm_cache_name = "_%s_perm_cache" % from_name
if not hasattr(user_obj, perm_cache_name):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
perms = getattr(self, "_get_%s_permissions" % from_name)(user_obj)
perms = perms.values_list("content_type__app_label", "codename").order_by()
setattr(
user_obj, perm_cache_name, {"%s.%s" % (ct, name) for ct, name in perms}
)
return getattr(user_obj, perm_cache_name)
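    # Illustrative note: the per-user cache built above holds strings of the
    # form "<app_label>.<codename>", e.g. {"auth.add_user",
    # "blog.change_post"} (example labels, not real project data).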
def get_user_permissions(self, user_obj, obj=None):
"""
Return a set of permission strings the user `user_obj` has from their
`user_permissions`.
"""
return self._get_permissions(user_obj, obj, "user")
def get_group_permissions(self, user_obj, obj=None):
"""
Return a set of permission strings the user `user_obj` has from the
        groups they belong to.
"""
return self._get_permissions(user_obj, obj, "group")
def get_all_permissions(self, user_obj, obj=None):
if not user_obj.is_active or user_obj.is_anonymous or obj is not None:
return set()
if not hasattr(user_obj, "_perm_cache"):
user_obj._perm_cache = super().get_all_permissions(user_obj)
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
return user_obj.is_active and super().has_perm(user_obj, perm, obj=obj)
def has_module_perms(self, user_obj, app_label):
"""
Return True if user_obj has any permissions in the given app_label.
"""
return user_obj.is_active and any(
perm[: perm.index(".")] == app_label
for perm in self.get_all_permissions(user_obj)
)
def with_perm(self, perm, is_active=True, include_superusers=True, obj=None):
"""
Return users that have permission "perm". By default, filter out
inactive users and include superusers.
"""
if isinstance(perm, str):
try:
app_label, codename = perm.split(".")
except ValueError:
raise ValueError(
"Permission name should be in the form "
"app_label.permission_codename."
)
elif not isinstance(perm, Permission):
raise TypeError(
"The `perm` argument must be a string or a permission instance."
)
if obj is not None:
return UserModel._default_manager.none()
permission_q = Q(group__user=OuterRef("pk")) | Q(user=OuterRef("pk"))
if isinstance(perm, Permission):
permission_q &= Q(pk=perm.pk)
else:
permission_q &= Q(codename=codename, content_type__app_label=app_label)
user_q = Exists(Permission.objects.filter(permission_q))
if include_superusers:
user_q |= Q(is_superuser=True)
if is_active is not None:
user_q &= Q(is_active=is_active)
return UserModel._default_manager.filter(user_q)
def get_user(self, user_id):
try:
user = UserModel._default_manager.get(pk=user_id)
except UserModel.DoesNotExist:
return None
return user if self.user_can_authenticate(user) else None
class AllowAllUsersModelBackend(ModelBackend):
def user_can_authenticate(self, user):
return True
class RemoteUserBackend(ModelBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, request, remote_user):
"""
The username passed as ``remote_user`` is considered trusted. Return
the ``User`` object with the given username. Create a new ``User``
object if ``create_unknown_user`` is ``True``.
Return None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
created = False
user = None
username = self.clean_username(remote_user)
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = UserModel._default_manager.get_or_create(
**{UserModel.USERNAME_FIELD: username}
)
else:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
pass
# RemovedInDjango50Warning: When the deprecation ends, replace with:
# user = self.configure_user(request, user, created=created)
if func_supports_parameter(self.configure_user, "created"):
user = self.configure_user(request, user, created=created)
else:
warnings.warn(
f"`created=True` must be added to the signature of "
f"{self.__class__.__qualname__}.configure_user().",
category=RemovedInDjango50Warning,
)
if created:
user = self.configure_user(request, user)
return user if self.user_can_authenticate(user) else None
def clean_username(self, username):
"""
Perform any cleaning on the "username" prior to using it to get or
create the user object. Return the cleaned username.
By default, return the username unchanged.
"""
return username
def configure_user(self, request, user, created=True):
"""
Configure a user and return the updated user.
By default, return the user unmodified.
"""
return user
class AllowAllUsersRemoteUserBackend(RemoteUserBackend):
def user_can_authenticate(self, user):
return True
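# Hedged configuration sketch (project settings, selection illustrative):
# these backends are enabled via the AUTHENTICATION_BACKENDS setting and
# are tried in order until one returns a user:
#   AUTHENTICATION_BACKENDS = [
#       "django.contrib.auth.backends.ModelBackend",
#       "django.contrib.auth.backends.RemoteUserBackend",
#   ]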
|
f3d4558dc78e5bcc5323ca11e0dd02aec379317ffc3c46d94ff01ae9d76219d1 | import copy
import json
import re
from functools import partial, update_wrapper
from urllib.parse import quote as urlquote
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks,
InlineModelAdminChecks,
ModelAdminChecks,
)
from django.contrib.admin.decorators import display
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects,
construct_change_message,
flatten_fieldsets,
get_deleted_objects,
lookup_spawns_duplicates,
model_format_dict,
model_ngettext,
quote,
unquote,
)
from django.contrib.admin.widgets import AutocompleteSelect, AutocompleteSelectMultiple
from django.contrib.auth import get_permission_codename
from django.core.exceptions import (
FieldDoesNotExist,
FieldError,
PermissionDenied,
ValidationError,
)
from django.core.paginator import Paginator
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet,
inlineformset_factory,
modelform_defines_fields,
modelform_factory,
modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import (
capfirst,
format_lazy,
get_text_list,
smart_split,
unescape_string_literal,
)
from django.utils.translation import gettext as _
from django.utils.translation import ngettext
from django.views.decorators.csrf import csrf_protect
from django.views.generic import RedirectView
IS_POPUP_VAR = "_popup"
TO_FIELD_VAR = "_to_field"
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
return "radiolist" if radio_style == VERTICAL else "radiolist inline"
class IncorrectLookupParameters(Exception):
pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
"form_class": forms.SplitDateTimeField,
"widget": widgets.AdminSplitDateTime,
},
models.DateField: {"widget": widgets.AdminDateWidget},
models.TimeField: {"widget": widgets.AdminTimeWidget},
models.TextField: {"widget": widgets.AdminTextareaWidget},
models.URLField: {"widget": widgets.AdminURLFieldWidget},
models.IntegerField: {"widget": widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {"widget": widgets.AdminBigIntegerFieldWidget},
models.CharField: {"widget": widgets.AdminTextInputWidget},
models.ImageField: {"widget": widgets.AdminFileWidget},
models.FileField: {"widget": widgets.AdminFileWidget},
models.EmailField: {"widget": widgets.AdminEmailInputWidget},
models.UUIDField: {"widget": widgets.AdminUUIDInputWidget},
}
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(metaclass=forms.MediaDefiningClass):
"""Functionality common to both ModelAdmin and InlineAdmin."""
autocomplete_fields = ()
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
sortable_by = None
view_on_site = True
show_full_result_count = True
checks_class = BaseModelAdminChecks
def check(self, **kwargs):
return self.checks_class().check(self, **kwargs)
def __init__(self):
# Merge FORMFIELD_FOR_DBFIELD_DEFAULTS with the formfield_overrides
# rather than simply overwriting.
overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS)
for k, v in self.formfield_overrides.items():
overrides.setdefault(k, {}).update(v)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, request, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = {**self.formfield_overrides[db_field.__class__], **kwargs}
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(
db_field.remote_field.model
)
wrapper_kwargs = {}
if related_modeladmin:
wrapper_kwargs.update(
can_add_related=related_modeladmin.has_add_permission(request),
can_change_related=related_modeladmin.has_change_permission(
request
),
can_delete_related=related_modeladmin.has_delete_permission(
request
),
can_view_related=related_modeladmin.has_view_permission(
request
),
)
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget,
db_field.remote_field,
self.admin_site,
**wrapper_kwargs,
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = {**copy.deepcopy(self.formfield_overrides[klass]), **kwargs}
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
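    # Hedged usage sketch (model/widget choices hypothetical): a subclass
    # can reroute widgets per field class via formfield_overrides, which the
    # MRO walk above also applies to subclasses of the keyed field type:
    #   class ArticleAdmin(ModelAdmin):
    #       formfield_overrides = {
    #           models.TextField: {"widget": forms.Textarea(attrs={"rows": 4})},
    #       }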
def formfield_for_choice_field(self, db_field, request, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if "widget" not in kwargs:
kwargs["widget"] = widgets.AdminRadioSelect(
attrs={
"class": get_ul_class(self.radio_fields[db_field.name]),
}
)
if "choices" not in kwargs:
kwargs["choices"] = db_field.get_choices(
include_blank=db_field.blank, blank_choice=[("", _("None"))]
)
return db_field.formfield(**kwargs)
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
ordering. Otherwise don't specify the queryset, let the field decide
(return None in that case).
"""
related_admin = self.admin_site._registry.get(db_field.remote_field.model)
if related_admin is not None:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.remote_field.model._default_manager.using(db).order_by(
*ordering
)
return None
def formfield_for_foreignkey(self, db_field, request, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get("using")
if "widget" not in kwargs:
if db_field.name in self.get_autocomplete_fields(request):
kwargs["widget"] = AutocompleteSelect(
db_field, self.admin_site, using=db
)
elif db_field.name in self.raw_id_fields:
kwargs["widget"] = widgets.ForeignKeyRawIdWidget(
db_field.remote_field, self.admin_site, using=db
)
elif db_field.name in self.radio_fields:
kwargs["widget"] = widgets.AdminRadioSelect(
attrs={
"class": get_ul_class(self.radio_fields[db_field.name]),
}
)
kwargs["empty_label"] = (
kwargs.get("empty_label", _("None")) if db_field.blank else None
)
if "queryset" not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs["queryset"] = queryset
return db_field.formfield(**kwargs)
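    # Hedged override sketch (field/queryset hypothetical): restrict the
    # choices for one FK by injecting a queryset before calling super():
    #   def formfield_for_foreignkey(self, db_field, request, **kwargs):
    #       if db_field.name == "author":
    #           kwargs["queryset"] = User.objects.filter(is_staff=True)
    #       return super().formfield_for_foreignkey(db_field, request, **kwargs)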
def formfield_for_manytomany(self, db_field, request, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if not db_field.remote_field.through._meta.auto_created:
return None
db = kwargs.get("using")
if "widget" not in kwargs:
autocomplete_fields = self.get_autocomplete_fields(request)
if db_field.name in autocomplete_fields:
kwargs["widget"] = AutocompleteSelectMultiple(
db_field,
self.admin_site,
using=db,
)
elif db_field.name in self.raw_id_fields:
kwargs["widget"] = widgets.ManyToManyRawIdWidget(
db_field.remote_field,
self.admin_site,
using=db,
)
elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]:
kwargs["widget"] = widgets.FilteredSelectMultiple(
db_field.verbose_name, db_field.name in self.filter_vertical
)
if "queryset" not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs["queryset"] = queryset
form_field = db_field.formfield(**kwargs)
if isinstance(form_field.widget, SelectMultiple) and not isinstance(
form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple)
):
msg = _(
"Hold down “Control”, or “Command” on a Mac, to select more than one."
)
help_text = form_field.help_text
form_field.help_text = (
format_lazy("{} {}", help_text, msg) if help_text else msg
)
return form_field
def get_autocomplete_fields(self, request):
"""
Return a list of ForeignKey and/or ManyToMany fields which should use
an autocomplete widget.
"""
return self.autocomplete_fields
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif hasattr(obj, "get_absolute_url"):
# use the ContentType lookup if view_on_site is True
return reverse(
"admin:view_on_site",
kwargs={
"content_type_id": get_content_type_for_model(obj).pk,
"object_id": obj.pk,
},
)
def get_empty_value_display(self):
"""
Return the empty_value_display set on ModelAdmin or AdminSite.
"""
try:
return mark_safe(self.empty_value_display)
except AttributeError:
return mark_safe(self.admin_site.empty_value_display)
def get_exclude(self, request, obj=None):
"""
Hook for specifying exclude.
"""
return self.exclude
def get_fields(self, request, obj=None):
"""
Hook for specifying fields.
"""
if self.fields:
return self.fields
# _get_form_for_get_fields() is implemented in subclasses.
form = self._get_form_for_get_fields(request, obj)
return [*form.base_fields, *self.get_readonly_fields(request, obj)]
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets.
"""
if self.fieldsets:
return self.fieldsets
return [(None, {"fields": self.get_fields(request, obj)})]
def get_inlines(self, request, obj):
"""Hook for specifying custom inlines."""
return self.inlines
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Return a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
def get_sortable_by(self, request):
"""Hook for specifying which fields can be sorted in the changelist."""
return (
self.sortable_by
if self.sortable_by is not None
else self.get_list_display(request)
)
def lookup_allowed(self, lookup, value):
from django.contrib.admin.filters import SimpleListFilter
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for fk_lookup in model._meta.related_fkey_lookups:
# As ``limit_choices_to`` can be a callable, invoke it here.
if callable(fk_lookup):
fk_lookup = fk_lookup()
if (lookup, value) in widgets.url_params_from_lookup_dict(
fk_lookup
).items():
return True
relation_parts = []
prev_field = None
for part in lookup.split(LOOKUP_SEP):
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on nonexistent fields are ok, since they're ignored
# later.
break
# It is allowed to filter on values that would be found from local
# model anyways. For example, if you filter on employee__department__id,
# then the id value would be found already from employee__department_id.
if not prev_field or (
prev_field.is_relation
and field not in prev_field.path_infos[-1].target_fields
):
relation_parts.append(part)
if not getattr(field, "path_infos", None):
# This is not a relational field, so further parts
# must be transforms.
break
prev_field = field
model = field.path_infos[-1].to_opts.model
if len(relation_parts) <= 1:
# Either a local field filter, or no fields at all.
return True
valid_lookups = {self.date_hierarchy}
for filter_item in self.list_filter:
if isinstance(filter_item, type) and issubclass(
filter_item, SimpleListFilter
):
valid_lookups.add(filter_item.parameter_name)
elif isinstance(filter_item, (list, tuple)):
valid_lookups.add(filter_item[0])
else:
valid_lookups.add(filter_item)
# Is it a valid relational lookup?
return not {
LOOKUP_SEP.join(relation_parts),
LOOKUP_SEP.join(relation_parts + [part]),
}.isdisjoint(valid_lookups)
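    # Illustrative note: with list_filter = ("employee__department__code",)
    # (hypothetical path), a changelist query string filtering on that path
    # passes this check, while a multi-hop lookup not named in list_filter
    # or date_hierarchy is rejected as disallowed.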
def to_field_allowed(self, request, to_field):
"""
Return True if the model associated with this admin should be
allowed to be referenced by the specified field.
"""
opts = self.model._meta
try:
field = opts.get_field(to_field)
except FieldDoesNotExist:
return False
# Always allow referencing the primary key since it's already possible
# to get this information from the change view URL.
if field.primary_key:
return True
# Allow reverse relationships to models defining m2m fields if they
# target the specified field.
for many_to_many in opts.many_to_many:
if many_to_many.m2m_target_field_name() == to_field:
return True
# Make sure at least one of the models registered for this site
# references this field through a FK or a M2M relationship.
registered_models = set()
for model, admin in self.admin_site._registry.items():
registered_models.add(model)
for inline in admin.inlines:
registered_models.add(inline.model)
related_objects = (
f
for f in opts.get_fields(include_hidden=True)
if (f.auto_created and not f.concrete)
)
for related_object in related_objects:
related_model = related_object.related_model
remote_field = related_object.field.remote_field
if (
any(issubclass(model, related_model) for model in registered_models)
and hasattr(remote_field, "get_related_field")
and remote_field.get_related_field() == field
):
return True
return False
def has_add_permission(self, request):
"""
Return True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename("add", opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
"""
Return True if the given request has permission to change the given
        Django model instance. The default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename("change", opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
        Return True if the given request has permission to delete the given
        Django model instance. The default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename("delete", opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_view_permission(self, request, obj=None):
"""
Return True if the given request has permission to view the given
Django model instance. The default implementation doesn't examine the
`obj` parameter.
If overridden by the user in subclasses, it should return True if the
given request has permission to view the `obj` model instance. If `obj`
is None, it should return True if the request has permission to view
any object of the given type.
"""
opts = self.opts
codename_view = get_permission_codename("view", opts)
codename_change = get_permission_codename("change", opts)
return request.user.has_perm(
"%s.%s" % (opts.app_label, codename_view)
) or request.user.has_perm("%s.%s" % (opts.app_label, codename_change))
def has_view_or_change_permission(self, request, obj=None):
return self.has_view_permission(request, obj) or self.has_change_permission(
request, obj
)
def has_module_permission(self, request):
"""
Return True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
class ModelAdmin(BaseModelAdmin):
"""Encapsulate all admin options and functionality for a given model."""
list_display = ("__str__",)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
search_help_text = None
date_hierarchy = None
save_as = False
save_as_continue = True
save_on_top = False
paginator = Paginator
preserve_filters = True
inlines = ()
# Custom templates (designed to be over-ridden in subclasses)
add_form_template = None
change_form_template = None
change_list_template = None
delete_confirmation_template = None
delete_selected_confirmation_template = None
object_history_template = None
popup_response_template = None
# Actions
actions = ()
action_form = helpers.ActionForm
actions_on_top = True
actions_on_bottom = False
actions_selection_counter = True
checks_class = ModelAdminChecks
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super().__init__()
def __str__(self):
return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
def __repr__(self):
return (
f"<{self.__class__.__qualname__}: model={self.model.__qualname__} "
f"site={self.admin_site!r}>"
)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.get_inlines(request, obj):
inline = inline_class(self.model, self.admin_site)
if request:
if not (
inline.has_view_or_change_permission(request, obj)
or inline.has_add_permission(request, obj)
or inline.has_delete_permission(request, obj)
):
continue
if not inline.has_add_permission(request, obj):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def get_urls(self):
from django.urls import path
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
wrapper.model_admin = self
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
return [
path("", wrap(self.changelist_view), name="%s_%s_changelist" % info),
path("add/", wrap(self.add_view), name="%s_%s_add" % info),
path(
"<path:object_id>/history/",
wrap(self.history_view),
name="%s_%s_history" % info,
),
path(
"<path:object_id>/delete/",
wrap(self.delete_view),
name="%s_%s_delete" % info,
),
path(
"<path:object_id>/change/",
wrap(self.change_view),
name="%s_%s_change" % info,
),
# For backwards compatibility (was the change url before 1.9)
path(
"<path:object_id>/",
wrap(
RedirectView.as_view(
pattern_name="%s:%s_%s_change"
% ((self.admin_site.name,) + info)
)
),
),
]
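    # Hedged extension sketch (view name hypothetical): subclasses usually
    # prepend custom routes so they take precedence over the catch-all
    # "<path:object_id>/" patterns registered above:
    #   def get_urls(self):
    #       from django.urls import path
    #       extra = [path("report/", self.admin_site.admin_view(self.report_view))]
    #       return extra + super().get_urls()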
@property
def urls(self):
return self.get_urls()
@property
def media(self):
extra = "" if settings.DEBUG else ".min"
js = [
"vendor/jquery/jquery%s.js" % extra,
"jquery.init.js",
"core.js",
"admin/RelatedObjectLookups.js",
"actions.js",
"urlify.js",
"prepopulate.js",
"vendor/xregexp/xregexp%s.js" % extra,
]
return forms.Media(js=["admin/js/%s" % url for url in js])
def get_model_perms(self, request):
"""
Return a dict of all perms for this model. This dict has the keys
``add``, ``change``, ``delete``, and ``view`` mapping to the True/False
for each of those actions.
"""
return {
"add": self.has_add_permission(request),
"change": self.has_change_permission(request),
"delete": self.has_delete_permission(request),
"view": self.has_view_permission(request),
}
def _get_form_for_get_fields(self, request, obj):
return self.get_form(request, obj, fields=None)
def get_form(self, request, obj=None, change=False, **kwargs):
"""
Return a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if "fields" in kwargs:
fields = kwargs.pop("fields")
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
readonly_fields = self.get_readonly_fields(request, obj)
exclude.extend(readonly_fields)
# Exclude all fields if it's a change form and the user doesn't have
# the change permission.
if (
change
and hasattr(request, "user")
and not self.has_change_permission(request, obj)
):
exclude.extend(fields)
if excluded is None and hasattr(self.form, "_meta") and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
# Remove declared form fields which are in readonly_fields.
new_attrs = dict.fromkeys(
f for f in readonly_fields if f in self.form.declared_fields
)
form = type(self.form.__name__, (self.form,), new_attrs)
defaults = {
"form": form,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
if defaults["fields"] is None and not modelform_defines_fields(
defaults["form"]
):
defaults["fields"] = forms.ALL_FIELDS
try:
return modelform_factory(self.model, **defaults)
except FieldError as e:
raise FieldError(
"%s. Check fields/fieldsets/exclude attributes of class %s."
% (e, self.__class__.__name__)
)
def get_changelist(self, request, **kwargs):
"""
Return the ChangeList class for use on the changelist page.
"""
from django.contrib.admin.views.main import ChangeList
return ChangeList
def get_changelist_instance(self, request):
"""
Return a `ChangeList` instance based on `request`. May raise
`IncorrectLookupParameters`.
"""
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
# Add the action checkboxes if any actions are available.
if self.get_actions(request):
list_display = ["action_checkbox", *list_display]
sortable_by = self.get_sortable_by(request)
ChangeList = self.get_changelist(request)
return ChangeList(
request,
self.model,
list_display,
list_display_links,
self.get_list_filter(request),
self.date_hierarchy,
self.get_search_fields(request),
self.get_list_select_related(request),
self.list_per_page,
self.list_max_show_all,
self.list_editable,
self,
sortable_by,
self.search_help_text,
)
def get_object(self, request, object_id, from_field=None):
"""
        Return an instance matching the field and value provided; the primary
key is used if no field is provided. Return ``None`` if no match is
found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = (
model._meta.pk if from_field is None else model._meta.get_field(from_field)
)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Return a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
if defaults.get("fields") is None and not modelform_defines_fields(
defaults.get("form")
):
defaults["fields"] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Return a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
**kwargs,
}
return modelformset_factory(
self.model,
self.get_changelist_form(request),
extra=0,
fields=self.list_editable,
**defaults,
)
def get_formsets_with_inlines(self, request, obj=None):
"""
Yield formsets and the corresponding inlines.
"""
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj), inline
def get_paginator(
self, request, queryset, per_page, orphans=0, allow_empty_first_page=True
):
return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, obj, message):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import ADDITION, LogEntry
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj).pk,
object_id=obj.pk,
object_repr=str(obj),
action_flag=ADDITION,
change_message=message,
)
def log_change(self, request, obj, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import CHANGE, LogEntry
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj).pk,
object_id=obj.pk,
object_repr=str(obj),
action_flag=CHANGE,
change_message=message,
)
def log_deletion(self, request, obj, object_repr):
"""
Log that an object will be deleted. Note that this method must be
called before the deletion.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import DELETION, LogEntry
return LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(obj).pk,
object_id=obj.pk,
object_repr=object_repr,
action_flag=DELETION,
)
@display(description=mark_safe('<input type="checkbox" id="action-toggle">'))
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, str(obj.pk))
@staticmethod
def _get_action_description(func, name):
return getattr(func, "short_description", capfirst(name.replace("_", " ")))
def _get_base_actions(self):
"""Return the list of actions, prior to any request-based filtering."""
actions = []
base_actions = (self.get_action(action) for action in self.actions or [])
# get_action might have returned None, so filter any of those out.
base_actions = [action for action in base_actions if action]
base_action_names = {name for _, name, _ in base_actions}
# Gather actions from the admin site first
for (name, func) in self.admin_site.actions:
if name in base_action_names:
continue
description = self._get_action_description(func, name)
actions.append((func, name, description))
# Add actions from this ModelAdmin.
actions.extend(base_actions)
return actions
def _filter_actions_by_permissions(self, request, actions):
"""Filter out any actions that the user doesn't have access to."""
filtered_actions = []
for action in actions:
callable = action[0]
if not hasattr(callable, "allowed_permissions"):
filtered_actions.append(action)
continue
permission_checks = (
getattr(self, "has_%s_permission" % permission)
for permission in callable.allowed_permissions
)
if any(has_permission(request) for has_permission in permission_checks):
filtered_actions.append(action)
return filtered_actions
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
ModelAdmin to a tuple of (callable, name, description) for each action.
"""
# If self.actions is set to None that means actions are disabled on
# this page.
if self.actions is None or IS_POPUP_VAR in request.GET:
return {}
actions = self._filter_actions_by_permissions(request, self._get_base_actions())
return {name: (func, name, desc) for func, name, desc in actions}
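    # Hedged sketch of a custom action (names hypothetical) that would flow
    # through _get_base_actions()/get_actions() above:
    #   @admin.action(description="Mark selected as published")
    #   def make_published(self, request, queryset):
    #       queryset.update(status="published")
    #   actions = ["make_published"]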
def get_action_choices(self, request, default_choices=models.BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in self.get_actions(request).values():
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
"""
Return a given action from a parameter, which can either be a callable,
        or the name of a method on the ModelAdmin. The return value is a tuple
        of (callable, name, description).
(callable, name, description).
"""
# If the action is a callable, just use it.
if callable(action):
func = action
action = action.__name__
# Next, look for a method. Grab it off self.__class__ to get an unbound
# method instead of a bound one; this ensures that the calling
# conventions are the same for functions and methods.
elif hasattr(self.__class__, action):
func = getattr(self.__class__, action)
# Finally, look for a named method on the admin site
else:
try:
func = self.admin_site.get_action(action)
except KeyError:
return None
description = self._get_action_description(func, action)
return func, action, description
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if (
self.list_display_links
or self.list_display_links is None
or not list_display
):
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
def get_list_filter(self, request):
"""
Return a sequence containing the fields to be displayed as filters in
the right sidebar of the changelist page.
"""
return self.list_filter
def get_list_select_related(self, request):
"""
Return a list of fields to add to the select_related() part of the
changelist items query.
"""
return self.list_select_related
def get_search_fields(self, request):
"""
Return a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return self.search_fields
def get_search_results(self, request, queryset, search_term):
"""
Return a tuple containing a queryset to implement the search
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith("^"):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith("="):
return "%s__iexact" % field_name[1:]
elif field_name.startswith("@"):
return "%s__search" % field_name[1:]
# Use field_name if it includes a lookup.
opts = queryset.model._meta
lookup_fields = field_name.split(LOOKUP_SEP)
# Go through the fields, following all relations.
prev_field = None
for path_part in lookup_fields:
if path_part == "pk":
path_part = opts.pk.name
try:
field = opts.get_field(path_part)
except FieldDoesNotExist:
# Use valid query lookups.
if prev_field and prev_field.get_lookup(path_part):
return field_name
else:
prev_field = field
if hasattr(field, "path_infos"):
# Update opts to follow the relation.
opts = field.path_infos[-1].to_opts
# Otherwise, use the field with icontains.
return "%s__icontains" % field_name
may_have_duplicates = False
search_fields = self.get_search_fields(request)
if search_fields and search_term:
orm_lookups = [
construct_search(str(search_field)) for search_field in search_fields
]
term_queries = []
for bit in smart_split(search_term):
if bit.startswith(('"', "'")) and bit[0] == bit[-1]:
bit = unescape_string_literal(bit)
or_queries = models.Q(
*((orm_lookup, bit) for orm_lookup in orm_lookups),
_connector=models.Q.OR,
)
term_queries.append(or_queries)
queryset = queryset.filter(models.Q(*term_queries))
may_have_duplicates |= any(
lookup_spawns_duplicates(self.opts, search_spec)
for search_spec in orm_lookups
)
return queryset, may_have_duplicates
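    # Illustrative note (field names hypothetical): the prefixes handled by
    # construct_search() map as follows:
    #   search_fields = ["^name", "=email", "@body", "tags__name"]
    # becomes name__istartswith, email__iexact, body__search (full text),
    # and tags__name__icontains respectively; each whitespace-separated
    # search term must match at least one of these lookups.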
def get_preserved_filters(self, request):
"""
Return the preserved filters querystring.
"""
match = request.resolver_match
if self.preserve_filters and match:
opts = self.model._meta
current_url = "%s:%s" % (match.app_name, match.url_name)
changelist_url = "admin:%s_%s_changelist" % (
opts.app_label,
opts.model_name,
)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get("_changelist_filters")
if preserved_filters:
return urlencode({"_changelist_filters": preserved_filters})
return ""
def construct_change_message(self, request, form, formsets, add=False):
"""
Construct a JSON structure describing changes from a changed object.
"""
return construct_change_message(form, formsets, add)
def message_user(
self, request, message, level=messages.INFO, extra_tags="", fail_silently=False
):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
Exposes almost the same API as messages.add_message(), but accepts the
positional arguments in a different order to maintain backwards
compatibility. For convenience, it accepts the `level` argument as
a string rather than the usual level number.
"""
if not isinstance(level, int):
# attempt to get the level if passed a string
try:
level = getattr(messages.constants, level.upper())
except AttributeError:
levels = messages.constants.DEFAULT_TAGS.values()
levels_repr = ", ".join("`%s`" % level for level in levels)
raise ValueError(
"Bad message level string: `%s`. Possible values are: %s"
% (level, levels_repr)
)
messages.add_message(
request, level, message, extra_tags=extra_tags, fail_silently=fail_silently
)
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def delete_model(self, request, obj):
"""
Given a model instance delete it from the database.
"""
obj.delete()
def delete_queryset(self, request, queryset):
"""Given a queryset, delete it from the database."""
queryset.delete()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
def render_change_form(
self, request, context, add=False, change=False, form_url="", obj=None
):
opts = self.model._meta
app_label = opts.app_label
preserved_filters = self.get_preserved_filters(request)
form_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, form_url
)
view_on_site_url = self.get_view_on_site_url(obj)
has_editable_inline_admin_formsets = False
for inline in context["inline_admin_formsets"]:
if (
inline.has_add_permission
or inline.has_change_permission
or inline.has_delete_permission
):
has_editable_inline_admin_formsets = True
break
context.update(
{
"add": add,
"change": change,
"has_view_permission": self.has_view_permission(request, obj),
"has_add_permission": self.has_add_permission(request),
"has_change_permission": self.has_change_permission(request, obj),
"has_delete_permission": self.has_delete_permission(request, obj),
"has_editable_inline_admin_formsets": (
has_editable_inline_admin_formsets
),
"has_file_field": context["adminform"].form.is_multipart()
or any(
admin_formset.formset.is_multipart()
for admin_formset in context["inline_admin_formsets"]
),
"has_absolute_url": view_on_site_url is not None,
"absolute_url": view_on_site_url,
"form_url": form_url,
"opts": opts,
"content_type_id": get_content_type_for_model(self.model).pk,
"save_as": self.save_as,
"save_on_top": self.save_on_top,
"to_field_var": TO_FIELD_VAR,
"is_popup_var": IS_POPUP_VAR,
"app_label": app_label,
}
)
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
request.current_app = self.admin_site.name
return TemplateResponse(
request,
form_template
or [
"admin/%s/%s/change_form.html" % (app_label, opts.model_name),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html",
],
context,
)
def response_add(self, request, obj, post_url_continue=None):
"""
Determine the HttpResponse for the add_view stage.
"""
opts = obj._meta
preserved_filters = self.get_preserved_filters(request)
obj_url = reverse(
"admin:%s_%s_change" % (opts.app_label, opts.model_name),
args=(quote(obj.pk),),
current_app=self.admin_site.name,
)
# Add a link to the object's change form if the user can edit the obj.
if self.has_change_permission(request, obj):
obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj)
else:
obj_repr = str(obj)
msg_dict = {
"name": opts.verbose_name,
"obj": obj_repr,
}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
popup_response_data = json.dumps(
{
"value": str(value),
"obj": str(obj),
}
)
return TemplateResponse(
request,
self.popup_response_template
or [
"admin/%s/%s/popup_response.html"
% (opts.app_label, opts.model_name),
"admin/%s/popup_response.html" % opts.app_label,
"admin/popup_response.html",
],
{
"popup_response_data": popup_response_data,
},
)
elif "_continue" in request.POST or (
# Redirecting after "Save as new".
"_saveasnew" in request.POST
and self.save_as_continue
and self.has_change_permission(request, obj)
):
msg = _("The {name} “{obj}” was added successfully.")
if self.has_change_permission(request, obj):
msg += " " + _("You may edit it again below.")
self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS)
if post_url_continue is None:
post_url_continue = obj_url
post_url_continue = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts},
post_url_continue,
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was added successfully. You may add another "
"{name} below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_("The {name} “{obj}” was added successfully."), **msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
"""
Determine the HttpResponse for the change_view stage.
"""
if IS_POPUP_VAR in request.POST:
opts = obj._meta
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else opts.pk.attname
value = request.resolver_match.kwargs["object_id"]
new_value = obj.serializable_value(attr)
popup_response_data = json.dumps(
{
"action": "change",
"value": str(value),
"obj": str(obj),
"new_value": str(new_value),
}
)
return TemplateResponse(
request,
self.popup_response_template
or [
"admin/%s/%s/popup_response.html"
% (opts.app_label, opts.model_name),
"admin/%s/popup_response.html" % opts.app_label,
"admin/popup_response.html",
],
{
"popup_response_data": popup_response_data,
},
)
opts = self.model._meta
preserved_filters = self.get_preserved_filters(request)
msg_dict = {
"name": opts.verbose_name,
"obj": format_html('<a href="{}">{}</a>', urlquote(request.path), obj),
}
if "_continue" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was changed successfully. You may edit it "
"again below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was added successfully. You may edit it again "
"below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse(
"admin:%s_%s_change" % (opts.app_label, opts.model_name),
args=(obj.pk,),
current_app=self.admin_site.name,
)
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = format_html(
_(
"The {name} “{obj}” was changed successfully. You may add another "
"{name} below."
),
**msg_dict,
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse(
"admin:%s_%s_add" % (opts.app_label, opts.model_name),
current_app=self.admin_site.name,
)
redirect_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, redirect_url
)
return HttpResponseRedirect(redirect_url)
else:
msg = format_html(
_("The {name} “{obj}” was changed successfully."), **msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def _response_post_save(self, request, obj):
opts = self.model._meta
if self.has_view_or_change_permission(request):
post_url = reverse(
"admin:%s_%s_changelist" % (opts.app_label, opts.model_name),
current_app=self.admin_site.name,
)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, post_url
)
else:
post_url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
return self._response_post_save(request, obj)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
return self._response_post_save(request, obj)
def response_action(self, request, queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get("index", 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({"action": data.getlist("action")[action_index]})
except IndexError:
# If we didn't get an action from the chosen form, that's invalid
# POST data, so by deleting "action" it'll fail the validation check
# below. So there's no need to do anything here.
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields["action"].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data["action"]
select_across = action_form.cleaned_data["select_across"]
func = self.get_actions(request)[action][0]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail, unless "select across" was
# checked, in which case the action applies to all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _(
"Items must be selected in order to perform "
"actions on them. No items have been changed."
)
self.message_user(request, msg, messages.WARNING)
return None
if not select_across:
# Perform the action only on the selected objects
queryset = queryset.filter(pk__in=selected)
response = func(self, request, queryset)
# Actions may return an HttpResponse-like object, which will be
# used as the response from the POST. If not, we'll be a good
# little HTTP citizen and redirect back to the changelist page.
if isinstance(response, HttpResponseBase):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg, messages.WARNING)
return None
def response_delete(self, request, obj_display, obj_id):
"""
Determine the HttpResponse for the delete_view stage.
"""
opts = self.model._meta
if IS_POPUP_VAR in request.POST:
popup_response_data = json.dumps(
{
"action": "delete",
"value": str(obj_id),
}
)
return TemplateResponse(
request,
self.popup_response_template
or [
"admin/%s/%s/popup_response.html"
% (opts.app_label, opts.model_name),
"admin/%s/popup_response.html" % opts.app_label,
"admin/popup_response.html",
],
{
"popup_response_data": popup_response_data,
},
)
self.message_user(
request,
_("The %(name)s “%(obj)s” was deleted successfully.")
% {
"name": opts.verbose_name,
"obj": obj_display,
},
messages.SUCCESS,
)
if self.has_change_permission(request, None):
post_url = reverse(
"admin:%s_%s_changelist" % (opts.app_label, opts.model_name),
current_app=self.admin_site.name,
)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{"preserved_filters": preserved_filters, "opts": opts}, post_url
)
else:
post_url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
opts = self.model._meta
app_label = opts.app_label
request.current_app = self.admin_site.name
context.update(
to_field_var=TO_FIELD_VAR,
is_popup_var=IS_POPUP_VAR,
media=self.media,
)
return TemplateResponse(
request,
self.delete_confirmation_template
or [
"admin/{}/{}/delete_confirmation.html".format(
app_label, opts.model_name
),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html",
],
context,
)
def get_inline_formsets(self, request, formsets, inline_instances, obj=None):
# Edit permissions on parent model are required for editable inlines.
can_edit_parent = (
self.has_change_permission(request, obj)
if obj
else self.has_add_permission(request)
)
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
if can_edit_parent:
has_add_permission = inline.has_add_permission(request, obj)
has_change_permission = inline.has_change_permission(request, obj)
has_delete_permission = inline.has_delete_permission(request, obj)
else:
# Disable all edit-permissions, and override formset settings.
has_add_permission = (
has_change_permission
) = has_delete_permission = False
formset.extra = formset.max_num = 0
has_view_permission = inline.has_view_permission(request, obj)
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(
inline,
formset,
fieldsets,
prepopulated,
readonly,
model_admin=self,
has_add_permission=has_add_permission,
has_change_permission=has_change_permission,
has_delete_permission=has_delete_permission,
has_view_permission=has_view_permission,
)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data from the request's GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.model._meta.get_field(k)
except FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
def _get_obj_does_not_exist_redirect(self, request, opts, object_id):
"""
Create a message informing the user that the object doesn't exist
and return a redirect to the admin index page.
"""
msg = _("%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?") % {
"name": opts.verbose_name,
"key": unquote(object_id),
}
self.message_user(request, msg, messages.WARNING)
url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(url)
@csrf_protect_m
def changeform_view(self, request, object_id=None, form_url="", extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._changeform_view(request, object_id, form_url, extra_context)
def _changeform_view(self, request, object_id, form_url, extra_context):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField(
"The field %s cannot be referenced." % to_field
)
model = self.model
opts = model._meta
if request.method == "POST" and "_saveasnew" in request.POST:
object_id = None
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if request.method == "POST":
if not self.has_change_permission(request, obj):
raise PermissionDenied
else:
if not self.has_view_or_change_permission(request, obj):
raise PermissionDenied
if obj is None:
return self._get_obj_does_not_exist_redirect(request, opts, object_id)
fieldsets = self.get_fieldsets(request, obj)
ModelForm = self.get_form(
request, obj, change=not add, fields=flatten_fieldsets(fieldsets)
)
if request.method == "POST":
form = ModelForm(request.POST, request.FILES, instance=obj)
formsets, inline_instances = self._create_formsets(
request,
form.instance,
change=not add,
)
form_validated = form.is_valid()
if form_validated:
new_object = self.save_form(request, form, change=not add)
else:
new_object = form.instance
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
change_message = self.construct_change_message(
request, form, formsets, add
)
if add:
self.log_addition(request, new_object, change_message)
return self.response_add(request, new_object)
else:
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form_validated = False
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(
request, form.instance, change=False
)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(
request, obj, change=True
)
if not add and not self.has_change_permission(request, obj):
readonly_fields = flatten_fieldsets(fieldsets)
else:
readonly_fields = self.get_readonly_fields(request, obj)
adminForm = helpers.AdminForm(
form,
list(fieldsets),
# Clear prepopulated fields on a view-only form to avoid a crash.
self.get_prepopulated_fields(request, obj)
if add or self.has_change_permission(request, obj)
else {},
readonly_fields,
model_admin=self,
)
media = self.media + adminForm.media
inline_formsets = self.get_inline_formsets(
request, formsets, inline_instances, obj
)
for inline_formset in inline_formsets:
media = media + inline_formset.media
if add:
title = _("Add %s")
elif self.has_change_permission(request, obj):
title = _("Change %s")
else:
title = _("View %s")
context = {
**self.admin_site.each_context(request),
"title": title % opts.verbose_name,
"subtitle": str(obj) if obj else None,
"adminform": adminForm,
"object_id": object_id,
"original": obj,
"is_popup": IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET,
"to_field": to_field,
"media": media,
"inline_admin_formsets": inline_formsets,
"errors": helpers.AdminErrorList(form, formsets),
"preserved_filters": self.get_preserved_filters(request),
}
# Hide the "Save" and "Save and continue" buttons if "Save as New" was
# previously chosen to prevent the interface from getting confusing.
if (
request.method == "POST"
and not form_validated
and "_saveasnew" in request.POST
):
context["show_save"] = False
context["show_save_and_continue"] = False
# Use the change template instead of the add template.
add = False
context.update(extra_context or {})
return self.render_change_form(
request, context, add=add, change=not add, obj=obj, form_url=form_url
)
def add_view(self, request, form_url="", extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url="", extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
def _get_edited_object_pks(self, request, prefix):
"""Return POST data values of list_editable primary keys."""
pk_pattern = re.compile(
r"{}-\d+-{}$".format(re.escape(prefix), self.model._meta.pk.name)
)
return [value for key, value in request.POST.items() if pk_pattern.match(key)]
def _get_list_editable_queryset(self, request, prefix):
"""
Based on POST data, return a queryset of the objects that were edited
via list_editable.
"""
object_pks = self._get_edited_object_pks(request, prefix)
queryset = self.get_queryset(request)
validate = queryset.model._meta.pk.to_python
try:
for pk in object_pks:
validate(pk)
except ValidationError:
# Disable the optimization if the POST data was tampered with.
return queryset
return queryset.filter(pk__in=object_pks)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
"""
The 'change list' admin view for this model.
"""
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_view_or_change_permission(request):
raise PermissionDenied
try:
cl = self.get_changelist_instance(request)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given
# and the 'invalid=1' parameter was already in the query string,
# something is screwed up with the database, so display an error
# page.
if ERROR_FLAG in request.GET:
return SimpleTemplateResponse(
"admin/invalid_setup.html",
{
"title": _("Database error"),
},
)
return HttpResponseRedirect(request.path + "?" + ERROR_FLAG + "=1")
# If the request was POSTed, this might be a bulk action or a bulk
# edit. Try to look up an action or confirmation first, but if this
# isn't an action the POST will fall through to the bulk edit check,
# below.
action_failed = False
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
actions = self.get_actions(request)
# Actions with no confirmation
if (
actions
and request.method == "POST"
and "index" in request.POST
and "_save" not in request.POST
):
if selected:
response = self.response_action(
request, queryset=cl.get_queryset(request)
)
if response:
return response
else:
action_failed = True
else:
msg = _(
"Items must be selected in order to perform "
"actions on them. No items have been changed."
)
self.message_user(request, msg, messages.WARNING)
action_failed = True
# Actions with confirmation
if (
actions
and request.method == "POST"
and helpers.ACTION_CHECKBOX_NAME in request.POST
and "index" not in request.POST
and "_save" not in request.POST
):
if selected:
response = self.response_action(
request, queryset=cl.get_queryset(request)
)
if response:
return response
else:
action_failed = True
if action_failed:
# Redirect back to the changelist page to avoid resubmitting the
# form if the user refreshes the browser or uses the "No, take
# me back" button on the action confirmation page.
return HttpResponseRedirect(request.get_full_path())
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if request.method == "POST" and cl.list_editable and "_save" in request.POST:
if not self.has_change_permission(request):
raise PermissionDenied
FormSet = self.get_changelist_formset(request)
modified_objects = self._get_list_editable_queryset(
request, FormSet.get_default_prefix()
)
formset = cl.formset = FormSet(
request.POST, request.FILES, queryset=modified_objects
)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
msg = ngettext(
"%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount,
) % {
"count": changecount,
"name": model_ngettext(opts, changecount),
}
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif cl.list_editable and self.has_change_permission(request):
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields["action"].choices = self.get_action_choices(request)
media += action_form.media
else:
action_form = None
selection_note_all = ngettext(
"%(total_count)s selected", "All %(total_count)s selected", cl.result_count
)
context = {
**self.admin_site.each_context(request),
"module_name": str(opts.verbose_name_plural),
"selection_note": _("0 of %(cnt)s selected") % {"cnt": len(cl.result_list)},
"selection_note_all": selection_note_all % {"total_count": cl.result_count},
"title": cl.title,
"subtitle": None,
"is_popup": cl.is_popup,
"to_field": cl.to_field,
"cl": cl,
"media": media,
"has_add_permission": self.has_add_permission(request),
"opts": cl.opts,
"action_form": action_form,
"actions_on_top": self.actions_on_top,
"actions_on_bottom": self.actions_on_bottom,
"actions_selection_counter": self.actions_selection_counter,
"preserved_filters": self.get_preserved_filters(request),
**(extra_context or {}),
}
request.current_app = self.admin_site.name
return TemplateResponse(
request,
self.change_list_template
or [
"admin/%s/%s/change_list.html" % (app_label, opts.model_name),
"admin/%s/change_list.html" % app_label,
"admin/change_list.html",
],
context,
)
def get_deleted_objects(self, objs, request):
"""
Hook for customizing the delete process for the delete view and the
"delete selected" action.
"""
return get_deleted_objects(objs, request, self.admin_site)
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._delete_view(request, object_id, extra_context)
def _delete_view(self, request, object_id, extra_context):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField(
"The field %s cannot be referenced." % to_field
)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
return self._get_obj_does_not_exist_redirect(request, opts, object_id)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(
deleted_objects,
model_count,
perms_needed,
protected,
) = self.get_deleted_objects([obj], request)
if request.POST and not protected: # The user has confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = str(obj)
attr = str(to_field) if to_field else opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = str(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = {
**self.admin_site.each_context(request),
"title": title,
"subtitle": None,
"object_name": object_name,
"object": obj,
"deleted_objects": deleted_objects,
"model_count": dict(model_count).items(),
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"app_label": app_label,
"preserved_filters": self.get_preserved_filters(request),
"is_popup": IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET,
"to_field": to_field,
**(extra_context or {}),
}
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
from django.contrib.admin.views.main import PAGE_VAR
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
return self._get_obj_does_not_exist_redirect(
request, model._meta, object_id
)
if not self.has_view_or_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = (
LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model),
)
.select_related()
.order_by("action_time")
)
paginator = self.get_paginator(request, action_list, 100)
page_number = request.GET.get(PAGE_VAR, 1)
page_obj = paginator.get_page(page_number)
page_range = paginator.get_elided_page_range(page_obj.number)
context = {
**self.admin_site.each_context(request),
"title": _("Change history: %s") % obj,
"subtitle": None,
"action_list": page_obj,
"page_range": page_range,
"page_var": PAGE_VAR,
"pagination_required": paginator.count > 100,
"module_name": str(capfirst(opts.verbose_name_plural)),
"object": obj,
"opts": opts,
"preserved_filters": self.get_preserved_filters(request),
**(extra_context or {}),
}
request.current_app = self.admin_site.name
return TemplateResponse(
request,
self.object_history_template
or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html",
],
context,
)
def get_formset_kwargs(self, request, obj, inline, prefix):
formset_params = {
"instance": obj,
"prefix": prefix,
"queryset": inline.get_queryset(request),
}
if request.method == "POST":
formset_params.update(
{
"data": request.POST.copy(),
"files": request.FILES,
"save_as_new": "_saveasnew" in request.POST,
}
)
return formset_params
def _create_formsets(self, request, obj, change):
"Helper function to generate formsets for add/change_view."
formsets = []
inline_instances = []
prefixes = {}
get_formsets_args = [request]
if change:
get_formsets_args.append(obj)
for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset_params = self.get_formset_kwargs(request, obj, inline, prefix)
formset = FormSet(**formset_params)
def user_deleted_form(request, obj, formset, index):
"""Return whether or not the user deleted the form."""
return (
inline.has_delete_permission(request, obj)
and "{}-{}-DELETE".format(formset.prefix, index) in request.POST
)
# Bypass validation of each view-only inline form (since the form's
# data won't be in request.POST), unless the form was deleted.
if not inline.has_change_permission(request, obj if change else None):
for index, form in enumerate(formset.initial_forms):
if user_deleted_form(request, obj, formset, index):
continue
form._errors = {}
form.cleaned_data = form.initial
formsets.append(formset)
inline_instances.append(inline)
return formsets, inline_instances
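# --- Example (sketch, not part of this module) -------------------------------
# response_action() above dispatches to plain callables taking (modeladmin,
# request, queryset). A minimal, hypothetical action and its registration;
# the "Article" model and its "status" field are assumptions for illustration:
#
#     from django.contrib import admin
#
#     @admin.action(description="Mark selected articles as published")
#     def make_published(modeladmin, request, queryset):
#         updated = queryset.update(status="p")
#         modeladmin.message_user(request, "%d articles published." % updated)
#
#     class ArticleAdmin(admin.ModelAdmin):
#         actions = [make_published]
# ------------------------------------------------------------------------------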
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
from ``model`` to its parent. This is required if ``model`` has more than
one ``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
min_num = None
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
show_change_link = False
checks_class = InlineModelAdminChecks
classes = None
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super().__init__()
if self.verbose_name_plural is None:
if self.verbose_name is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
else:
self.verbose_name_plural = format_lazy("{}s", self.verbose_name)
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
@property
def media(self):
extra = "" if settings.DEBUG else ".min"
js = ["vendor/jquery/jquery%s.js" % extra, "jquery.init.js", "inlines.js"]
if self.filter_vertical or self.filter_horizontal:
js.extend(["SelectBox.js", "SelectFilter2.js"])
if self.classes and "collapse" in self.classes:
js.append("collapse.js")
return forms.Media(js=["admin/js/%s" % url for url in js])
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
def get_formset(self, request, obj=None, **kwargs):
"""Return a BaseInlineFormSet class for use in admin add/change views."""
if "fields" in kwargs:
fields = kwargs.pop("fields")
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
excluded = self.get_exclude(request, obj)
exclude = [] if excluded is None else list(excluded)
exclude.extend(self.get_readonly_fields(request, obj))
if excluded is None and hasattr(self.form, "_meta") and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"extra": self.get_extra(request, obj, **kwargs),
"min_num": self.get_min_num(request, obj, **kwargs),
"max_num": self.get_max_num(request, obj, **kwargs),
"can_delete": can_delete,
**kwargs,
}
base_model_form = defaults["form"]
can_change = self.has_change_permission(request, obj) if request else True
can_add = self.has_add_permission(request, obj) if request else True
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
We don't validate the 'DELETE' field itself because on
templates it's not rendered using the field information, but
just using a generic "deletion_field" of the InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance._state.adding:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance
# representation, suitable to be an item in a
# list.
_("%(class_name)s %(instance)s")
% {"class_name": p._meta.verbose_name, "instance": p}
)
params = {
"class_name": self._meta.model._meta.verbose_name,
"instance": self.instance,
"related_objects": get_text_list(objs, _("and")),
}
msg = _(
"Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s"
)
raise ValidationError(
msg, code="deleting_protected", params=params
)
def is_valid(self):
result = super().is_valid()
self.hand_clean_DELETE()
return result
def has_changed(self):
# Protect against unauthorized edits.
if not can_change and not self.instance._state.adding:
return False
if not can_add and self.instance._state.adding:
return False
return super().has_changed()
defaults["form"] = DeleteProtectedModelForm
if defaults["fields"] is None and not modelform_defines_fields(
defaults["form"]
):
defaults["fields"] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def _get_form_for_get_fields(self, request, obj=None):
return self.get_formset(request, obj, fields=None).form
def get_queryset(self, request):
queryset = super().get_queryset(request)
if not self.has_view_or_change_permission(request):
queryset = queryset.none()
return queryset
def _has_any_perms_for_target_model(self, request, perms):
"""
This method is called only when the ModelAdmin's model is for a
ManyToManyField's implicit through model (if self.opts.auto_created).
Return True if the user has any of the given permissions ('add',
'change', etc.) for the model that points to the through model.
"""
opts = self.opts
# Find the target model of an auto-created many-to-many relationship.
for field in opts.fields:
if field.remote_field and field.remote_field.model != self.parent_model:
opts = field.remote_field.model._meta
break
return any(
request.user.has_perm(
"%s.%s" % (opts.app_label, get_permission_codename(perm, opts))
)
for perm in perms
)
def has_add_permission(self, request, obj):
if self.opts.auto_created:
# Auto-created intermediate models don't have their own
# permissions. The user needs to have the change permission for the
# related model in order to be able to do anything with the
# intermediate model.
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_add_permission(request)
def has_change_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission().
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_change_permission(request)
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission().
return self._has_any_perms_for_target_model(request, ["change"])
return super().has_delete_permission(request, obj)
def has_view_permission(self, request, obj=None):
if self.opts.auto_created:
# Same comment as has_add_permission(). The 'change' permission
# also implies the 'view' permission.
return self._has_any_perms_for_target_model(request, ["view", "change"])
return super().has_view_permission(request)
class StackedInline(InlineModelAdmin):
template = "admin/edit_inline/stacked.html"
class TabularInline(InlineModelAdmin):
template = "admin/edit_inline/tabular.html"
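# --- Example (sketch, not part of this module) -------------------------------
# How the inline classes above are typically wired up; "Book" and "Chapter"
# are hypothetical models, with Chapter holding a ForeignKey to Book:
#
#     from django.contrib import admin
#
#     class ChapterInline(admin.TabularInline):
#         model = Chapter
#         extra = 1                # override the default of 3 blank forms
#         show_change_link = True
#
#     class BookAdmin(admin.ModelAdmin):
#         inlines = [ChapterInline]
# ------------------------------------------------------------------------------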
|
2c48f15e77963188d9307b27e94e7b1776b5a0344339f61107d691671289ddee | import json
import os
import posixpath
import re
from urllib.parse import unquote, urldefrag, urlsplit, urlunsplit
from django.conf import settings
from django.contrib.staticfiles.utils import check_settings, matches_patterns
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.crypto import md5
from django.utils.functional import LazyObject
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super().__init__(location, base_url, *args, **kwargs)
# FileSystemStorage falls back to MEDIA_ROOT when location
# is empty, so we restore the empty value.
if not location:
self.base_location = None
self.location = None
def path(self, name):
if not self.location:
raise ImproperlyConfigured(
"You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path."
)
return super().path(name)
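# --- Example (sketch, not part of this module) -------------------------------
# The settings StaticFilesStorage reads; path() above raises
# ImproperlyConfigured when STATIC_ROOT is unset. Values are illustrative:
#
#     # settings.py
#     STATIC_URL = "static/"
#     STATIC_ROOT = BASE_DIR / "staticfiles"
# ------------------------------------------------------------------------------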
class HashedFilesMixin:
default_template = """url("%(url)s")"""
max_post_process_passes = 5
patterns = (
(
"*.css",
(
r"""(?P<matched>url\(['"]{0,1}\s*(?P<url>.*?)["']{0,1}\))""",
(
r"""(?P<matched>@import\s*["']\s*(?P<url>.*?)["'])""",
"""@import url("%(url)s")""",
),
(
(
r"(?m)(?P<matched>)^(/\*#[ \t]"
r"(?-i:sourceMappingURL)=(?P<url>.*)[ \t]*\*/)$"
),
"/*# sourceMappingURL=%(url)s */",
),
),
),
(
"*.js",
(
(
r"(?m)(?P<matched>)^(//# (?-i:sourceMappingURL)=(?P<url>.*))$",
"//# sourceMappingURL=%(url)s",
),
),
),
)
keep_intermediate_files = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._patterns = {}
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Return a hash of the file with the given name and optional content.
"""
if content is None:
return None
hasher = md5(usedforsecurity=False)
for chunk in content.chunks():
hasher.update(chunk)
return hasher.hexdigest()[:12]
def hashed_name(self, name, content=None, filename=None):
# `filename` is the name of file to hash if `content` isn't given.
# `name` is the base name to construct the new hashed filename from.
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
filename = (filename and urlsplit(unquote(filename)).path.strip()) or clean_name
opened = content is None
if opened:
if not self.exists(filename):
raise ValueError(
"The file '%s' could not be found with %r." % (filename, self)
)
try:
content = self.open(filename)
except OSError:
# Handle directory paths and fragments
return name
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
file_hash = (".%s" % file_hash) if file_hash else ""
hashed_name = os.path.join(path, "%s%s%s" % (root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
# Special casing for a @font-face hack, like url("myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if "?#" in name and not unparsed_name[3]:
unparsed_name[2] += "?"
return urlunsplit(unparsed_name)
def _url(self, hashed_name_func, name, force=False, hashed_files=None):
"""
Return the non-hashed URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ""
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith("/"): # don't hash paths
hashed_name = name
else:
args = (clean_name,)
if hashed_files is not None:
args += (hashed_files,)
hashed_name = hashed_name_func(*args)
final_url = super().url(hashed_name)
# Special casing for a @font-face hack, like url("myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = "?#" in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += "?"
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url(self, name, force=False):
"""
Return the non-hashed URL in DEBUG mode.
"""
return self._url(self.stored_name, name, force)
def url_converter(self, name, hashed_files, template=None):
"""
Return the custom URL converter for the given file name.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Convert the matched URL to a normalized and hashed URL.
This requires figuring out which files the matched URL resolves
to and calling the url() method of the storage.
"""
matches = matchobj.groupdict()
matched = matches["matched"]
url = matches["url"]
# Ignore absolute/protocol-relative and data-uri URLs.
if re.match(r"^[a-z]+:", url):
return matched
# Ignore absolute URLs that don't point to a static file (dynamic
# CSS / JS?). Note that STATIC_URL cannot be empty.
if url.startswith("/") and not url.startswith(settings.STATIC_URL):
return matched
# Strip off the fragment so a path-like fragment won't interfere.
url_path, fragment = urldefrag(url)
if url_path.startswith("/"):
# Otherwise the condition above would have returned prematurely.
assert url_path.startswith(settings.STATIC_URL)
target_name = url_path[len(settings.STATIC_URL) :]
else:
# We're using the posixpath module to mix paths and URLs conveniently.
source_name = name if os.sep == "/" else name.replace(os.sep, "/")
target_name = posixpath.join(posixpath.dirname(source_name), url_path)
# Determine the hashed name of the target file with the storage backend.
hashed_url = self._url(
self._stored_name,
unquote(target_name),
force=True,
hashed_files=hashed_files,
)
transformed_url = "/".join(
url_path.split("/")[:-1] + hashed_url.split("/")[-1:]
)
# Restore the fragment that was stripped off earlier.
if fragment:
transformed_url += ("?#" if "?#" in url else "#") + fragment
# Return the hashed version to the file
matches["url"] = unquote(transformed_url)
return template % matches
return converter
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given dictionary of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_files = {}
# build a list of adjustable files
adjustable_paths = [
path for path in paths if matches_patterns(path, self._patterns)
]
# Adjustable files to yield at end, keyed by the original path.
processed_adjustable_paths = {}
# Do a single pass first. Post-process all files once, yielding
# non-adjustable files and exceptions, and collecting adjustable files.
for name, hashed_name, processed, _ in self._post_process(
paths, adjustable_paths, hashed_files
):
if name not in adjustable_paths or isinstance(processed, Exception):
yield name, hashed_name, processed
else:
processed_adjustable_paths[name] = (name, hashed_name, processed)
paths = {path: paths[path] for path in adjustable_paths}
substitutions = False
for i in range(self.max_post_process_passes):
substitutions = False
for name, hashed_name, processed, subst in self._post_process(
paths, adjustable_paths, hashed_files
):
# Overwrite since hashed_name may be newer.
processed_adjustable_paths[name] = (name, hashed_name, processed)
substitutions = substitutions or subst
if not substitutions:
break
if substitutions:
yield "All", None, RuntimeError("Max post-process passes exceeded.")
# Store the processed paths
self.hashed_files.update(hashed_files)
# Yield adjustable files with final, hashed name.
yield from processed_adjustable_paths.values()
def _post_process(self, paths, adjustable_paths, hashed_files):
# Sort the files by directory level
def path_level(name):
return len(name.split(os.sep))
for name in sorted(paths, key=path_level, reverse=True):
substitutions = True
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
# generate the hash with the original content, even for
# adjustable files.
if hash_key not in hashed_files:
hashed_name = self.hashed_name(name, original_file)
else:
hashed_name = hashed_files[hash_key]
# then get the original's file content..
if hasattr(original_file, "seek"):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
old_hashed_name = hashed_name
content = original_file.read().decode("utf-8")
for extension, patterns in self._patterns.items():
if matches_patterns(path, (extension,)):
for pattern, template in patterns:
converter = self.url_converter(
name, hashed_files, template
)
try:
content = pattern.sub(converter, content)
except ValueError as exc:
yield name, None, exc, False
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(content.encode())
if self.keep_intermediate_files:
# Save intermediate file for reference
self._save(hashed_name, content_file)
hashed_name = self.hashed_name(name, content_file)
if self.exists(hashed_name):
self.delete(hashed_name)
saved_name = self._save(hashed_name, content_file)
hashed_name = self.clean_name(saved_name)
# If the file hash stayed the same, this file didn't change
if old_hashed_name == hashed_name:
substitutions = False
processed = True
if not processed:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = self.clean_name(saved_name)
# and then set the cache accordingly
hashed_files[hash_key] = hashed_name
yield name, hashed_name, processed, substitutions
def clean_name(self, name):
return name.replace("\\", "/")
def hash_key(self, name):
return name
def _stored_name(self, name, hashed_files):
# Normalize the path to avoid multiple names for the same file like
# ../foo/bar.css and ../foo/../foo/bar.css which normalize to the same
# path.
name = posixpath.normpath(name)
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = hashed_files.get(hash_key)
if cache_name is None:
cache_name = self.clean_name(self.hashed_name(name))
return cache_name
def stored_name(self, name):
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name:
return cache_name
# No cached name found, recalculate it from the files.
intermediate_name = name
for i in range(self.max_post_process_passes + 1):
cache_name = self.clean_name(
self.hashed_name(name, content=None, filename=intermediate_name)
)
if intermediate_name == cache_name:
# Store the hashed name if there was a miss.
self.hashed_files[hash_key] = cache_name
return cache_name
else:
# Move on to the next intermediate file.
intermediate_name = cache_name
# If the cache name can't be determined after the max number of passes,
# the intermediate files on disk may be corrupt; avoid an infinite loop.
raise ValueError("The name '%s' could not be hashed with %r." % (name, self))
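# --- Example (sketch, not part of this module) -------------------------------
# The naming scheme file_hash()/hashed_name() implement: the first 12 hex
# characters of the content's MD5 go before the extension. A standalone
# illustration (sketch_hashed_name is hypothetical, not Django API):
#
#     import hashlib
#     import os
#
#     def sketch_hashed_name(name, content):
#         digest = hashlib.md5(content).hexdigest()[:12]
#         root, ext = os.path.splitext(name)
#         return "%s.%s%s" % (root, digest, ext)
#
#     # sketch_hashed_name("css/base.css", b"body{}") -> "css/base.<12 hex>.css"
# ------------------------------------------------------------------------------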
class ManifestFilesMixin(HashedFilesMixin):
manifest_version = "1.0" # the manifest format standard
manifest_name = "staticfiles.json"
manifest_strict = True
keep_intermediate_files = False
def __init__(self, *args, manifest_storage=None, **kwargs):
super().__init__(*args, **kwargs)
if manifest_storage is None:
manifest_storage = self
self.manifest_storage = manifest_storage
self.hashed_files = self.load_manifest()
def read_manifest(self):
try:
with self.manifest_storage.open(self.manifest_name) as manifest:
return manifest.read().decode()
except FileNotFoundError:
return None
def load_manifest(self):
content = self.read_manifest()
if content is None:
return {}
try:
stored = json.loads(content)
except json.JSONDecodeError:
pass
else:
version = stored.get("version")
if version == "1.0":
return stored.get("paths", {})
raise ValueError(
"Couldn't load manifest '%s' (version %s)"
% (self.manifest_name, self.manifest_version)
)
def post_process(self, *args, **kwargs):
self.hashed_files = {}
yield from super().post_process(*args, **kwargs)
if not kwargs.get("dry_run"):
self.save_manifest()
def save_manifest(self):
payload = {"paths": self.hashed_files, "version": self.manifest_version}
if self.manifest_storage.exists(self.manifest_name):
self.manifest_storage.delete(self.manifest_name)
contents = json.dumps(payload).encode()
self.manifest_storage._save(self.manifest_name, ContentFile(contents))
def stored_name(self, name):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
hash_key = self.hash_key(clean_name)
cache_name = self.hashed_files.get(hash_key)
if cache_name is None:
if self.manifest_strict:
raise ValueError(
"Missing staticfiles manifest entry for '%s'" % clean_name
)
cache_name = self.clean_name(self.hashed_name(name))
unparsed_name = list(parsed_name)
unparsed_name[2] = cache_name
# Special casing for a @font-face hack, like url("myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if "?#" in name and not unparsed_name[3]:
unparsed_name[2] += "?"
return urlunsplit(unparsed_name)
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
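# --- Example (sketch, not part of this module) -------------------------------
# STATICFILES_STORAGE selects the backend that staticfiles_storage lazily
# wraps; with the manifest backend, collectstatic writes staticfiles.json and
# url() resolves hashed names through it:
#
#     # settings.py
#     STATICFILES_STORAGE = (
#         "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
#     )
# ------------------------------------------------------------------------------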
|
dedeab6a29e53f72874dbd0e2b53e4af44e50743e167c202ecef079d1ce28ccc | import json
from django import template
from django.template.context import Context
from .base import InclusionAdminNode
register = template.Library()
def prepopulated_fields_js(context):
"""
Create a list of prepopulated_fields that should render JavaScript for
the prepopulated fields for both the admin form and inlines.
"""
prepopulated_fields = []
if "adminform" in context:
prepopulated_fields.extend(context["adminform"].prepopulated_fields)
if "inline_admin_formsets" in context:
for inline_admin_formset in context["inline_admin_formsets"]:
for inline_admin_form in inline_admin_formset:
if inline_admin_form.original is None:
prepopulated_fields.extend(inline_admin_form.prepopulated_fields)
prepopulated_fields_json = []
for field in prepopulated_fields:
prepopulated_fields_json.append(
{
"id": "#%s" % field["field"].auto_id,
"name": field["field"].name,
"dependency_ids": [
"#%s" % dependency.auto_id for dependency in field["dependencies"]
],
"dependency_list": [
dependency.name for dependency in field["dependencies"]
],
"maxLength": field["field"].field.max_length or 50,
"allowUnicode": getattr(field["field"].field, "allow_unicode", False),
}
)
context.update(
{
"prepopulated_fields": prepopulated_fields,
"prepopulated_fields_json": json.dumps(prepopulated_fields_json),
}
)
return context
@register.tag(name="prepopulated_fields_js")
def prepopulated_fields_js_tag(parser, token):
return InclusionAdminNode(
parser,
token,
func=prepopulated_fields_js,
template_name="prepopulated_fields_js.html",
)
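# --- Example (sketch, not part of this module) -------------------------------
# The ModelAdmin configuration that feeds prepopulated_fields_js() above;
# "Article", "slug", and "title" are hypothetical. Each such entry becomes one
# object in prepopulated_fields_json, driving the admin's prepopulate JS:
#
#     class ArticleAdmin(admin.ModelAdmin):
#         prepopulated_fields = {"slug": ("title",)}
# ------------------------------------------------------------------------------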
def submit_row(context):
"""
Display the row of buttons for delete and save.
"""
add = context["add"]
change = context["change"]
is_popup = context["is_popup"]
save_as = context["save_as"]
show_save = context.get("show_save", True)
show_save_and_add_another = context.get("show_save_and_add_another", True)
show_save_and_continue = context.get("show_save_and_continue", True)
has_add_permission = context["has_add_permission"]
has_change_permission = context["has_change_permission"]
has_view_permission = context["has_view_permission"]
has_editable_inline_admin_formsets = context["has_editable_inline_admin_formsets"]
can_save = (
(has_change_permission and change)
or (has_add_permission and add)
or has_editable_inline_admin_formsets
)
can_save_and_add_another = (
has_add_permission
and not is_popup
and (not save_as or add)
and can_save
and show_save_and_add_another
)
can_save_and_continue = (
not is_popup and can_save and has_view_permission and show_save_and_continue
)
can_change = has_change_permission or has_editable_inline_admin_formsets
ctx = Context(context)
ctx.update(
{
"can_change": can_change,
"show_delete_link": (
not is_popup
and context["has_delete_permission"]
and change
and context.get("show_delete", True)
),
"show_save_as_new": not is_popup
and has_change_permission
and change
and save_as,
"show_save_and_add_another": can_save_and_add_another,
"show_save_and_continue": can_save_and_continue,
"show_save": show_save and can_save,
"show_close": not (show_save and can_save),
}
)
return ctx
@register.tag(name="submit_row")
def submit_row_tag(parser, token):
return InclusionAdminNode(
parser, token, func=submit_row, template_name="submit_line.html"
)
@register.tag(name="change_form_object_tools")
def change_form_object_tools_tag(parser, token):
"""Display the row of change form object tools."""
return InclusionAdminNode(
parser,
token,
func=lambda context: context,
template_name="change_form_object_tools.html",
)
@register.filter
def cell_count(inline_admin_form):
"""Return the number of cells used in a tabular inline."""
count = 1 # Hidden cell with hidden 'id' field
for fieldset in inline_admin_form:
# Count all visible fields.
for line in fieldset:
for field in line:
try:
is_hidden = field.field.is_hidden
except AttributeError:
is_hidden = field.field["is_hidden"]
if not is_hidden:
count += 1
if inline_admin_form.formset.can_delete:
# Delete checkbox
count += 1
return count
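# --- Usage note (sketch) ------------------------------------------------------
# These tags and filters are rendered by Django's own admin templates: for
# example, the change-form template includes {% submit_row %}, and the tabular
# inline template uses the cell_count filter (roughly
# {{ inline_admin_form|cell_count }}) to size its colspans.
# ------------------------------------------------------------------------------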
|
23e9d6c99730a994c29a133275633ceadd30b5dc684b6b0f8207386eb861d923 | from django.db import migrations, models
def add_legacy_name(apps, schema_editor):
alias = schema_editor.connection.alias
ContentType = apps.get_model("contenttypes", "ContentType")
for ct in ContentType.objects.using(alias):
try:
ct.name = apps.get_model(ct.app_label, ct.model)._meta.object_name
except LookupError:
ct.name = ct.model
ct.save()
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0001_initial"),
]
operations = [
migrations.AlterModelOptions(
name="contenttype",
options={
"verbose_name": "content type",
"verbose_name_plural": "content types",
},
),
migrations.AlterField(
model_name="contenttype",
name="name",
field=models.CharField(max_length=100, null=True),
),
migrations.RunPython(
migrations.RunPython.noop,
add_legacy_name,
hints={"model_name": "contenttype"},
),
migrations.RemoveField(
model_name="contenttype",
name="name",
),
]
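# --- Example (sketch, not part of this migration) -----------------------------
# The hints passed to RunPython above reach database routers as keyword
# arguments; a hypothetical router could use model_name to restrict where the
# data migration runs:
#
#     class ContentTypesRouter:
#         def allow_migrate(self, db, app_label, model_name=None, **hints):
#             if app_label == "contenttypes" and model_name == "contenttype":
#                 return db == "default"
#             return None
# ------------------------------------------------------------------------------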
|
438c38aab0cfa1aab94d0705a8918bea7f696783368c2a15a276fd53683f3a60 | """
Tests for django.core.servers.
"""
import errno
import os
import socket
import threading
from http.client import HTTPConnection
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import urlopen
from django.conf import settings
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIServer
from django.db import DEFAULT_DB_ALIAS, connections
from django.test import LiveServerTestCase, override_settings
from django.test.testcases import LiveServerThread, QuietWSGIRequestHandler
from .models import Person
TEST_ROOT = os.path.dirname(__file__)
TEST_SETTINGS = {
"MEDIA_URL": "media/",
"MEDIA_ROOT": os.path.join(TEST_ROOT, "media"),
"STATIC_URL": "static/",
"STATIC_ROOT": os.path.join(TEST_ROOT, "static"),
}
@override_settings(ROOT_URLCONF="servers.urls", **TEST_SETTINGS)
class LiveServerBase(LiveServerTestCase):
available_apps = [
"servers",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
]
fixtures = ["testdata.json"]
def urlopen(self, url):
return urlopen(self.live_server_url + url)
class CloseConnectionTestServer(ThreadedWSGIServer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This event is set right after the first time a request closes its
# database connections.
self._connections_closed = threading.Event()
def _close_connections(self):
super()._close_connections()
self._connections_closed.set()
class CloseConnectionTestLiveServerThread(LiveServerThread):
server_class = CloseConnectionTestServer
def _create_server(self, connections_override=None):
return super()._create_server(connections_override=self.connections_override)
class LiveServerTestCloseConnectionTest(LiveServerBase):
server_thread_class = CloseConnectionTestLiveServerThread
@classmethod
def _make_connections_override(cls):
conn = connections[DEFAULT_DB_ALIAS]
cls.conn = conn
cls.old_conn_max_age = conn.settings_dict["CONN_MAX_AGE"]
# Set the connection's CONN_MAX_AGE to None to simulate the
# CONN_MAX_AGE setting being set to None on the server. This prevents
# Django from closing the connection and allows testing that
# ThreadedWSGIServer closes connections.
conn.settings_dict["CONN_MAX_AGE"] = None
# Pass a database connection through to the server to check it is being
# closed by ThreadedWSGIServer.
return {DEFAULT_DB_ALIAS: conn}
@classmethod
def tearDownConnectionTest(cls):
cls.conn.settings_dict["CONN_MAX_AGE"] = cls.old_conn_max_age
@classmethod
def tearDownClass(cls):
cls.tearDownConnectionTest()
super().tearDownClass()
def test_closes_connections(self):
# The server's request thread sets this event after closing
# its database connections.
closed_event = self.server_thread.httpd._connections_closed
conn = self.conn
# Open a connection to the database.
conn.connect()
self.assertIsNotNone(conn.connection)
with self.urlopen("/model_view/") as f:
# The server can access the database.
self.assertEqual(f.read().splitlines(), [b"jane", b"robert"])
# Wait for the server's request thread to close the connection.
# A timeout of 0.1 seconds should be more than enough. If the wait
# times out, the assertion that follows will fail.
closed_event.wait(timeout=0.1)
self.assertIsNone(conn.connection)
class FailingLiveServerThread(LiveServerThread):
def _create_server(self):
raise RuntimeError("Error creating server.")
class LiveServerTestCaseSetupTest(LiveServerBase):
server_thread_class = FailingLiveServerThread
@classmethod
def check_allowed_hosts(cls, expected):
if settings.ALLOWED_HOSTS != expected:
raise RuntimeError(f"{settings.ALLOWED_HOSTS} != {expected}")
@classmethod
def setUpClass(cls):
cls.check_allowed_hosts(["testserver"])
try:
super().setUpClass()
except RuntimeError:
# LiveServerTestCase's change to ALLOWED_HOSTS should be reverted.
cls.doClassCleanups()
cls.check_allowed_hosts(["testserver"])
else:
raise RuntimeError("Server did not fail.")
cls.set_up_called = True
def test_set_up_class(self):
self.assertIs(self.set_up_called, True)
class LiveServerAddress(LiveServerBase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# put it in a list to prevent descriptor lookups in test
cls.live_server_url_test = [cls.live_server_url]
def test_live_server_url_is_class_property(self):
self.assertIsInstance(self.live_server_url_test[0], str)
self.assertEqual(self.live_server_url_test[0], self.live_server_url)
class LiveServerSingleThread(LiveServerThread):
def _create_server(self):
return WSGIServer(
(self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False
)
class SingleThreadLiveServerTestCase(LiveServerTestCase):
server_thread_class = LiveServerSingleThread
class LiveServerViews(LiveServerBase):
def test_protocol(self):
"""Launched server serves with HTTP 1.1."""
with self.urlopen("/example_view/") as f:
self.assertEqual(f.version, 11)
def test_closes_connection_without_content_length(self):
"""
An HTTP 1.1 server is supposed to support keep-alive. Since our
development server is rather simple we support it only in cases where
we can detect a content length from the response. This should be doable
for all simple views and streaming responses where an iterable of
length one is passed. The latter follows as a result of `set_content_length`
from https://github.com/python/cpython/blob/main/Lib/wsgiref/handlers.py.
If we cannot detect a content length we explicitly set the `Connection`
header to `close` to notify the client that we do not actually support
it.
"""
conn = HTTPConnection(
LiveServerViews.server_thread.host,
LiveServerViews.server_thread.port,
timeout=1,
)
try:
conn.request(
"GET", "/streaming_example_view/", headers={"Connection": "keep-alive"}
)
response = conn.getresponse()
self.assertTrue(response.will_close)
self.assertEqual(response.read(), b"Iamastream")
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Connection"), "close")
conn.request(
"GET", "/streaming_example_view/", headers={"Connection": "close"}
)
response = conn.getresponse()
self.assertTrue(response.will_close)
self.assertEqual(response.read(), b"Iamastream")
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Connection"), "close")
finally:
conn.close()
def test_keep_alive_on_connection_with_content_length(self):
"""
See `test_closes_connection_without_content_length` for details. This
is a follow-up test, which ensures that we do not close the connection
if it isn't needed, allowing us to take advantage of keep-alive.
"""
conn = HTTPConnection(
LiveServerViews.server_thread.host, LiveServerViews.server_thread.port
)
try:
conn.request("GET", "/example_view/", headers={"Connection": "keep-alive"})
response = conn.getresponse()
self.assertFalse(response.will_close)
self.assertEqual(response.read(), b"example view")
self.assertEqual(response.status, 200)
self.assertIsNone(response.getheader("Connection"))
conn.request("GET", "/example_view/", headers={"Connection": "close"})
response = conn.getresponse()
self.assertFalse(response.will_close)
self.assertEqual(response.read(), b"example view")
self.assertEqual(response.status, 200)
self.assertIsNone(response.getheader("Connection"))
finally:
conn.close()
def test_keep_alive_connection_clears_previous_request_data(self):
conn = HTTPConnection(
LiveServerViews.server_thread.host, LiveServerViews.server_thread.port
)
try:
conn.request(
"POST", "/method_view/", b"{}", headers={"Connection": "keep-alive"}
)
response = conn.getresponse()
self.assertFalse(response.will_close)
self.assertEqual(response.status, 200)
self.assertEqual(response.read(), b"POST")
conn.request(
"POST", "/method_view/", b"{}", headers={"Connection": "close"}
)
response = conn.getresponse()
self.assertFalse(response.will_close)
self.assertEqual(response.status, 200)
self.assertEqual(response.read(), b"POST")
finally:
conn.close()
def test_404(self):
with self.assertRaises(HTTPError) as err:
self.urlopen("/")
err.exception.close()
self.assertEqual(err.exception.code, 404, "Expected 404 response")
def test_view(self):
with self.urlopen("/example_view/") as f:
self.assertEqual(f.read(), b"example view")
def test_static_files(self):
with self.urlopen("/static/example_static_file.txt") as f:
self.assertEqual(f.read().rstrip(b"\r\n"), b"example static file")
def test_no_collectstatic_emulation(self):
"""
LiveServerTestCase reports a 404 status code when the HTTP client
tries to access a static file that isn't explicitly put under
STATIC_ROOT.
"""
with self.assertRaises(HTTPError) as err:
self.urlopen("/static/another_app/another_app_static_file.txt")
err.exception.close()
self.assertEqual(err.exception.code, 404, "Expected 404 response")
def test_media_files(self):
with self.urlopen("/media/example_media_file.txt") as f:
self.assertEqual(f.read().rstrip(b"\r\n"), b"example media file")
def test_environ(self):
with self.urlopen("/environ_view/?%s" % urlencode({"q": "тест"})) as f:
self.assertIn(b"QUERY_STRING: 'q=%D1%82%D0%B5%D1%81%D1%82'", f.read())
@override_settings(ROOT_URLCONF="servers.urls")
class SingleThreadLiveServerViews(SingleThreadLiveServerTestCase):
available_apps = ["servers"]
def test_closes_connection_with_content_length(self):
"""
Contrast to
LiveServerViews.test_keep_alive_on_connection_with_content_length().
Persistent connections require a threading server.
"""
conn = HTTPConnection(
SingleThreadLiveServerViews.server_thread.host,
SingleThreadLiveServerViews.server_thread.port,
timeout=1,
)
try:
conn.request("GET", "/example_view/", headers={"Connection": "keep-alive"})
response = conn.getresponse()
self.assertTrue(response.will_close)
self.assertEqual(response.read(), b"example view")
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Connection"), "close")
finally:
conn.close()
class LiveServerDatabase(LiveServerBase):
def test_fixtures_loaded(self):
"""
Fixtures are properly loaded and visible to the live server thread.
"""
with self.urlopen("/model_view/") as f:
self.assertEqual(f.read().splitlines(), [b"jane", b"robert"])
def test_database_writes(self):
"""
Data written to the database by a view can be read.
"""
with self.urlopen("/create_model_instance/"):
pass
self.assertQuerysetEqual(
Person.objects.order_by("pk"),
["jane", "robert", "emily"],
lambda b: b.name,
)
class LiveServerPort(LiveServerBase):
def test_port_bind(self):
"""
Each LiveServerTestCase binds to a unique port or fails to start a
server thread when run concurrently (#26011).
"""
TestCase = type("TestCase", (LiveServerBase,), {})
try:
TestCase._start_server_thread()
except OSError as e:
if e.errno == errno.EADDRINUSE:
# We're out of ports, LiveServerTestCase correctly fails with
# an OSError.
return
# Unexpected error.
raise
self.assertNotEqual(
self.live_server_url,
TestCase.live_server_url,
f"Acquired duplicate server addresses for server threads: "
f"{self.live_server_url}",
)
def test_specified_port_bind(self):
"""LiveServerTestCase.port customizes the server's port."""
TestCase = type("TestCase", (LiveServerBase,), {})
# Find an open port and tell TestCase to use it.
s = socket.socket()
s.bind(("", 0))
TestCase.port = s.getsockname()[1]
s.close()
TestCase._start_server_thread()
self.assertEqual(
TestCase.port,
TestCase.server_thread.port,
f"Did not use specified port for LiveServerTestCase thread: "
f"{TestCase.port}",
)
class LiveServerThreadedTests(LiveServerBase):
"""If LiveServerTestCase isn't threaded, these tests will hang."""
def test_view_calls_subview(self):
url = "/subview_calling_view/?%s" % urlencode({"url": self.live_server_url})
with self.urlopen(url) as f:
self.assertEqual(f.read(), b"subview calling view: subview")
def test_check_model_instance_from_subview(self):
url = "/check_model_instance_from_subview/?%s" % urlencode(
{
"url": self.live_server_url,
}
)
with self.urlopen(url) as f:
self.assertIn(b"emily", f.read())
|
6c69ed479b5ce257d65380675b42259fa8141daca761ead6b2f6803d7c27d2a0 | from datetime import datetime
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.backends import RemoteUserBackend
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.contrib.auth.models import User
from django.middleware.csrf import _get_new_csrf_string, _mask_cipher_secret
from django.test import (
Client,
TestCase,
ignore_warnings,
modify_settings,
override_settings,
)
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
@override_settings(ROOT_URLCONF="auth_tests.urls")
class RemoteUserTest(TestCase):
middleware = "django.contrib.auth.middleware.RemoteUserMiddleware"
backend = "django.contrib.auth.backends.RemoteUserBackend"
header = "REMOTE_USER"
email_header = "REMOTE_EMAIL"
# Usernames to be passed in REMOTE_USER for the test_known_user test case.
known_user = "knownuser"
known_user2 = "knownuser2"
def setUp(self):
self.patched_settings = modify_settings(
AUTHENTICATION_BACKENDS={"append": self.backend},
MIDDLEWARE={"append": self.middleware},
)
self.patched_settings.enable()
def tearDown(self):
self.patched_settings.disable()
def test_no_remote_user(self):
"""Users are not created when remote user is not specified."""
num_users = User.objects.count()
response = self.client.get("/remote_user/")
self.assertTrue(response.context["user"].is_anonymous)
self.assertEqual(User.objects.count(), num_users)
response = self.client.get("/remote_user/", **{self.header: None})
self.assertTrue(response.context["user"].is_anonymous)
self.assertEqual(User.objects.count(), num_users)
response = self.client.get("/remote_user/", **{self.header: ""})
self.assertTrue(response.context["user"].is_anonymous)
self.assertEqual(User.objects.count(), num_users)
def test_csrf_validation_passes_after_process_request_login(self):
"""
CSRF check must access the CSRF token from the session or cookie,
rather than the request, as rotate_token() may have been called by an
authentication middleware during the process_request() phase.
"""
csrf_client = Client(enforce_csrf_checks=True)
csrf_secret = _get_new_csrf_string()
csrf_token = _mask_cipher_secret(csrf_secret)
csrf_token_form = _mask_cipher_secret(csrf_secret)
headers = {self.header: "fakeuser"}
data = {"csrfmiddlewaretoken": csrf_token_form}
# Verify that CSRF is configured for the view
csrf_client.cookies.load({settings.CSRF_COOKIE_NAME: csrf_token})
response = csrf_client.post("/remote_user/", **headers)
self.assertEqual(response.status_code, 403)
self.assertIn(b"CSRF verification failed.", response.content)
# This request will call django.contrib.auth.login() which will call
# django.middleware.csrf.rotate_token() thus changing the value of
# request.META['CSRF_COOKIE'] from the user submitted value set by
# CsrfViewMiddleware.process_request() to the new csrftoken value set
# by rotate_token(). CSRF validation should still pass when the view is
# later processed by CsrfViewMiddleware.process_view().
csrf_client.cookies.load({settings.CSRF_COOKIE_NAME: csrf_token})
response = csrf_client.post("/remote_user/", data, **headers)
self.assertEqual(response.status_code, 200)
def test_unknown_user(self):
"""
Tests the case where the username passed in the header does not exist
as a User.
"""
num_users = User.objects.count()
response = self.client.get("/remote_user/", **{self.header: "newuser"})
self.assertEqual(response.context["user"].username, "newuser")
self.assertEqual(User.objects.count(), num_users + 1)
User.objects.get(username="newuser")
# Another request with same user should not create any new users.
response = self.client.get("/remote_user/", **{self.header: "newuser"})
self.assertEqual(User.objects.count(), num_users + 1)
def test_known_user(self):
"""
Tests the case where the username passed in the header is a valid User.
"""
User.objects.create(username="knownuser")
User.objects.create(username="knownuser2")
num_users = User.objects.count()
response = self.client.get("/remote_user/", **{self.header: self.known_user})
self.assertEqual(response.context["user"].username, "knownuser")
self.assertEqual(User.objects.count(), num_users)
# A different user passed in the headers causes the new user
# to be logged in.
response = self.client.get("/remote_user/", **{self.header: self.known_user2})
self.assertEqual(response.context["user"].username, "knownuser2")
self.assertEqual(User.objects.count(), num_users)
def test_last_login(self):
"""
A user's last_login is set the first time they make a
request but not updated in subsequent requests with the same session.
"""
user = User.objects.create(username="knownuser")
# Set last_login to something so we can determine if it changes.
default_login = datetime(2000, 1, 1)
if settings.USE_TZ:
default_login = default_login.replace(tzinfo=timezone.utc)
user.last_login = default_login
user.save()
response = self.client.get("/remote_user/", **{self.header: self.known_user})
self.assertNotEqual(default_login, response.context["user"].last_login)
user = User.objects.get(username="knownuser")
user.last_login = default_login
user.save()
response = self.client.get("/remote_user/", **{self.header: self.known_user})
self.assertEqual(default_login, response.context["user"].last_login)
def test_header_disappears(self):
"""
A logged in user is logged out automatically when
the REMOTE_USER header disappears during the same browser session.
"""
User.objects.create(username="knownuser")
# Known user authenticates
response = self.client.get("/remote_user/", **{self.header: self.known_user})
self.assertEqual(response.context["user"].username, "knownuser")
# During the session, the REMOTE_USER header disappears. Should trigger logout.
response = self.client.get("/remote_user/")
self.assertTrue(response.context["user"].is_anonymous)
# Verify that RemoteUserMiddleware will not remove a user
# authenticated via another backend.
User.objects.create_user(username="modeluser", password="foo")
self.client.login(username="modeluser", password="foo")
authenticate(username="modeluser", password="foo")
response = self.client.get("/remote_user/")
self.assertEqual(response.context["user"].username, "modeluser")
def test_user_switch_forces_new_login(self):
"""
If the username in the header changes between requests,
the original user is logged out.
"""
User.objects.create(username="knownuser")
# Known user authenticates
response = self.client.get("/remote_user/", **{self.header: self.known_user})
self.assertEqual(response.context["user"].username, "knownuser")
# During the session, the REMOTE_USER changes to a different user.
response = self.client.get("/remote_user/", **{self.header: "newnewuser"})
# The current user is not the prior remote_user.
# In backends that create a new user, username is "newnewuser"
# In backends that do not create new users, it is '' (anonymous user)
self.assertNotEqual(response.context["user"].username, "knownuser")
def test_inactive_user(self):
User.objects.create(username="knownuser", is_active=False)
response = self.client.get("/remote_user/", **{self.header: "knownuser"})
self.assertTrue(response.context["user"].is_anonymous)
class RemoteUserNoCreateBackend(RemoteUserBackend):
"""Backend that doesn't create unknown users."""
create_unknown_user = False
class RemoteUserNoCreateTest(RemoteUserTest):
"""
Contains the same tests as RemoteUserTest, but using a custom auth backend
class that doesn't create unknown users.
"""
backend = "auth_tests.test_remote_user.RemoteUserNoCreateBackend"
def test_unknown_user(self):
num_users = User.objects.count()
response = self.client.get("/remote_user/", **{self.header: "newuser"})
self.assertTrue(response.context["user"].is_anonymous)
self.assertEqual(User.objects.count(), num_users)
class AllowAllUsersRemoteUserBackendTest(RemoteUserTest):
"""Backend that allows inactive users."""
backend = "django.contrib.auth.backends.AllowAllUsersRemoteUserBackend"
def test_inactive_user(self):
user = User.objects.create(username="knownuser", is_active=False)
response = self.client.get("/remote_user/", **{self.header: self.known_user})
self.assertEqual(response.context["user"].username, user.username)
class CustomRemoteUserBackend(RemoteUserBackend):
"""
Backend that overrides RemoteUserBackend methods.
"""
def clean_username(self, username):
"""
Grabs the username before the @ character.
"""
return username.split("@")[0]
def configure_user(self, request, user, created=True):
"""
Sets the user's email address using the email specified in an HTTP
header. Sets the user's last name for existing users.
"""
user.email = request.META.get(RemoteUserTest.email_header, "")
if not created:
user.last_name = user.username
user.save()
return user
class RemoteUserCustomTest(RemoteUserTest):
"""
Tests a custom RemoteUserBackend subclass that overrides the clean_username
and configure_user methods.
"""
backend = "auth_tests.test_remote_user.CustomRemoteUserBackend"
# REMOTE_USER strings with email addresses for the custom backend to
# clean.
known_user = "[email protected]"
known_user2 = "[email protected]"
def test_known_user(self):
"""
The strings passed in REMOTE_USER should be cleaned and the known users
should not have been configured with an email address.
"""
super().test_known_user()
knownuser = User.objects.get(username="knownuser")
knownuser2 = User.objects.get(username="knownuser2")
self.assertEqual(knownuser.email, "")
self.assertEqual(knownuser2.email, "")
self.assertEqual(knownuser.last_name, "knownuser")
self.assertEqual(knownuser2.last_name, "knownuser2")
def test_unknown_user(self):
"""
The unknown user created should be configured with the email address
provided in the request header.
"""
num_users = User.objects.count()
response = self.client.get(
"/remote_user/",
**{
self.header: "newuser",
self.email_header: "[email protected]",
},
)
self.assertEqual(response.context["user"].username, "newuser")
self.assertEqual(response.context["user"].email, "[email protected]")
self.assertEqual(response.context["user"].last_name, "")
self.assertEqual(User.objects.count(), num_users + 1)
newuser = User.objects.get(username="newuser")
self.assertEqual(newuser.email, "[email protected]")
# RemovedInDjango50Warning.
class CustomRemoteUserNoCreatedArgumentBackend(CustomRemoteUserBackend):
def configure_user(self, request, user):
return super().configure_user(request, user)
@ignore_warnings(category=RemovedInDjango50Warning)
class RemoteUserCustomNoCreatedArgumentTest(RemoteUserTest):
backend = "auth_tests.test_remote_user.CustomRemoteUserNoCreatedArgumentBackend"
@override_settings(ROOT_URLCONF="auth_tests.urls")
@modify_settings(
AUTHENTICATION_BACKENDS={
"append": "auth_tests.test_remote_user.CustomRemoteUserNoCreatedArgumentBackend"
},
MIDDLEWARE={"append": "django.contrib.auth.middleware.RemoteUserMiddleware"},
)
class RemoteUserCustomNoCreatedArgumentDeprecationTest(TestCase):
def test_known_user_sync(self):
msg = (
"`created=True` must be added to the signature of "
"CustomRemoteUserNoCreatedArgumentBackend.configure_user()."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
self.client.get("/remote_user/", **{RemoteUserTest.header: "newuser"})
class CustomHeaderMiddleware(RemoteUserMiddleware):
"""
Middleware that uses a custom HTTP auth user header.
"""
header = "HTTP_AUTHUSER"
class CustomHeaderRemoteUserTest(RemoteUserTest):
"""
Tests a custom RemoteUserMiddleware subclass with custom HTTP auth user
header.
"""
middleware = "auth_tests.test_remote_user.CustomHeaderMiddleware"
header = "HTTP_AUTHUSER"
class PersistentRemoteUserTest(RemoteUserTest):
"""
PersistentRemoteUserMiddleware keeps the user logged in even if the
subsequent requests do not contain the header value.
"""
middleware = "django.contrib.auth.middleware.PersistentRemoteUserMiddleware"
require_header = False
def test_header_disappears(self):
"""
A logged in user is kept logged in even if the REMOTE_USER header
disappears during the same browser session.
"""
User.objects.create(username="knownuser")
# Known user authenticates
response = self.client.get("/remote_user/", **{self.header: self.known_user})
self.assertEqual(response.context["user"].username, "knownuser")
# Should stay logged in if the REMOTE_USER header disappears.
response = self.client.get("/remote_user/")
self.assertFalse(response.context["user"].is_anonymous)
self.assertEqual(response.context["user"].username, "knownuser")
|
a209edf06e04bfe4d13e312140b48c628016c6e327d32a2856394999aaceb0fd | import re
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest, HttpResponse, UnreadablePostError
from django.middleware.csrf import (
CSRF_ALLOWED_CHARS,
CSRF_SECRET_LENGTH,
CSRF_SESSION_KEY,
CSRF_TOKEN_LENGTH,
REASON_BAD_ORIGIN,
REASON_CSRF_TOKEN_MISSING,
REASON_NO_CSRF_COOKIE,
CsrfViewMiddleware,
InvalidTokenFormat,
RejectRequest,
_check_token_format,
_does_token_match,
_mask_cipher_secret,
_unmask_cipher_token,
get_token,
rotate_token,
)
from django.test import SimpleTestCase, override_settings
from django.test.utils import ignore_warnings
from django.utils.deprecation import RemovedInDjango50Warning
from django.views.decorators.csrf import csrf_exempt, requires_csrf_token
from .views import (
ensure_csrf_cookie_view,
ensured_and_protected_view,
non_token_view_using_request_processor,
post_form_view,
protected_view,
sandwiched_rotate_token_view,
token_view,
)
# This is a test (unmasked) CSRF cookie / secret.
TEST_SECRET = "lcccccccX2kcccccccY2jcccccccssIC"
# Two masked versions of TEST_SECRET for testing purposes.
MASKED_TEST_SECRET1 = "1bcdefghij2bcdefghij3bcdefghij4bcdefghij5bcdefghij6bcdefghijABCD"
MASKED_TEST_SECRET2 = "2JgchWvM1tpxT2lfz9aydoXW9yT1DN3NdLiejYxOOlzzV4nhBbYqmqZYbAV3V5Bf"
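# For orientation, a minimal sketch of the masking scheme these constants
# exercise, inferred from the behavior asserted below rather than copied
# from django.middleware.csrf: a random mask of CSRF_SECRET_LENGTH
# characters is prepended, and the secret is shift-ciphered against the
# mask over CSRF_ALLOWED_CHARS.
#
#   def sketch_mask(secret, mask):
#       chars = CSRF_ALLOWED_CHARS
#       shifted = (
#           chars[(chars.index(s) + chars.index(m)) % len(chars)]
#           for s, m in zip(secret, mask)
#       )
#       return mask + "".join(shifted)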
class CsrfFunctionTestMixin:
# This method depends on _unmask_cipher_token() being correct.
def assertMaskedSecretCorrect(self, masked_secret, secret):
"""Test that a string is a valid masked version of a secret."""
self.assertEqual(len(masked_secret), CSRF_TOKEN_LENGTH)
self.assertEqual(len(secret), CSRF_SECRET_LENGTH)
self.assertTrue(
set(masked_secret).issubset(set(CSRF_ALLOWED_CHARS)),
msg=f"invalid characters in {masked_secret!r}",
)
actual = _unmask_cipher_token(masked_secret)
self.assertEqual(actual, secret)
class CsrfFunctionTests(CsrfFunctionTestMixin, SimpleTestCase):
def test_unmask_cipher_token(self):
cases = [
(TEST_SECRET, MASKED_TEST_SECRET1),
(TEST_SECRET, MASKED_TEST_SECRET2),
(
32 * "a",
"vFioG3XOLyGyGsPRFyB9iYUs341ufzIEvFioG3XOLyGyGsPRFyB9iYUs341ufzIE",
),
(32 * "a", 64 * "a"),
(32 * "a", 64 * "b"),
(32 * "b", 32 * "a" + 32 * "b"),
(32 * "b", 32 * "b" + 32 * "c"),
(32 * "c", 32 * "a" + 32 * "c"),
]
for secret, masked_secret in cases:
with self.subTest(masked_secret=masked_secret):
actual = _unmask_cipher_token(masked_secret)
self.assertEqual(actual, secret)
def test_mask_cipher_secret(self):
cases = [
32 * "a",
TEST_SECRET,
"da4SrUiHJYoJ0HYQ0vcgisoIuFOxx4ER",
]
for secret in cases:
with self.subTest(secret=secret):
masked = _mask_cipher_secret(secret)
self.assertMaskedSecretCorrect(masked, secret)
def test_get_token_csrf_cookie_set(self):
request = HttpRequest()
request.META["CSRF_COOKIE"] = TEST_SECRET
self.assertNotIn("CSRF_COOKIE_NEEDS_UPDATE", request.META)
token = get_token(request)
self.assertMaskedSecretCorrect(token, TEST_SECRET)
# The existing cookie is preserved.
self.assertEqual(request.META["CSRF_COOKIE"], TEST_SECRET)
self.assertIs(request.META["CSRF_COOKIE_NEEDS_UPDATE"], True)
def test_get_token_csrf_cookie_not_set(self):
request = HttpRequest()
self.assertNotIn("CSRF_COOKIE", request.META)
self.assertNotIn("CSRF_COOKIE_NEEDS_UPDATE", request.META)
token = get_token(request)
cookie = request.META["CSRF_COOKIE"]
self.assertMaskedSecretCorrect(token, cookie)
self.assertIs(request.META["CSRF_COOKIE_NEEDS_UPDATE"], True)
def test_rotate_token(self):
request = HttpRequest()
request.META["CSRF_COOKIE"] = TEST_SECRET
self.assertNotIn("CSRF_COOKIE_NEEDS_UPDATE", request.META)
rotate_token(request)
# The underlying secret was changed.
cookie = request.META["CSRF_COOKIE"]
self.assertEqual(len(cookie), CSRF_SECRET_LENGTH)
self.assertNotEqual(cookie, TEST_SECRET)
self.assertIs(request.META["CSRF_COOKIE_NEEDS_UPDATE"], True)
def test_check_token_format_valid(self):
cases = [
# A token of length CSRF_SECRET_LENGTH.
TEST_SECRET,
# A token of length CSRF_TOKEN_LENGTH.
MASKED_TEST_SECRET1,
64 * "a",
]
for token in cases:
with self.subTest(token=token):
actual = _check_token_format(token)
self.assertIsNone(actual)
def test_check_token_format_invalid(self):
cases = [
(64 * "*", "has invalid characters"),
(16 * "a", "has incorrect length"),
]
for token, expected_message in cases:
with self.subTest(token=token):
with self.assertRaisesMessage(InvalidTokenFormat, expected_message):
_check_token_format(token)
def test_does_token_match(self):
cases = [
# Masked tokens match.
((MASKED_TEST_SECRET1, TEST_SECRET), True),
((MASKED_TEST_SECRET2, TEST_SECRET), True),
((64 * "a", _unmask_cipher_token(64 * "a")), True),
# Unmasked tokens match.
((TEST_SECRET, TEST_SECRET), True),
((32 * "a", 32 * "a"), True),
# Incorrect tokens don't match.
((32 * "a", TEST_SECRET), False),
((64 * "a", TEST_SECRET), False),
]
for (token, secret), expected in cases:
with self.subTest(token=token, secret=secret):
actual = _does_token_match(token, secret)
self.assertIs(actual, expected)
def test_does_token_match_wrong_token_length(self):
with self.assertRaises(AssertionError):
_does_token_match(16 * "a", TEST_SECRET)
class TestingSessionStore(SessionStore):
"""
A version of SessionStore that stores what cookie values are passed to
set_cookie() when CSRF_USE_SESSIONS=True.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This is a list of the cookie values passed to set_cookie() over
# the course of the request-response.
self._cookies_set = []
def __setitem__(self, key, value):
super().__setitem__(key, value)
self._cookies_set.append(value)
class TestingHttpRequest(HttpRequest):
"""
A version of HttpRequest that lets one track and change some things more
easily.
"""
def __init__(self):
super().__init__()
self.session = TestingSessionStore()
def is_secure(self):
return getattr(self, "_is_secure_override", False)
class PostErrorRequest(TestingHttpRequest):
"""
TestingHttpRequest that can raise errors when accessing POST data.
"""
post_error = None
def _get_post(self):
if self.post_error is not None:
raise self.post_error
return self._post
def _set_post(self, post):
self._post = post
POST = property(_get_post, _set_post)
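# Usage (see the UnreadablePostError tests below): assign an exception
# instance to req.post_error, and any subsequent access to req.POST raises
# that exception.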
class CsrfViewMiddlewareTestMixin(CsrfFunctionTestMixin):
"""
Shared methods and tests for session-based and cookie-based tokens.
"""
_csrf_id_cookie = MASKED_TEST_SECRET1
_csrf_id_token = MASKED_TEST_SECRET2
def _set_csrf_cookie(self, req, cookie):
raise NotImplementedError("This method must be implemented by a subclass.")
def _read_csrf_cookie(self, req, resp):
"""
Return the CSRF cookie as a string, or False if no cookie is present.
"""
raise NotImplementedError("This method must be implemented by a subclass.")
def _get_cookies_set(self, req, resp):
"""
Return a list of the cookie values passed to set_cookie() over the
course of the request-response.
"""
raise NotImplementedError("This method must be implemented by a subclass.")
def _get_request(self, method=None, cookie=None, request_class=None):
if method is None:
method = "GET"
if request_class is None:
request_class = TestingHttpRequest
req = request_class()
req.method = method
if cookie is not None:
self._set_csrf_cookie(req, cookie)
return req
def _get_csrf_cookie_request(
self,
method=None,
cookie=None,
post_token=None,
meta_token=None,
token_header=None,
request_class=None,
):
"""
The method argument defaults to "GET". The cookie argument defaults to
this class's default test cookie. The post_token and meta_token
arguments are included in the request's req.POST and req.META,
respectively, when provided and non-None. The token_header argument is
the header key to use for req.META; it defaults to "HTTP_X_CSRFTOKEN".
"""
if cookie is None:
cookie = self._csrf_id_cookie
if token_header is None:
token_header = "HTTP_X_CSRFTOKEN"
req = self._get_request(
method=method,
cookie=cookie,
request_class=request_class,
)
if post_token is not None:
req.POST["csrfmiddlewaretoken"] = post_token
if meta_token is not None:
req.META[token_header] = meta_token
return req
def _get_POST_csrf_cookie_request(
self,
cookie=None,
post_token=None,
meta_token=None,
token_header=None,
request_class=None,
):
return self._get_csrf_cookie_request(
method="POST",
cookie=cookie,
post_token=post_token,
meta_token=meta_token,
token_header=token_header,
request_class=request_class,
)
def _get_POST_request_with_token(self, cookie=None, request_class=None):
"""The cookie argument defaults to this class's default test cookie."""
return self._get_POST_csrf_cookie_request(
cookie=cookie,
post_token=self._csrf_id_token,
request_class=request_class,
)
# This method depends on _unmask_cipher_token() being correct.
def _check_token_present(self, response, csrf_secret=None):
if csrf_secret is None:
csrf_secret = TEST_SECRET
text = str(response.content, response.charset)
match = re.search('name="csrfmiddlewaretoken" value="(.*?)"', text)
self.assertTrue(
match,
f"Could not find a csrfmiddlewaretoken value in: {text}",
)
csrf_token = match[1]
self.assertMaskedSecretCorrect(csrf_token, csrf_secret)
def test_process_response_get_token_not_used(self):
"""
If get_token() is not called, the view middleware does not
add a cookie.
"""
# This is important to make pages cacheable. Pages that do call
# get_token(), assuming they use the token, are not cacheable because
# the token is specific to the user.
req = self._get_request()
# non_token_view_using_request_processor does not call get_token(), but
# does use the csrf request processor. By using this, we are testing
# that the view processor is properly lazy and doesn't call get_token()
# until needed.
mw = CsrfViewMiddleware(non_token_view_using_request_processor)
mw.process_request(req)
mw.process_view(req, non_token_view_using_request_processor, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertIs(csrf_cookie, False)
def _check_bad_or_missing_cookie(self, cookie, expected):
"""Passing None for cookie includes no cookie."""
req = self._get_request(method="POST", cookie=cookie)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
with self.assertLogs("django.security.csrf", "WARNING") as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(cm.records[0].getMessage(), "Forbidden (%s): " % expected)
def test_no_csrf_cookie(self):
"""
If no CSRF cookie is present, the middleware rejects the incoming
request. This will stop login CSRF.
"""
self._check_bad_or_missing_cookie(None, REASON_NO_CSRF_COOKIE)
def _check_bad_or_missing_token(
self,
expected,
post_token=None,
meta_token=None,
token_header=None,
):
req = self._get_POST_csrf_cookie_request(
post_token=post_token,
meta_token=meta_token,
token_header=token_header,
)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
with self.assertLogs("django.security.csrf", "WARNING") as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(resp["Content-Type"], "text/html; charset=utf-8")
self.assertEqual(cm.records[0].getMessage(), "Forbidden (%s): " % expected)
def test_csrf_cookie_bad_or_missing_token(self):
"""
If a CSRF cookie is present but the token is missing or invalid, the
middleware rejects the incoming request.
"""
cases = [
(None, None, REASON_CSRF_TOKEN_MISSING),
(16 * "a", None, "CSRF token from POST has incorrect length."),
(64 * "*", None, "CSRF token from POST has invalid characters."),
(64 * "a", None, "CSRF token from POST incorrect."),
(
None,
16 * "a",
"CSRF token from the 'X-Csrftoken' HTTP header has incorrect length.",
),
(
None,
64 * "*",
"CSRF token from the 'X-Csrftoken' HTTP header has invalid characters.",
),
(
None,
64 * "a",
"CSRF token from the 'X-Csrftoken' HTTP header incorrect.",
),
]
for post_token, meta_token, expected in cases:
with self.subTest(post_token=post_token, meta_token=meta_token):
self._check_bad_or_missing_token(
expected,
post_token=post_token,
meta_token=meta_token,
)
@override_settings(CSRF_HEADER_NAME="HTTP_X_CSRFTOKEN_CUSTOMIZED")
def test_csrf_cookie_bad_token_custom_header(self):
"""
If a CSRF cookie is present and an invalid token is passed via a
custom CSRF_HEADER_NAME, the middleware rejects the incoming request.
"""
expected = (
"CSRF token from the 'X-Csrftoken-Customized' HTTP header has "
"incorrect length."
)
self._check_bad_or_missing_token(
expected,
meta_token=16 * "a",
token_header="HTTP_X_CSRFTOKEN_CUSTOMIZED",
)
def test_process_request_csrf_cookie_and_token(self):
"""
If both a cookie and a token are present, the middleware lets the request through.
"""
req = self._get_POST_request_with_token()
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
def test_process_request_csrf_cookie_no_token_exempt_view(self):
"""
If a CSRF cookie is present but no token, and the csrf_exempt decorator
has been applied to the view, the middleware lets it through.
"""
req = self._get_POST_csrf_cookie_request()
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, csrf_exempt(post_form_view), (), {})
self.assertIsNone(resp)
def test_csrf_token_in_header(self):
"""
The token may be passed in a header instead of in the form.
"""
req = self._get_POST_csrf_cookie_request(meta_token=self._csrf_id_token)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(CSRF_HEADER_NAME="HTTP_X_CSRFTOKEN_CUSTOMIZED")
def test_csrf_token_in_header_with_customized_name(self):
"""
settings.CSRF_HEADER_NAME can be used to customize the CSRF header name.
"""
req = self._get_POST_csrf_cookie_request(
meta_token=self._csrf_id_token,
token_header="HTTP_X_CSRFTOKEN_CUSTOMIZED",
)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
def test_put_and_delete_rejected(self):
"""
HTTP PUT and DELETE methods are protected.
"""
req = self._get_request(method="PUT")
mw = CsrfViewMiddleware(post_form_view)
with self.assertLogs("django.security.csrf", "WARNING") as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(
cm.records[0].getMessage(), "Forbidden (%s): " % REASON_NO_CSRF_COOKIE
)
req = self._get_request(method="DELETE")
with self.assertLogs("django.security.csrf", "WARNING") as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(
cm.records[0].getMessage(), "Forbidden (%s): " % REASON_NO_CSRF_COOKIE
)
def test_put_and_delete_allowed(self):
"""
HTTP PUT and DELETE can get through with X-CSRFToken and a cookie.
"""
req = self._get_csrf_cookie_request(
method="PUT", meta_token=self._csrf_id_token
)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
req = self._get_csrf_cookie_request(
method="DELETE", meta_token=self._csrf_id_token
)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
def test_rotate_token_triggers_second_reset(self):
"""
If rotate_token() is called after the token is reset in
CsrfViewMiddleware's process_response() and before another call to
the same process_response(), the cookie is reset a second time.
"""
req = self._get_POST_request_with_token()
resp = sandwiched_rotate_token_view(req)
self.assertContains(resp, "OK")
actual_secret = self._read_csrf_cookie(req, resp)
# set_cookie() was called a second time with a different secret.
cookies_set = self._get_cookies_set(req, resp)
# Only compare the last two to exclude a spurious entry that's present
# when CsrfViewMiddlewareUseSessionsTests is running.
self.assertEqual(cookies_set[-2:], [TEST_SECRET, actual_secret])
self.assertNotEqual(actual_secret, TEST_SECRET)
# Tests for the template tag method
def test_token_node_no_csrf_cookie(self):
"""
CsrfTokenNode works when no CSRF cookie is set.
"""
req = self._get_request()
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
csrf_secret = _unmask_cipher_token(token)
self._check_token_present(resp, csrf_secret)
def test_token_node_empty_csrf_cookie(self):
"""
A new token is sent if the csrf_cookie is the empty string.
"""
req = self._get_request(cookie="")
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
csrf_secret = _unmask_cipher_token(token)
self._check_token_present(resp, csrf_secret)
def test_token_node_with_csrf_cookie(self):
"""
CsrfTokenNode works when a CSRF cookie is set.
"""
req = self._get_csrf_cookie_request()
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
mw.process_view(req, token_view, (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_exempt_view(self):
"""
get_token still works for a view decorated with 'csrf_exempt'.
"""
req = self._get_csrf_cookie_request()
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
mw.process_view(req, csrf_exempt(token_view), (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_requires_csrf_token_view(self):
"""
get_token() works for a view decorated solely with requires_csrf_token.
"""
req = self._get_csrf_cookie_request()
resp = requires_csrf_token(token_view)(req)
self._check_token_present(resp)
def test_token_node_with_new_csrf_cookie(self):
"""
CsrfTokenNode works when a CSRF cookie is created by
the middleware (when one was not already present).
"""
req = self._get_request()
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self._check_token_present(resp, csrf_cookie)
def test_cookie_not_reset_on_accepted_request(self):
"""
The CSRF token used in posts changes on every request (although it
stays equivalent). The CSRF cookie should not change on accepted
requests; if it appears in the response, it should keep its value.
"""
req = self._get_POST_request_with_token()
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(
csrf_cookie,
TEST_SECRET,
"CSRF cookie was changed on an accepted request",
)
@override_settings(DEBUG=True, ALLOWED_HOSTS=["www.example.com"])
def test_https_bad_referer(self):
"""
A POST HTTPS request with a bad referer is rejected
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_REFERER"] = "https://www.evil.org/somepage"
req.META["SERVER_PORT"] = "443"
mw = CsrfViewMiddleware(post_form_view)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
"Referer checking failed - https://www.evil.org/somepage does not "
"match any trusted origins.",
status_code=403,
)
def _check_referer_rejects(self, mw, req):
with self.assertRaises(RejectRequest):
mw._check_referer(req)
@override_settings(DEBUG=True)
def test_https_no_referer(self):
"""A POST HTTPS request with a missing referer is rejected."""
req = self._get_POST_request_with_token()
req._is_secure_override = True
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
"Referer checking failed - no Referer.",
status_code=403,
)
def test_https_malformed_host(self):
"""
CsrfViewMiddleware generates a 403 response if it receives an HTTPS
request with a bad host.
"""
req = self._get_request(method="POST")
req._is_secure_override = True
req.META["HTTP_HOST"] = "@malformed"
req.META["HTTP_REFERER"] = "https://www.evil.org/somepage"
req.META["SERVER_PORT"] = "443"
mw = CsrfViewMiddleware(token_view)
expected = (
"Referer checking failed - https://www.evil.org/somepage does not "
"match any trusted origins."
)
with self.assertRaisesMessage(RejectRequest, expected):
mw._check_referer(req)
response = mw.process_view(req, token_view, (), {})
self.assertEqual(response.status_code, 403)
def test_origin_malformed_host(self):
req = self._get_request(method="POST")
req._is_secure_override = True
req.META["HTTP_HOST"] = "@malformed"
req.META["HTTP_ORIGIN"] = "https://www.evil.org"
mw = CsrfViewMiddleware(token_view)
self._check_referer_rejects(mw, req)
response = mw.process_view(req, token_view, (), {})
self.assertEqual(response.status_code, 403)
@override_settings(DEBUG=True)
def test_https_malformed_referer(self):
"""
A POST HTTPS request with a bad referer is rejected.
"""
malformed_referer_msg = "Referer checking failed - Referer is malformed."
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_REFERER"] = "http://http://www.example.com/"
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
"Referer checking failed - Referer is insecure while host is secure.",
status_code=403,
)
# Empty
req.META["HTTP_REFERER"] = ""
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# Non-ASCII
req.META["HTTP_REFERER"] = "ØBöIß"
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# Missing scheme
# >>> urlparse('//example.com/')
# ParseResult(
# scheme='', netloc='example.com', path='/', params='', query='', fragment='',
# )
req.META["HTTP_REFERER"] = "//example.com/"
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# Missing netloc
# >>> urlparse('https://')
# ParseResult(
# scheme='https', netloc='', path='', params='', query='', fragment='',
# )
req.META["HTTP_REFERER"] = "https://"
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# Invalid URL
# >>> urlparse('https://[')
# ValueError: Invalid IPv6 URL
req.META["HTTP_REFERER"] = "https://["
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
@override_settings(ALLOWED_HOSTS=["www.example.com"])
def test_https_good_referer(self):
"""
A POST HTTPS request with a good referer is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_REFERER"] = "https://www.example.com/somepage"
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(ALLOWED_HOSTS=["www.example.com"])
def test_https_good_referer_2(self):
"""
A POST HTTPS request with a good referer is accepted where the referer
contains no trailing slash.
"""
# See ticket #15617
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_REFERER"] = "https://www.example.com"
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
def _test_https_good_referer_behind_proxy(self):
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META.update(
{
"HTTP_HOST": "10.0.0.2",
"HTTP_REFERER": "https://www.example.com/somepage",
"SERVER_PORT": "8080",
"HTTP_X_FORWARDED_HOST": "www.example.com",
"HTTP_X_FORWARDED_PORT": "443",
}
)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(CSRF_TRUSTED_ORIGINS=["https://dashboard.example.com"])
def test_https_good_referer_malformed_host(self):
"""
A POST HTTPS request with a good referer is accepted even if the host
header is malformed.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "@malformed"
req.META["HTTP_REFERER"] = "https://dashboard.example.com/somepage"
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(
ALLOWED_HOSTS=["www.example.com"],
CSRF_TRUSTED_ORIGINS=["https://dashboard.example.com"],
)
def test_https_csrf_trusted_origin_allowed(self):
"""
A POST HTTPS request with a referer added to the CSRF_TRUSTED_ORIGINS
setting is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_REFERER"] = "https://dashboard.example.com"
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(
ALLOWED_HOSTS=["www.example.com"],
CSRF_TRUSTED_ORIGINS=["https://*.example.com"],
)
def test_https_csrf_wildcard_trusted_origin_allowed(self):
"""
A POST HTTPS request with a referer that matches a CSRF_TRUSTED_ORIGINS
wildcard is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_REFERER"] = "https://dashboard.example.com"
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
def _test_https_good_referer_matches_cookie_domain(self):
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_REFERER"] = "https://foo.example.com/"
req.META["SERVER_PORT"] = "443"
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
def _test_https_good_referer_matches_cookie_domain_with_different_port(self):
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_REFERER"] = "https://foo.example.com:4443/"
req.META["SERVER_PORT"] = "4443"
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
def test_ensures_csrf_cookie_no_logging(self):
"""
ensure_csrf_cookie() doesn't log warnings (#19436).
"""
with self.assertNoLogs("django.request", "WARNING"):
req = self._get_request()
ensure_csrf_cookie_view(req)
def test_reading_post_data_raises_unreadable_post_error(self):
"""
An UnreadablePostError raised while reading the POST data should be
handled by the middleware.
"""
req = self._get_POST_request_with_token()
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
req = self._get_POST_request_with_token(request_class=PostErrorRequest)
req.post_error = UnreadablePostError("Error reading input data.")
mw.process_request(req)
with self.assertLogs("django.security.csrf", "WARNING") as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(resp.status_code, 403)
self.assertEqual(
cm.records[0].getMessage(),
"Forbidden (%s): " % REASON_CSRF_TOKEN_MISSING,
)
def test_reading_post_data_raises_os_error(self):
"""
An OSError raised while reading the POST data should not be handled by
the middleware.
"""
mw = CsrfViewMiddleware(post_form_view)
req = self._get_POST_request_with_token(request_class=PostErrorRequest)
req.post_error = OSError("Deleted directories/Missing permissions.")
mw.process_request(req)
with self.assertRaises(OSError):
mw.process_view(req, post_form_view, (), {})
@override_settings(ALLOWED_HOSTS=["www.example.com"])
def test_bad_origin_bad_domain(self):
"""A request with a bad origin is rejected."""
req = self._get_POST_request_with_token()
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_ORIGIN"] = "https://www.evil.org"
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
self.assertIs(mw._origin_verified(req), False)
with self.assertLogs("django.security.csrf", "WARNING") as cm:
response = mw.process_view(req, post_form_view, (), {})
self.assertEqual(response.status_code, 403)
msg = REASON_BAD_ORIGIN % req.META["HTTP_ORIGIN"]
self.assertEqual(cm.records[0].getMessage(), "Forbidden (%s): " % msg)
@override_settings(ALLOWED_HOSTS=["www.example.com"])
def test_bad_origin_null_origin(self):
"""A request with a null origin is rejected."""
req = self._get_POST_request_with_token()
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_ORIGIN"] = "null"
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
self.assertIs(mw._origin_verified(req), False)
with self.assertLogs("django.security.csrf", "WARNING") as cm:
response = mw.process_view(req, post_form_view, (), {})
self.assertEqual(response.status_code, 403)
msg = REASON_BAD_ORIGIN % req.META["HTTP_ORIGIN"]
self.assertEqual(cm.records[0].getMessage(), "Forbidden (%s): " % msg)
@override_settings(ALLOWED_HOSTS=["www.example.com"])
def test_bad_origin_bad_protocol(self):
"""A request with an origin with wrong protocol is rejected."""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_ORIGIN"] = "http://example.com"
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
self.assertIs(mw._origin_verified(req), False)
with self.assertLogs("django.security.csrf", "WARNING") as cm:
response = mw.process_view(req, post_form_view, (), {})
self.assertEqual(response.status_code, 403)
msg = REASON_BAD_ORIGIN % req.META["HTTP_ORIGIN"]
self.assertEqual(cm.records[0].getMessage(), "Forbidden (%s): " % msg)
@override_settings(
ALLOWED_HOSTS=["www.example.com"],
CSRF_TRUSTED_ORIGINS=[
"http://no-match.com",
"https://*.example.com",
"http://*.no-match.com",
"http://*.no-match-2.com",
],
)
def test_bad_origin_csrf_trusted_origin_bad_protocol(self):
"""
A request with an origin with the wrong protocol compared to
CSRF_TRUSTED_ORIGINS is rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_ORIGIN"] = "http://foo.example.com"
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
self.assertIs(mw._origin_verified(req), False)
with self.assertLogs("django.security.csrf", "WARNING") as cm:
response = mw.process_view(req, post_form_view, (), {})
self.assertEqual(response.status_code, 403)
msg = REASON_BAD_ORIGIN % req.META["HTTP_ORIGIN"]
self.assertEqual(cm.records[0].getMessage(), "Forbidden (%s): " % msg)
self.assertEqual(mw.allowed_origins_exact, {"http://no-match.com"})
self.assertEqual(
mw.allowed_origin_subdomains,
{
"https": [".example.com"],
"http": [".no-match.com", ".no-match-2.com"],
},
)
@override_settings(ALLOWED_HOSTS=["www.example.com"])
def test_bad_origin_cannot_be_parsed(self):
"""
A POST request with an origin that can't be parsed by urlparse() is
rejected.
"""
req = self._get_POST_request_with_token()
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_ORIGIN"] = "https://["
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
self.assertIs(mw._origin_verified(req), False)
with self.assertLogs("django.security.csrf", "WARNING") as cm:
response = mw.process_view(req, post_form_view, (), {})
self.assertEqual(response.status_code, 403)
msg = REASON_BAD_ORIGIN % req.META["HTTP_ORIGIN"]
self.assertEqual(cm.records[0].getMessage(), "Forbidden (%s): " % msg)
@override_settings(ALLOWED_HOSTS=["www.example.com"])
def test_good_origin_insecure(self):
"""A POST HTTP request with a good origin is accepted."""
req = self._get_POST_request_with_token()
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_ORIGIN"] = "http://www.example.com"
mw = CsrfViewMiddleware(post_form_view)
self.assertIs(mw._origin_verified(req), True)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
@override_settings(ALLOWED_HOSTS=["www.example.com"])
def test_good_origin_secure(self):
"""A POST HTTPS request with a good origin is accepted."""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_ORIGIN"] = "https://www.example.com"
mw = CsrfViewMiddleware(post_form_view)
self.assertIs(mw._origin_verified(req), True)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
@override_settings(
ALLOWED_HOSTS=["www.example.com"],
CSRF_TRUSTED_ORIGINS=["https://dashboard.example.com"],
)
def test_good_origin_csrf_trusted_origin_allowed(self):
"""
A POST request with an origin added to the CSRF_TRUSTED_ORIGINS
setting is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_ORIGIN"] = "https://dashboard.example.com"
mw = CsrfViewMiddleware(post_form_view)
self.assertIs(mw._origin_verified(req), True)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
self.assertEqual(mw.allowed_origins_exact, {"https://dashboard.example.com"})
self.assertEqual(mw.allowed_origin_subdomains, {})
@override_settings(
ALLOWED_HOSTS=["www.example.com"],
CSRF_TRUSTED_ORIGINS=["https://*.example.com"],
)
def test_good_origin_wildcard_csrf_trusted_origin_allowed(self):
"""
A POST request with an origin that matches a CSRF_TRUSTED_ORIGINS
wildcard is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_HOST"] = "www.example.com"
req.META["HTTP_ORIGIN"] = "https://foo.example.com"
mw = CsrfViewMiddleware(post_form_view)
self.assertIs(mw._origin_verified(req), True)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
self.assertEqual(mw.allowed_origins_exact, set())
self.assertEqual(mw.allowed_origin_subdomains, {"https": [".example.com"]})
class CsrfViewMiddlewareTests(CsrfViewMiddlewareTestMixin, SimpleTestCase):
def _set_csrf_cookie(self, req, cookie):
req.COOKIES[settings.CSRF_COOKIE_NAME] = cookie
def _read_csrf_cookie(self, req, resp):
"""
Return the CSRF cookie as a string, or False if no cookie is present.
"""
if settings.CSRF_COOKIE_NAME not in resp.cookies:
return False
csrf_cookie = resp.cookies[settings.CSRF_COOKIE_NAME]
return csrf_cookie.value
def _get_cookies_set(self, req, resp):
return resp._cookies_set
def test_ensures_csrf_cookie_no_middleware(self):
"""
The ensure_csrf_cookie() decorator works without middleware.
"""
req = self._get_request()
resp = ensure_csrf_cookie_view(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertTrue(csrf_cookie)
self.assertIn("Cookie", resp.get("Vary", ""))
def test_ensures_csrf_cookie_with_middleware(self):
"""
The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware
enabled.
"""
req = self._get_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertTrue(csrf_cookie)
self.assertIn("Cookie", resp.get("Vary", ""))
def test_csrf_cookie_age(self):
"""
CSRF cookie age can be set using settings.CSRF_COOKIE_AGE.
"""
req = self._get_request()
MAX_AGE = 123
with self.settings(
CSRF_COOKIE_NAME="csrfcookie",
CSRF_COOKIE_DOMAIN=".example.com",
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH="/test/",
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True,
):
# token_view calls get_token() indirectly
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
max_age = resp.cookies.get("csrfcookie").get("max-age")
self.assertEqual(max_age, MAX_AGE)
def test_csrf_cookie_age_none(self):
"""
When CSRF_COOKIE_AGE is None, the CSRF cookie has no max-age set and
is therefore a session-based cookie.
"""
req = self._get_request()
MAX_AGE = None
with self.settings(
CSRF_COOKIE_NAME="csrfcookie",
CSRF_COOKIE_DOMAIN=".example.com",
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH="/test/",
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True,
):
# token_view calls get_token() indirectly
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
max_age = resp.cookies.get("csrfcookie").get("max-age")
self.assertEqual(max_age, "")
def test_csrf_cookie_samesite(self):
req = self._get_request()
with self.settings(
CSRF_COOKIE_NAME="csrfcookie", CSRF_COOKIE_SAMESITE="Strict"
):
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
self.assertEqual(resp.cookies["csrfcookie"]["samesite"], "Strict")
def test_bad_csrf_cookie_characters(self):
"""
If the CSRF cookie has invalid characters in a POST request, the
middleware rejects the incoming request.
"""
self._check_bad_or_missing_cookie(
64 * "*", "CSRF cookie has invalid characters."
)
def test_bad_csrf_cookie_length(self):
"""
If the CSRF cookie has an incorrect length in a POST request, the
middleware rejects the incoming request.
"""
self._check_bad_or_missing_cookie(16 * "a", "CSRF cookie has incorrect length.")
def test_process_view_token_too_long(self):
"""
If the token is longer than expected, it is ignored and a new token is
created.
"""
req = self._get_request(cookie="x" * 100000)
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(len(csrf_cookie), CSRF_SECRET_LENGTH)
def test_process_view_token_invalid_chars(self):
"""
If the token contains non-alphanumeric characters, it is ignored and a
new token is created.
"""
token = ("!@#" + self._csrf_id_token)[:CSRF_TOKEN_LENGTH]
req = self._get_request(cookie=token)
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(len(csrf_cookie), CSRF_SECRET_LENGTH)
self.assertNotEqual(csrf_cookie, token)
def test_masked_unmasked_combinations(self):
"""
All combinations are allowed of (1) masked and unmasked cookies,
(2) masked and unmasked tokens, and (3) tokens provided via POST and
the X-CSRFToken header.
"""
cases = [
(TEST_SECRET, TEST_SECRET, None),
(TEST_SECRET, MASKED_TEST_SECRET2, None),
(TEST_SECRET, None, TEST_SECRET),
(TEST_SECRET, None, MASKED_TEST_SECRET2),
(MASKED_TEST_SECRET1, TEST_SECRET, None),
(MASKED_TEST_SECRET1, MASKED_TEST_SECRET2, None),
(MASKED_TEST_SECRET1, None, TEST_SECRET),
(MASKED_TEST_SECRET1, None, MASKED_TEST_SECRET2),
]
for args in cases:
with self.subTest(args=args):
cookie, post_token, meta_token = args
req = self._get_POST_csrf_cookie_request(
cookie=cookie,
post_token=post_token,
meta_token=meta_token,
)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
def test_set_cookie_called_only_once(self):
"""
set_cookie() is called only once when the view is decorated with both
ensure_csrf_cookie and csrf_protect.
"""
req = self._get_POST_request_with_token()
resp = ensured_and_protected_view(req)
self.assertContains(resp, "OK")
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(csrf_cookie, TEST_SECRET)
# set_cookie() was called only once and with the expected secret.
cookies_set = self._get_cookies_set(req, resp)
self.assertEqual(cookies_set, [TEST_SECRET])
def test_invalid_cookie_replaced_on_GET(self):
"""
A CSRF cookie with the wrong format is replaced during a GET request.
"""
req = self._get_request(cookie="badvalue")
resp = protected_view(req)
self.assertContains(resp, "OK")
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertTrue(csrf_cookie, msg="No CSRF cookie was sent.")
self.assertEqual(len(csrf_cookie), CSRF_SECRET_LENGTH)
def test_valid_secret_not_replaced_on_GET(self):
"""
Masked and unmasked CSRF cookies are not replaced during a GET request.
"""
cases = [
TEST_SECRET,
MASKED_TEST_SECRET1,
]
for cookie in cases:
with self.subTest(cookie=cookie):
req = self._get_request(cookie=cookie)
resp = protected_view(req)
self.assertContains(resp, "OK")
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertFalse(csrf_cookie, msg="A CSRF cookie was sent.")
def test_masked_secret_accepted_and_replaced(self):
"""
For a view that uses the csrf_token, the csrf cookie is replaced with
the unmasked version if originally masked.
"""
req = self._get_POST_request_with_token(cookie=MASKED_TEST_SECRET1)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(csrf_cookie, TEST_SECRET)
self._check_token_present(resp, csrf_cookie)
def test_bare_secret_accepted_and_not_replaced(self):
"""
The csrf cookie is left unchanged if originally not masked.
"""
req = self._get_POST_request_with_token(cookie=TEST_SECRET)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(csrf_cookie, TEST_SECRET)
self._check_token_present(resp, csrf_cookie)
@override_settings(
ALLOWED_HOSTS=["www.example.com"],
CSRF_COOKIE_DOMAIN=".example.com",
USE_X_FORWARDED_PORT=True,
)
def test_https_good_referer_behind_proxy(self):
"""
A POST HTTPS request is accepted when USE_X_FORWARDED_PORT=True.
"""
self._test_https_good_referer_behind_proxy()
@override_settings(
ALLOWED_HOSTS=["www.example.com"], CSRF_COOKIE_DOMAIN=".example.com"
)
def test_https_good_referer_matches_cookie_domain(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN.
"""
self._test_https_good_referer_matches_cookie_domain()
@override_settings(
ALLOWED_HOSTS=["www.example.com"], CSRF_COOKIE_DOMAIN=".example.com"
)
def test_https_good_referer_matches_cookie_domain_with_different_port(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN and a non-443 port.
"""
self._test_https_good_referer_matches_cookie_domain_with_different_port()
@override_settings(CSRF_COOKIE_DOMAIN=".example.com", DEBUG=True)
def test_https_reject_insecure_referer(self):
"""
A POST HTTPS request from an insecure referer should be rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_REFERER"] = "http://example.com/"
req.META["SERVER_PORT"] = "443"
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
"Referer checking failed - Referer is insecure while host is secure.",
status_code=403,
)
@override_settings(CSRF_USE_SESSIONS=True, CSRF_COOKIE_DOMAIN=None)
class CsrfViewMiddlewareUseSessionsTests(CsrfViewMiddlewareTestMixin, SimpleTestCase):
"""
CSRF tests with CSRF_USE_SESSIONS=True.
"""
def _set_csrf_cookie(self, req, cookie):
req.session[CSRF_SESSION_KEY] = cookie
def _read_csrf_cookie(self, req, resp=None):
"""
Return the CSRF cookie as a string, or False if no cookie is present.
"""
if CSRF_SESSION_KEY not in req.session:
return False
return req.session[CSRF_SESSION_KEY]
def _get_cookies_set(self, req, resp):
return req.session._cookies_set
def test_no_session_on_request(self):
msg = (
"CSRF_USE_SESSIONS is enabled, but request.session is not set. "
"SessionMiddleware must appear before CsrfViewMiddleware in MIDDLEWARE."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
mw = CsrfViewMiddleware(lambda req: HttpResponse())
mw.process_request(HttpRequest())
def test_masked_unmasked_combinations(self):
"""
Masked and unmasked tokens are allowed both as POST and as the
X-CSRFToken header.
"""
cases = [
# Bare secrets are not allowed when CSRF_USE_SESSIONS=True.
(MASKED_TEST_SECRET1, TEST_SECRET, None),
(MASKED_TEST_SECRET1, MASKED_TEST_SECRET2, None),
(MASKED_TEST_SECRET1, None, TEST_SECRET),
(MASKED_TEST_SECRET1, None, MASKED_TEST_SECRET2),
]
for args in cases:
with self.subTest(args=args):
cookie, post_token, meta_token = args
req = self._get_POST_csrf_cookie_request(
cookie=cookie,
post_token=post_token,
meta_token=meta_token,
)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
def test_process_response_get_token_used(self):
"""The ensure_csrf_cookie() decorator works without middleware."""
req = self._get_request()
ensure_csrf_cookie_view(req)
csrf_cookie = self._read_csrf_cookie(req)
self.assertTrue(csrf_cookie)
def test_session_modify(self):
"""The session isn't saved if the CSRF cookie is unchanged."""
req = self._get_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
csrf_cookie = self._read_csrf_cookie(req)
self.assertTrue(csrf_cookie)
req.session.modified = False
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
self.assertFalse(req.session.modified)
def test_ensures_csrf_cookie_with_middleware(self):
"""
The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware
enabled.
"""
req = self._get_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
csrf_cookie = self._read_csrf_cookie(req)
self.assertTrue(csrf_cookie)
@override_settings(
ALLOWED_HOSTS=["www.example.com"],
SESSION_COOKIE_DOMAIN=".example.com",
USE_X_FORWARDED_PORT=True,
DEBUG=True,
)
def test_https_good_referer_behind_proxy(self):
"""
A POST HTTPS request is accepted when USE_X_FORWARDED_PORT=True.
"""
self._test_https_good_referer_behind_proxy()
@override_settings(
ALLOWED_HOSTS=["www.example.com"], SESSION_COOKIE_DOMAIN=".example.com"
)
def test_https_good_referer_matches_cookie_domain(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by SESSION_COOKIE_DOMAIN.
"""
self._test_https_good_referer_matches_cookie_domain()
@override_settings(
ALLOWED_HOSTS=["www.example.com"], SESSION_COOKIE_DOMAIN=".example.com"
)
def test_https_good_referer_matches_cookie_domain_with_different_port(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by SESSION_COOKIE_DOMAIN and a non-443 port.
"""
self._test_https_good_referer_matches_cookie_domain_with_different_port()
@override_settings(SESSION_COOKIE_DOMAIN=".example.com", DEBUG=True)
def test_https_reject_insecure_referer(self):
"""
A POST HTTPS request from an insecure referer should be rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_REFERER"] = "http://example.com/"
req.META["SERVER_PORT"] = "443"
mw = CsrfViewMiddleware(post_form_view)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
"Referer checking failed - Referer is insecure while host is secure.",
status_code=403,
)
@override_settings(ROOT_URLCONF="csrf_tests.csrf_token_error_handler_urls", DEBUG=False)
class CsrfInErrorHandlingViewsTests(CsrfFunctionTestMixin, SimpleTestCase):
def test_csrf_token_on_404_stays_constant(self):
response = self.client.get("/does not exist/")
# The error handler returns status code 599.
self.assertEqual(response.status_code, 599)
token1 = response.content.decode("ascii")
response = self.client.get("/does not exist/")
self.assertEqual(response.status_code, 599)
token2 = response.content.decode("ascii")
secret2 = _unmask_cipher_token(token2)
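        # The two error responses carry different masked tokens that must
        # unmask to the same underlying secret.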
self.assertMaskedSecretCorrect(token1, secret2)
@ignore_warnings(category=RemovedInDjango50Warning)
class CsrfCookieMaskedTests(CsrfFunctionTestMixin, SimpleTestCase):
@override_settings(CSRF_COOKIE_MASKED=True)
def test_get_token_csrf_cookie_not_set(self):
request = HttpRequest()
self.assertNotIn("CSRF_COOKIE", request.META)
self.assertNotIn("CSRF_COOKIE_NEEDS_UPDATE", request.META)
token = get_token(request)
cookie = request.META["CSRF_COOKIE"]
self.assertEqual(len(cookie), CSRF_TOKEN_LENGTH)
unmasked_cookie = _unmask_cipher_token(cookie)
self.assertMaskedSecretCorrect(token, unmasked_cookie)
self.assertIs(request.META["CSRF_COOKIE_NEEDS_UPDATE"], True)
@override_settings(CSRF_COOKIE_MASKED=True)
def test_rotate_token(self):
request = HttpRequest()
request.META["CSRF_COOKIE"] = MASKED_TEST_SECRET1
self.assertNotIn("CSRF_COOKIE_NEEDS_UPDATE", request.META)
rotate_token(request)
# The underlying secret was changed.
cookie = request.META["CSRF_COOKIE"]
self.assertEqual(len(cookie), CSRF_TOKEN_LENGTH)
unmasked_cookie = _unmask_cipher_token(cookie)
self.assertNotEqual(unmasked_cookie, TEST_SECRET)
self.assertIs(request.META["CSRF_COOKIE_NEEDS_UPDATE"], True)
542265fe0347127db78097a5cc085370778db97cfaee802630694c032e36a95c
import logging
import os
import unittest
import warnings
from io import StringIO
from unittest import mock
from django.conf import settings
from django.contrib.staticfiles.finders import get_finder, get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage
from django.db import (
IntegrityError,
connection,
connections,
models,
router,
transaction,
)
from django.forms import (
CharField,
EmailField,
Form,
IntegerField,
ValidationError,
formset_factory,
)
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.test import (
SimpleTestCase,
TestCase,
TransactionTestCase,
skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.html import HTMLParseError, parse_html
from django.test.testcases import DatabaseOperationForbidden
from django.test.utils import (
CaptureQueriesContext,
TestContextDecorator,
ignore_warnings,
isolate_apps,
override_settings,
setup_test_environment,
)
from django.urls import NoReverseMatch, path, reverse, reverse_lazy
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.log import DEFAULT_LOGGING
from .models import Car, Person, PossessedCar
from .views import empty_response
class SkippingTestCase(SimpleTestCase):
def _assert_skipping(self, func, expected_exc, msg=None):
try:
if msg is not None:
with self.assertRaisesMessage(expected_exc, msg):
func()
else:
with self.assertRaises(expected_exc):
func()
except unittest.SkipTest:
self.fail("%s should not result in a skipped test." % func.__name__)
def test_skip_unless_db_feature(self):
"""
Testing the django.test.skipUnlessDBFeature decorator.
"""
# Total hack, but it works, just want an attribute that's always true.
@skipUnlessDBFeature("__class__")
def test_func():
raise ValueError
@skipUnlessDBFeature("notprovided")
def test_func2():
raise ValueError
@skipUnlessDBFeature("__class__", "__class__")
def test_func3():
raise ValueError
@skipUnlessDBFeature("__class__", "notprovided")
def test_func4():
raise ValueError
self._assert_skipping(test_func, ValueError)
self._assert_skipping(test_func2, unittest.SkipTest)
self._assert_skipping(test_func3, ValueError)
self._assert_skipping(test_func4, unittest.SkipTest)
class SkipTestCase(SimpleTestCase):
@skipUnlessDBFeature("missing")
def test_foo(self):
pass
self._assert_skipping(
SkipTestCase("test_foo").test_foo,
ValueError,
"skipUnlessDBFeature cannot be used on test_foo (test_utils.tests."
"SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase) "
"as SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase "
"doesn't allow queries against the 'default' database.",
)
def test_skip_if_db_feature(self):
"""
Testing the django.test.skipIfDBFeature decorator.
"""
@skipIfDBFeature("__class__")
def test_func():
raise ValueError
@skipIfDBFeature("notprovided")
def test_func2():
raise ValueError
@skipIfDBFeature("__class__", "__class__")
def test_func3():
raise ValueError
@skipIfDBFeature("__class__", "notprovided")
def test_func4():
raise ValueError
@skipIfDBFeature("notprovided", "notprovided")
def test_func5():
raise ValueError
self._assert_skipping(test_func, unittest.SkipTest)
self._assert_skipping(test_func2, ValueError)
self._assert_skipping(test_func3, unittest.SkipTest)
self._assert_skipping(test_func4, unittest.SkipTest)
self._assert_skipping(test_func5, ValueError)
class SkipTestCase(SimpleTestCase):
@skipIfDBFeature("missing")
def test_foo(self):
pass
self._assert_skipping(
SkipTestCase("test_foo").test_foo,
ValueError,
"skipIfDBFeature cannot be used on test_foo (test_utils.tests."
"SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase) "
"as SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase "
"doesn't allow queries against the 'default' database.",
)
class SkippingClassTestCase(TestCase):
def test_skip_class_unless_db_feature(self):
@skipUnlessDBFeature("__class__")
class NotSkippedTests(TestCase):
def test_dummy(self):
return
@skipUnlessDBFeature("missing")
@skipIfDBFeature("__class__")
class SkippedTests(TestCase):
def test_will_be_skipped(self):
self.fail("We should never arrive here.")
@skipIfDBFeature("__dict__")
class SkippedTestsSubclass(SkippedTests):
pass
test_suite = unittest.TestSuite()
test_suite.addTest(NotSkippedTests("test_dummy"))
try:
test_suite.addTest(SkippedTests("test_will_be_skipped"))
test_suite.addTest(SkippedTestsSubclass("test_will_be_skipped"))
except unittest.SkipTest:
self.fail("SkipTest should not be raised here.")
result = unittest.TextTestRunner(stream=StringIO()).run(test_suite)
self.assertEqual(result.testsRun, 3)
self.assertEqual(len(result.skipped), 2)
self.assertEqual(result.skipped[0][1], "Database has feature(s) __class__")
self.assertEqual(result.skipped[1][1], "Database has feature(s) __class__")
def test_missing_default_databases(self):
@skipIfDBFeature("missing")
class MissingDatabases(SimpleTestCase):
def test_assertion_error(self):
pass
suite = unittest.TestSuite()
try:
suite.addTest(MissingDatabases("test_assertion_error"))
except unittest.SkipTest:
self.fail("SkipTest should not be raised at this stage")
runner = unittest.TextTestRunner(stream=StringIO())
msg = (
"skipIfDBFeature cannot be used on <class 'test_utils.tests."
"SkippingClassTestCase.test_missing_default_databases.<locals>."
"MissingDatabases'> as it doesn't allow queries against the "
"'default' database."
)
with self.assertRaisesMessage(ValueError, msg):
runner.run(suite)
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertNumQueriesTests(TestCase):
def test_assert_num_queries(self):
def test_func():
raise ValueError
with self.assertRaises(ValueError):
self.assertNumQueries(2, test_func)
def test_assert_num_queries_with_client(self):
person = Person.objects.create(name="test")
self.assertNumQueries(
1, self.client.get, "/test_utils/get_person/%s/" % person.pk
)
self.assertNumQueries(
1, self.client.get, "/test_utils/get_person/%s/" % person.pk
)
def test_func():
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.assertNumQueries(2, test_func)
@unittest.skipUnless(
connection.vendor != "sqlite" or not connection.is_in_memory_db(),
"For SQLite in-memory tests, closing the connection destroys the database.",
)
class AssertNumQueriesUponConnectionTests(TransactionTestCase):
available_apps = []
def test_ignores_connection_configuration_queries(self):
real_ensure_connection = connection.ensure_connection
connection.close()
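        # Patch ensure_connection() so that reopening the connection issues an
        # extra configuration query, which assertNumQueries() must ignore.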
def make_configuration_query():
is_opening_connection = connection.connection is None
real_ensure_connection()
if is_opening_connection:
# Avoid infinite recursion. Creating a cursor calls
# ensure_connection() which is currently mocked by this method.
with connection.cursor() as cursor:
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
ensure_connection = (
"django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection"
)
with mock.patch(ensure_connection, side_effect=make_configuration_query):
with self.assertNumQueries(1):
list(Car.objects.all())
class AssertQuerysetEqualTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.p1 = Person.objects.create(name="p1")
cls.p2 = Person.objects.create(name="p2")
def test_empty(self):
self.assertQuerysetEqual(Person.objects.filter(name="p3"), [])
def test_ordered(self):
self.assertQuerysetEqual(
Person.objects.order_by("name"),
[self.p1, self.p2],
)
def test_unordered(self):
self.assertQuerysetEqual(
Person.objects.order_by("name"), [self.p2, self.p1], ordered=False
)
def test_queryset(self):
self.assertQuerysetEqual(
Person.objects.order_by("name"),
Person.objects.order_by("name"),
)
def test_flat_values_list(self):
self.assertQuerysetEqual(
Person.objects.order_by("name").values_list("name", flat=True),
["p1", "p2"],
)
def test_transform(self):
self.assertQuerysetEqual(
Person.objects.order_by("name"),
[self.p1.pk, self.p2.pk],
transform=lambda x: x.pk,
)
def test_repr_transform(self):
self.assertQuerysetEqual(
Person.objects.order_by("name"),
[repr(self.p1), repr(self.p2)],
transform=repr,
)
def test_undefined_order(self):
# Using an unordered queryset with more than one ordered value
# is an error.
msg = (
"Trying to compare non-ordered queryset against more than one "
"ordered value."
)
with self.assertRaisesMessage(ValueError, msg):
self.assertQuerysetEqual(
Person.objects.all(),
[self.p1, self.p2],
)
# No error for one value.
self.assertQuerysetEqual(Person.objects.filter(name="p1"), [self.p1])
def test_repeated_values(self):
"""
assertQuerysetEqual checks the number of appearance of each item
when used with option ordered=False.
"""
batmobile = Car.objects.create(name="Batmobile")
k2000 = Car.objects.create(name="K 2000")
PossessedCar.objects.bulk_create(
[
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
]
)
with self.assertRaises(AssertionError):
self.assertQuerysetEqual(
self.p1.cars.all(), [batmobile, k2000], ordered=False
)
self.assertQuerysetEqual(
self.p1.cars.all(), [batmobile] * 2 + [k2000] * 4, ordered=False
)
def test_maxdiff(self):
names = ["Joe Smith %s" % i for i in range(20)]
Person.objects.bulk_create([Person(name=name) for name in names])
names.append("Extra Person")
with self.assertRaises(AssertionError) as ctx:
self.assertQuerysetEqual(
Person.objects.filter(name__startswith="Joe"),
names,
ordered=False,
transform=lambda p: p.name,
)
self.assertIn("Set self.maxDiff to None to see it.", str(ctx.exception))
original = self.maxDiff
self.maxDiff = None
try:
with self.assertRaises(AssertionError) as ctx:
self.assertQuerysetEqual(
Person.objects.filter(name__startswith="Joe"),
names,
ordered=False,
transform=lambda p: p.name,
)
finally:
self.maxDiff = original
exception_msg = str(ctx.exception)
self.assertNotIn("Set self.maxDiff to None to see it.", exception_msg)
for name in names:
self.assertIn(name, exception_msg)
@override_settings(ROOT_URLCONF="test_utils.urls")
class CaptureQueriesContextManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.person_pk = str(Person.objects.create(name="test").pk)
def test_simple(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
with CaptureQueriesContext(connection) as captured_queries:
pass
self.assertEqual(0, len(captured_queries))
def test_within(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
def test_nested(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.count()
with CaptureQueriesContext(connection) as nested_captured_queries:
Person.objects.count()
self.assertEqual(1, len(nested_captured_queries))
self.assertEqual(2, len(captured_queries))
def test_failure(self):
with self.assertRaises(TypeError):
with CaptureQueriesContext(connection):
raise TypeError
def test_with_client(self):
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 2)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
self.assertIn(self.person_pk, captured_queries[1]["sql"])
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertNumQueriesContextManagerTests(TestCase):
def test_simple(self):
with self.assertNumQueries(0):
pass
with self.assertNumQueries(1):
Person.objects.count()
with self.assertNumQueries(2):
Person.objects.count()
Person.objects.count()
def test_failure(self):
msg = "1 != 2 : 1 queries executed, 2 expected\nCaptured queries were:\n1."
with self.assertRaisesMessage(AssertionError, msg):
with self.assertNumQueries(2):
Person.objects.count()
with self.assertRaises(TypeError):
with self.assertNumQueries(4000):
raise TypeError
def test_with_client(self):
person = Person.objects.create(name="test")
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(2):
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertTemplateUsedContextManagerTests(SimpleTestCase):
def test_usage(self):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/base.html")
with self.assertTemplateUsed(template_name="template_used/base.html"):
render_to_string("template_used/base.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/include.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/extends.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/base.html")
render_to_string("template_used/base.html")
def test_nested_usage(self):
with self.assertTemplateUsed("template_used/base.html"):
with self.assertTemplateUsed("template_used/include.html"):
render_to_string("template_used/include.html")
with self.assertTemplateUsed("template_used/extends.html"):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/extends.html")
with self.assertTemplateUsed("template_used/base.html"):
with self.assertTemplateUsed("template_used/alternative.html"):
render_to_string("template_used/alternative.html")
render_to_string("template_used/base.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/extends.html")
with self.assertTemplateNotUsed("template_used/base.html"):
render_to_string("template_used/alternative.html")
render_to_string("template_used/base.html")
def test_not_used(self):
with self.assertTemplateNotUsed("template_used/base.html"):
pass
with self.assertTemplateNotUsed("template_used/alternative.html"):
pass
def test_error_message(self):
msg = "No templates used to render the response"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed("template_used/base.html"):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(template_name="template_used/base.html"):
pass
msg2 = (
"Template 'template_used/base.html' was not a template used to render "
"the response. Actual template(s) used: template_used/alternative.html"
)
with self.assertRaisesMessage(AssertionError, msg2):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/alternative.html")
with self.assertRaisesMessage(
AssertionError, "No templates used to render the response"
):
response = self.client.get("/test_utils/no_template_used/")
self.assertTemplateUsed(response, "template_used/base.html")
def test_msg_prefix(self):
msg_prefix = "Prefix"
msg = f"{msg_prefix}: No templates used to render the response"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(
"template_used/base.html", msg_prefix=msg_prefix
):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(
template_name="template_used/base.html",
msg_prefix=msg_prefix,
):
pass
msg = (
f"{msg_prefix}: Template 'template_used/base.html' was not a "
f"template used to render the response. Actual template(s) used: "
f"template_used/alternative.html"
)
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(
"template_used/base.html", msg_prefix=msg_prefix
):
render_to_string("template_used/alternative.html")
def test_count(self):
with self.assertTemplateUsed("template_used/base.html", count=2):
render_to_string("template_used/base.html")
render_to_string("template_used/base.html")
msg = (
"Template 'template_used/base.html' was expected to be rendered "
"3 time(s) but was actually rendered 2 time(s)."
)
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed("template_used/base.html", count=3):
render_to_string("template_used/base.html")
render_to_string("template_used/base.html")
def test_failure(self):
msg = "response and/or template_name argument must be provided"
with self.assertRaisesMessage(TypeError, msg):
with self.assertTemplateUsed():
pass
msg = "No templates used to render the response"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(""):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(""):
render_to_string("template_used/base.html")
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(template_name=""):
pass
msg = (
"Template 'template_used/base.html' was not a template used to "
"render the response. Actual template(s) used: "
"template_used/alternative.html"
)
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/alternative.html")
def test_assert_used_on_http_response(self):
response = HttpResponse()
msg = "%s() is only usable on responses fetched using the Django test Client."
with self.assertRaisesMessage(ValueError, msg % "assertTemplateUsed"):
self.assertTemplateUsed(response, "template.html")
with self.assertRaisesMessage(ValueError, msg % "assertTemplateNotUsed"):
self.assertTemplateNotUsed(response, "template.html")
class HTMLEqualTests(SimpleTestCase):
def test_html_parser(self):
element = parse_html("<div><p>Hello</p></div>")
self.assertEqual(len(element.children), 1)
self.assertEqual(element.children[0].name, "p")
self.assertEqual(element.children[0].children[0], "Hello")
parse_html("<p>")
parse_html("<p attr>")
dom = parse_html("<p>foo")
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.name, "p")
self.assertEqual(dom[0], "foo")
def test_parse_html_in_script(self):
parse_html('<script>var a = "<p" + ">";</script>')
parse_html(
"""
<script>
var js_sha_link='<p>***</p>';
</script>
"""
)
# script content will be parsed to text
dom = parse_html(
"""
<script><p>foo</p> '</scr'+'ipt>' <span>bar</span></script>
"""
)
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.children[0], "<p>foo</p> '</scr'+'ipt>' <span>bar</span>")
def test_self_closing_tags(self):
self_closing_tags = [
"area",
"base",
"br",
"col",
"embed",
"hr",
"img",
"input",
"link",
"meta",
"param",
"source",
"track",
"wbr",
# Deprecated tags
"frame",
"spacer",
]
for tag in self_closing_tags:
with self.subTest(tag):
dom = parse_html("<p>Hello <%s> world</p>" % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], "Hello")
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], "world")
dom = parse_html("<p>Hello <%s /> world</p>" % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], "Hello")
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], "world")
def test_simple_equal_html(self):
self.assertHTMLEqual("", "")
self.assertHTMLEqual("<p></p>", "<p></p>")
self.assertHTMLEqual("<p></p>", " <p> </p> ")
self.assertHTMLEqual("<div><p>Hello</p></div>", "<div><p>Hello</p></div>")
self.assertHTMLEqual("<div><p>Hello</p></div>", "<div> <p>Hello</p> </div>")
self.assertHTMLEqual("<div>\n<p>Hello</p></div>", "<div><p>Hello</p></div>\n")
self.assertHTMLEqual(
"<div><p>Hello\nWorld !</p></div>", "<div><p>Hello World\n!</p></div>"
)
self.assertHTMLEqual(
"<div><p>Hello\nWorld !</p></div>", "<div><p>Hello World\n!</p></div>"
)
self.assertHTMLEqual("<p>Hello World !</p>", "<p>Hello World\n\n!</p>")
self.assertHTMLEqual("<p> </p>", "<p></p>")
self.assertHTMLEqual("<p/>", "<p></p>")
self.assertHTMLEqual("<p />", "<p></p>")
self.assertHTMLEqual("<input checked>", '<input checked="checked">')
self.assertHTMLEqual("<p>Hello", "<p> Hello")
self.assertHTMLEqual("<p>Hello</p>World", "<p>Hello</p> World")
def test_ignore_comments(self):
self.assertHTMLEqual(
"<div>Hello<!-- this is a comment --> World!</div>",
"<div>Hello World!</div>",
)
def test_unequal_html(self):
self.assertHTMLNotEqual("<p>Hello</p>", "<p>Hello!</p>")
self.assertHTMLNotEqual("<p>foobar</p>", "<p>foo bar</p>")
self.assertHTMLNotEqual("<p>foo bar</p>", "<p>foo bar</p>")
self.assertHTMLNotEqual("<p>foo nbsp</p>", "<p>foo </p>")
self.assertHTMLNotEqual("<p>foo #20</p>", "<p>foo </p>")
self.assertHTMLNotEqual(
"<p><span>Hello</span><span>World</span></p>",
"<p><span>Hello</span>World</p>",
)
self.assertHTMLNotEqual(
"<p><span>Hello</span>World</p>",
"<p><span>Hello</span><span>World</span></p>",
)
def test_attributes(self):
self.assertHTMLEqual(
'<input type="text" id="id_name" />', '<input id="id_name" type="text" />'
)
self.assertHTMLEqual(
"""<input type='text' id="id_name" />""",
'<input id="id_name" type="text" />',
)
self.assertHTMLNotEqual(
'<input type="text" id="id_name" />',
'<input type="password" id="id_name" />',
)
def test_class_attribute(self):
pairs = [
('<p class="foo bar"></p>', '<p class="bar foo"></p>'),
('<p class=" foo bar "></p>', '<p class="bar foo"></p>'),
('<p class=" foo bar "></p>', '<p class="bar foo"></p>'),
('<p class="foo\tbar"></p>', '<p class="bar foo"></p>'),
('<p class="\tfoo\tbar\t"></p>', '<p class="bar foo"></p>'),
('<p class="\t\t\tfoo\t\t\tbar\t\t\t"></p>', '<p class="bar foo"></p>'),
('<p class="\t \nfoo \t\nbar\n\t "></p>', '<p class="bar foo"></p>'),
]
for html1, html2 in pairs:
with self.subTest(html1):
self.assertHTMLEqual(html1, html2)
def test_boolean_attribute(self):
html1 = "<input checked>"
html2 = '<input checked="">'
html3 = '<input checked="checked">'
self.assertHTMLEqual(html1, html2)
self.assertHTMLEqual(html1, html3)
self.assertHTMLEqual(html2, html3)
self.assertHTMLNotEqual(html1, '<input checked="invalid">')
self.assertEqual(str(parse_html(html1)), "<input checked>")
self.assertEqual(str(parse_html(html2)), "<input checked>")
self.assertEqual(str(parse_html(html3)), "<input checked>")
    def test_non_boolean_attributes(self):
html1 = "<input value>"
html2 = '<input value="">'
html3 = '<input value="value">'
self.assertHTMLEqual(html1, html2)
self.assertHTMLNotEqual(html1, html3)
self.assertEqual(str(parse_html(html1)), '<input value="">')
self.assertEqual(str(parse_html(html2)), '<input value="">')
def test_normalize_refs(self):
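        # Each pair holds two encodings of the same character: "'" as
        # &#39;/&#x27; or "&" as &amp;/&#38;/&#x26;.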
pairs = [
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
]
for pair in pairs:
with self.subTest(repr(pair)):
self.assertHTMLEqual(*pair)
def test_complex_examples(self):
self.assertHTMLEqual(
"""<tr><th><label for="id_first_name">First name:</label></th>
<td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><input type="text" id="id_last_name" name="last_name" value="Lennon" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><input type="text" value="1940-10-9" name="birthday" id="id_birthday" /></td></tr>""", # NOQA
"""
<tr><th>
<label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" value="John" id="id_first_name" />
</td></tr>
<tr><th>
<label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
</td></tr>
<tr><th>
<label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
</td></tr>
""",
)
self.assertHTMLEqual(
"""<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p>
This is a valid paragraph
<div> this is a div AFTER the p</div>
</body>
</html>""",
"""
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p> This is a valid paragraph
<!-- browsers would close the p tag here -->
<div> this is a div AFTER the p</div>
</p> <!-- this is invalid HTML parsing, but it should make no
difference in most cases -->
</body>
</html>""",
)
def test_html_contain(self):
# equal html contains each other
dom1 = parse_html("<p>foo")
dom2 = parse_html("<p>foo</p>")
self.assertIn(dom1, dom2)
self.assertIn(dom2, dom1)
dom2 = parse_html("<div><p>foo</p></div>")
self.assertIn(dom1, dom2)
self.assertNotIn(dom2, dom1)
self.assertNotIn("<p>foo</p>", dom2)
self.assertIn("foo", dom2)
# when a root element is used ...
dom1 = parse_html("<p>foo</p><p>bar</p>")
dom2 = parse_html("<p>foo</p><p>bar</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<p>foo</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<p>bar</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<div><p>foo</p><p>bar</p></div>")
self.assertIn(dom2, dom1)
def test_count(self):
# equal html contains each other one time
dom1 = parse_html("<p>foo")
dom2 = parse_html("<p>foo</p>")
self.assertEqual(dom1.count(dom2), 1)
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo</p><p>bar</p>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo foo</p><p>foo</p>")
self.assertEqual(dom2.count("foo"), 3)
dom2 = parse_html('<p class="bar">foo</p>')
self.assertEqual(dom2.count("bar"), 0)
self.assertEqual(dom2.count("class"), 0)
self.assertEqual(dom2.count("p"), 0)
self.assertEqual(dom2.count("o"), 2)
dom2 = parse_html("<p>foo</p><p>foo</p>")
self.assertEqual(dom2.count(dom1), 2)
dom2 = parse_html('<div><p>foo<input type=""></p><p>foo</p></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<div><div><p>foo</p></div></div>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo<p>foo</p></p>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo<p>bar</p></p>")
self.assertEqual(dom2.count(dom1), 0)
# HTML with a root element contains the same HTML with no root element.
dom1 = parse_html("<p>foo</p><p>bar</p>")
dom2 = parse_html("<div><p>foo</p><p>bar</p></div>")
self.assertEqual(dom2.count(dom1), 1)
# Target of search is a sequence of child elements and appears more
# than once.
dom2 = parse_html("<div><p>foo</p><p>bar</p><p>foo</p><p>bar</p></div>")
self.assertEqual(dom2.count(dom1), 2)
# Searched HTML has additional children.
dom1 = parse_html("<a/><b/>")
dom2 = parse_html("<a/><b/><c/>")
self.assertEqual(dom2.count(dom1), 1)
# No match found in children.
dom1 = parse_html("<b/><a/>")
self.assertEqual(dom2.count(dom1), 0)
# Target of search found among children and grandchildren.
dom1 = parse_html("<b/><b/>")
dom2 = parse_html("<a><b/><b/></a><b/><b/>")
self.assertEqual(dom2.count(dom1), 2)
def test_root_element_escaped_html(self):
html = "<br>"
parsed = parse_html(html)
self.assertEqual(str(parsed), html)
def test_parsing_errors(self):
with self.assertRaises(AssertionError):
self.assertHTMLEqual("<p>", "")
with self.assertRaises(AssertionError):
self.assertHTMLEqual("", "<p>")
error_msg = (
"First argument is not valid HTML:\n"
"('Unexpected end tag `div` (Line 1, Column 6)', (1, 6))"
)
with self.assertRaisesMessage(AssertionError, error_msg):
self.assertHTMLEqual("< div></ div>", "<div></div>")
with self.assertRaises(HTMLParseError):
parse_html("</p>")
def test_escaped_html_errors(self):
msg = "<p>\n<foo>\n</p> != <p>\n<foo>\n</p>\n"
with self.assertRaisesMessage(AssertionError, msg):
self.assertHTMLEqual("<p><foo></p>", "<p><foo></p>")
with self.assertRaisesMessage(AssertionError, msg):
self.assertHTMLEqual("<p><foo></p>", "<p><foo></p>")
def test_contains_html(self):
response = HttpResponse(
"""<body>
This is a form: <form method="get">
<input type="text" name="Hello" />
</form></body>"""
)
self.assertNotContains(response, "<input name='Hello' type='text'>")
self.assertContains(response, '<form method="get">')
self.assertContains(response, "<input name='Hello' type='text'>", html=True)
self.assertNotContains(response, '<form method="get">', html=True)
invalid_response = HttpResponse("""<body <bad>>""")
with self.assertRaises(AssertionError):
self.assertContains(invalid_response, "<p></p>")
with self.assertRaises(AssertionError):
self.assertContains(response, '<p "whats" that>')
def test_unicode_handling(self):
response = HttpResponse(
'<p class="help">Some help text for the title (with Unicode ŠĐĆŽćžšđ)</p>'
)
self.assertContains(
response,
'<p class="help">Some help text for the title (with Unicode ŠĐĆŽćžšđ)</p>',
html=True,
)
class JSONEqualTests(SimpleTestCase):
def test_simple_equal(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr1": "foo", "attr2":"baz"}'
self.assertJSONEqual(json1, json2)
def test_simple_equal_unordered(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz", "attr1": "foo"}'
self.assertJSONEqual(json1, json2)
def test_simple_equal_raise(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONEqual(json1, json2)
def test_equal_parsing_errors(self):
invalid_json = '{"attr1": "foo, "attr2":"baz"}'
valid_json = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONEqual(invalid_json, valid_json)
with self.assertRaises(AssertionError):
self.assertJSONEqual(valid_json, invalid_json)
def test_simple_not_equal(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz"}'
self.assertJSONNotEqual(json1, json2)
def test_simple_not_equal_raise(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(json1, json2)
def test_not_equal_parsing_errors(self):
invalid_json = '{"attr1": "foo, "attr2":"baz"}'
valid_json = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(invalid_json, valid_json)
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(valid_json, invalid_json)
class XMLEqualTests(SimpleTestCase):
def test_simple_equal(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_unordered(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_raise(self):
xml1 = "<elem attr1='a' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_raises_message(self):
xml1 = "<elem attr1='a' />"
xml2 = "<elem attr2='b' attr1='a' />"
msg = """{xml1} != {xml2}
- <elem attr1='a' />
+ <elem attr2='b' attr1='a' />
? ++++++++++
""".format(
xml1=repr(xml1), xml2=repr(xml2)
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertXMLEqual(xml1, xml2)
def test_simple_not_equal(self):
xml1 = "<elem attr1='a' attr2='c' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLNotEqual(xml1, xml2)
def test_simple_not_equal_raise(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLNotEqual(xml1, xml2)
def test_parsing_errors(self):
xml_unvalid = "<elem attr1='a attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLNotEqual(xml_unvalid, xml2)
def test_comment_root(self):
xml1 = "<?xml version='1.0'?><!-- comment1 --><elem attr1='a' attr2='b' />"
xml2 = "<?xml version='1.0'?><!-- comment2 --><elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_with_leading_or_trailing_whitespace(self):
xml1 = "<elem>foo</elem> \t\n"
xml2 = " \t\n<elem>foo</elem>"
self.assertXMLEqual(xml1, xml2)
def test_simple_not_equal_with_whitespace_in_the_middle(self):
xml1 = "<elem>foo</elem><elem>bar</elem>"
xml2 = "<elem>foo</elem> <elem>bar</elem>"
self.assertXMLNotEqual(xml1, xml2)
def test_doctype_root(self):
xml1 = '<?xml version="1.0"?><!DOCTYPE root SYSTEM "example1.dtd"><root />'
xml2 = '<?xml version="1.0"?><!DOCTYPE root SYSTEM "example2.dtd"><root />'
self.assertXMLEqual(xml1, xml2)
def test_processing_instruction(self):
xml1 = (
'<?xml version="1.0"?>'
'<?xml-model href="http://www.example1.com"?><root />'
)
xml2 = (
'<?xml version="1.0"?>'
'<?xml-model href="http://www.example2.com"?><root />'
)
self.assertXMLEqual(xml1, xml2)
self.assertXMLEqual(
'<?xml-stylesheet href="style1.xslt" type="text/xsl"?><root />',
'<?xml-stylesheet href="style2.xslt" type="text/xsl"?><root />',
)
class SkippingExtraTests(TestCase):
fixtures = ["should_not_be_loaded.json"]
# HACK: This depends on internals of our TestCase subclasses
def __call__(self, result=None):
# Detect fixture loading by counting SQL queries, should be zero
with self.assertNumQueries(0):
super().__call__(result)
@unittest.skip("Fixture loading should not be performed for skipped tests.")
def test_fixtures_are_skipped(self):
pass
class AssertRaisesMsgTest(SimpleTestCase):
def test_assert_raises_message(self):
msg = "'Expected message' not found in 'Unexpected message'"
# context manager form of assertRaisesMessage()
with self.assertRaisesMessage(AssertionError, msg):
with self.assertRaisesMessage(ValueError, "Expected message"):
raise ValueError("Unexpected message")
# callable form
def func():
raise ValueError("Unexpected message")
with self.assertRaisesMessage(AssertionError, msg):
self.assertRaisesMessage(ValueError, "Expected message", func)
def test_special_re_chars(self):
"""assertRaisesMessage shouldn't interpret RE special chars."""
def func1():
raise ValueError("[.*x+]y?")
with self.assertRaisesMessage(ValueError, "[.*x+]y?"):
func1()
class AssertWarnsMessageTests(SimpleTestCase):
def test_context_manager(self):
with self.assertWarnsMessage(UserWarning, "Expected message"):
warnings.warn("Expected message", UserWarning)
def test_context_manager_failure(self):
msg = "Expected message' not found in 'Unexpected message'"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertWarnsMessage(UserWarning, "Expected message"):
warnings.warn("Unexpected message", UserWarning)
def test_callable(self):
def func():
warnings.warn("Expected message", UserWarning)
self.assertWarnsMessage(UserWarning, "Expected message", func)
def test_special_re_chars(self):
def func1():
warnings.warn("[.*x+]y?", UserWarning)
with self.assertWarnsMessage(UserWarning, "[.*x+]y?"):
func1()
# TODO: Remove when dropping support for PY39.
class AssertNoLogsTest(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
logging.config.dictConfig(DEFAULT_LOGGING)
cls.addClassCleanup(logging.config.dictConfig, settings.LOGGING)
def setUp(self):
self.logger = logging.getLogger("django")
@override_settings(DEBUG=True)
def test_fails_when_log_emitted(self):
msg = "Unexpected logs found: ['INFO:django:FAIL!']"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertNoLogs("django", "INFO"):
self.logger.info("FAIL!")
@override_settings(DEBUG=True)
def test_text_level(self):
with self.assertNoLogs("django", "INFO"):
self.logger.debug("DEBUG logs are ignored.")
@override_settings(DEBUG=True)
def test_int_level(self):
with self.assertNoLogs("django", logging.INFO):
self.logger.debug("DEBUG logs are ignored.")
@override_settings(DEBUG=True)
def test_default_level(self):
with self.assertNoLogs("django"):
self.logger.debug("DEBUG logs are ignored.")
@override_settings(DEBUG=True)
def test_does_not_hide_other_failures(self):
msg = "1 != 2"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertNoLogs("django"):
self.assertEqual(1, 2)
class AssertFieldOutputTests(SimpleTestCase):
def test_assert_field_output(self):
error_invalid = ["Enter a valid email address."]
self.assertFieldOutput(
EmailField, {"[email protected]": "[email protected]"}, {"aaa": error_invalid}
)
with self.assertRaises(AssertionError):
self.assertFieldOutput(
EmailField,
{"[email protected]": "[email protected]"},
{"aaa": error_invalid + ["Another error"]},
)
with self.assertRaises(AssertionError):
self.assertFieldOutput(
EmailField, {"[email protected]": "Wrong output"}, {"aaa": error_invalid}
)
with self.assertRaises(AssertionError):
self.assertFieldOutput(
EmailField,
{"[email protected]": "[email protected]"},
{"aaa": ["Come on, gimme some well formatted data, dude."]},
)
def test_custom_required_message(self):
class MyCustomField(IntegerField):
default_error_messages = {
"required": "This is really required.",
}
self.assertFieldOutput(MyCustomField, {}, {}, empty_value=None)
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertURLEqualTests(SimpleTestCase):
def test_equal(self):
valid_tests = (
("http://example.com/?", "http://example.com/"),
("http://example.com/?x=1&", "http://example.com/?x=1"),
("http://example.com/?x=1&y=2", "http://example.com/?y=2&x=1"),
("http://example.com/?x=1&y=2", "http://example.com/?y=2&x=1"),
(
"http://example.com/?x=1&y=2&a=1&a=2",
"http://example.com/?a=1&a=2&y=2&x=1",
),
("/path/to/?x=1&y=2&z=3", "/path/to/?z=3&y=2&x=1"),
("?x=1&y=2&z=3", "?z=3&y=2&x=1"),
("/test_utils/no_template_used/", reverse_lazy("no_template_used")),
)
for url1, url2 in valid_tests:
with self.subTest(url=url1):
self.assertURLEqual(url1, url2)
def test_not_equal(self):
invalid_tests = (
# Protocol must be the same.
("http://example.com/", "https://example.com/"),
("http://example.com/?x=1&x=2", "https://example.com/?x=2&x=1"),
("http://example.com/?x=1&y=bar&x=2", "https://example.com/?y=bar&x=2&x=1"),
# Parameters of the same name must be in the same order.
("/path/to?a=1&a=2", "/path/to/?a=2&a=1"),
)
for url1, url2 in invalid_tests:
with self.subTest(url=url1), self.assertRaises(AssertionError):
self.assertURLEqual(url1, url2)
def test_message(self):
msg = (
"Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual(
"http://example.com/?x=1&x=2", "https://example.com/?x=2&x=1"
)
def test_msg_prefix(self):
msg = (
"Prefix: Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual(
"http://example.com/?x=1&x=2",
"https://example.com/?x=2&x=1",
msg_prefix="Prefix: ",
)
class TestForm(Form):
field = CharField()
def clean_field(self):
value = self.cleaned_data.get("field", "")
if value == "invalid":
raise ValidationError("invalid value")
return value
def clean(self):
if self.cleaned_data.get("field") == "invalid_non_field":
raise ValidationError("non-field error")
return self.cleaned_data
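    # Helpers to build bound forms in known valid/invalid states for the
    # assertions below.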
@classmethod
def _get_cleaned_form(cls, field_value):
form = cls({"field": field_value})
form.full_clean()
return form
@classmethod
def valid(cls):
return cls._get_cleaned_form("valid")
@classmethod
def invalid(cls, nonfield=False):
return cls._get_cleaned_form("invalid_non_field" if nonfield else "invalid")
class TestFormset(formset_factory(TestForm)):
@classmethod
def _get_cleaned_formset(cls, field_value):
formset = cls(
{
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-0-field": field_value,
}
)
formset.full_clean()
return formset
@classmethod
def valid(cls):
return cls._get_cleaned_formset("valid")
@classmethod
def invalid(cls, nonfield=False, nonform=False):
if nonform:
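            # Omitting the ManagementForm data triggers a non-form error on
            # the formset.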
formset = cls({}, error_messages={"missing_management_form": "error"})
formset.full_clean()
return formset
return cls._get_cleaned_formset("invalid_non_field" if nonfield else "invalid")
class AssertFormErrorTests(SimpleTestCase):
def test_non_client_response(self):
msg = (
"assertFormError() is only usable on responses fetched using the "
"Django test Client."
)
response = HttpResponse()
with self.assertRaisesMessage(ValueError, msg):
self.assertFormError(response, "formset", 0, "field", "invalid value")
def test_response_with_no_context(self):
msg = "Response did not use any contexts to render the response"
response = mock.Mock(context=[])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(response, "form", "field", "invalid value")
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
response,
"form",
"field",
"invalid value",
msg_prefix=msg_prefix,
)
def test_form_not_in_context(self):
msg = "The form 'form' was not used to render the response"
response = mock.Mock(context=[{}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(response, "form", "field", "invalid value")
def test_field_not_in_form(self):
msg = (
"The form <TestForm bound=True, valid=False, fields=(field)> does not "
"contain the field 'other_field'."
)
response = mock.Mock(context=[{"form": TestForm.invalid()}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(response, "form", "other_field", "invalid value")
def test_field_with_no_errors(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=True, "
"fields=(field)> don't match."
)
response = mock.Mock(context=[{"form": TestForm.valid()}])
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormError(response, "form", "field", "invalid value")
self.assertIn("[] != ['invalid value']", str(ctx.exception))
def test_field_with_different_error(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=False, "
"fields=(field)> don't match."
)
response = mock.Mock(context=[{"form": TestForm.invalid()}])
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormError(response, "form", "field", "other error")
self.assertIn("['invalid value'] != ['other error']", str(ctx.exception))
def test_basic_positive_assertion(self):
response = mock.Mock(context=[{"form": TestForm.invalid()}])
self.assertFormError(response, "form", "field", "invalid value")
def test_basic_positive_assertion_multicontext(self):
response = mock.Mock(context=[{}, {"form": TestForm.invalid()}])
self.assertFormError(response, "form", "field", "invalid value")
def test_empty_errors_unbound_form(self):
msg = (
"The form <TestForm bound=False, valid=Unknown, fields=(field)> is not "
"bound, it will never have any errors."
)
response = mock.Mock(context=[{"form": TestForm()}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(response, "form", "field", [])
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(response, "form", "field", [], msg_prefix=msg_prefix)
def test_empty_errors_valid_form(self):
response = mock.Mock(context=[{"form": TestForm.valid()}])
self.assertFormError(response, "form", "field", [])
def test_empty_errors_invalid_form(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=False, "
"fields=(field)> don't match."
)
response = mock.Mock(context=[{"form": TestForm.invalid()}])
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormError(response, "form", "field", [])
self.assertIn("['invalid value'] != []", str(ctx.exception))
def test_non_field_errors(self):
response = mock.Mock(context=[{"form": TestForm.invalid(nonfield=True)}])
self.assertFormError(response, "form", None, "non-field error")
@ignore_warnings(category=RemovedInDjango50Warning)
def test_errors_none(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=False, "
"fields=(field)> don't match."
)
response = mock.Mock(context=[{"form": TestForm.invalid()}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(response, "form", "field", None)
def test_errors_none_warning(self):
response = mock.Mock(context=[{"form": TestForm.valid()}])
msg = (
"Passing errors=None to assertFormError() is deprecated, use "
"errors=[] instead."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
self.assertFormError(response, "form", "field", None)
class AssertFormsetErrorTests(SimpleTestCase):
def _get_formset_data(self, field_value):
return {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-0-field": field_value,
}
def test_non_client_response(self):
msg = (
"assertFormsetError() is only usable on responses fetched using "
"the Django test Client."
)
response = HttpResponse()
with self.assertRaisesMessage(ValueError, msg):
self.assertFormsetError(response, "formset", 0, "field", "invalid value")
def test_response_with_no_context(self):
msg = "Response did not use any contexts to render the response"
response = mock.Mock(context=[])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(response, "formset", 0, "field", "invalid value")
def test_formset_not_in_context(self):
msg = "The formset 'formset' was not used to render the response"
response = mock.Mock(context=[{}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(response, "formset", 0, "field", "invalid value")
def test_field_not_in_form(self):
msg = (
"The form 0 of formset <TestFormset: bound=True valid=False total_forms=1> "
"does not contain the field 'other_field'."
)
response = mock.Mock(context=[{"formset": TestFormset.invalid()}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(
response,
"formset",
0,
"other_field",
"invalid value",
)
def test_field_with_no_errors(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=True total_forms=1> don't match."
)
response = mock.Mock(context=[{"formset": TestFormset.valid()}])
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormsetError(response, "formset", 0, "field", "invalid value")
self.assertIn("[] != ['invalid value']", str(ctx.exception))
def test_field_with_different_error(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
response = mock.Mock(context=[{"formset": TestFormset.invalid()}])
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormsetError(response, "formset", 0, "field", "other error")
self.assertIn("['invalid value'] != ['other error']", str(ctx.exception))
def test_basic_positive_assertion(self):
response = mock.Mock(context=[{"formset": TestFormset.invalid()}])
self.assertFormsetError(response, "formset", 0, "field", "invalid value")
def test_basic_positive_assertion_multicontext(self):
response = mock.Mock(context=[{}, {"formset": TestFormset.invalid()}])
self.assertFormsetError(response, "formset", 0, "field", "invalid value")
def test_empty_errors_unbound_formset(self):
msg = (
"The formset <TestFormset: bound=False valid=Unknown total_forms=1> is not "
"bound, it will never have any errors."
)
response = mock.Mock(context=[{"formset": TestFormset()}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(response, "formset", 0, "field", [])
def test_empty_errors_valid_formset(self):
response = mock.Mock(context=[{}, {"formset": TestFormset.valid()}])
self.assertFormsetError(response, "formset", 0, "field", [])
def test_empty_errors_invalid_formset(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
response = mock.Mock(context=[{}, {"formset": TestFormset.invalid()}])
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormsetError(response, "formset", 0, "field", [])
self.assertIn("['invalid value'] != []", str(ctx.exception))
def test_non_field_errors(self):
response = mock.Mock(
context=[
{},
{"formset": TestFormset.invalid(nonfield=True)},
]
)
self.assertFormsetError(response, "formset", 0, None, "non-field error")
def test_non_form_errors(self):
response = mock.Mock(
context=[
{},
{"formset": TestFormset.invalid(nonform=True)},
]
)
self.assertFormsetError(response, "formset", None, None, "error")
def test_non_form_errors_with_field(self):
response = mock.Mock(
context=[
{},
{"formset": TestFormset.invalid(nonform=True)},
]
)
msg = "You must use field=None with form_index=None."
with self.assertRaisesMessage(ValueError, msg):
self.assertFormsetError(response, "formset", None, "field", "error")
def test_form_index_too_big(self):
msg = (
"The formset <TestFormset: bound=True valid=False total_forms=1> only has "
"1 form."
)
response = mock.Mock(context=[{}, {"formset": TestFormset.invalid()}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(response, "formset", 2, "field", "error")
def test_form_index_too_big_plural(self):
formset = TestFormset(
{
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-0-field": "valid",
"form-1-field": "valid",
}
)
formset.full_clean()
msg = (
"The formset <TestFormset: bound=True valid=True total_forms=2> only has 2 "
"forms."
)
response = mock.Mock(context=[{}, {"formset": formset}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(response, "formset", 2, "field", "error")
def test_formset_named_form(self):
formset = TestFormset.invalid()
# The mocked context emulates the template-based rendering of the
# formset.
response = mock.Mock(
context=[
{"form": formset},
{"form": formset.management_form},
]
)
self.assertFormsetError(response, "form", 0, "field", "invalid value")
@ignore_warnings(category=RemovedInDjango50Warning)
def test_errors_none(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
response = mock.Mock(context=[{"formset": TestFormset.invalid()}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormsetError(response, "formset", 0, "field", None)
def test_errors_none_warning(self):
response = mock.Mock(context=[{"formset": TestFormset.valid()}])
msg = (
"Passing errors=None to assertFormsetError() is deprecated, use "
"errors=[] instead."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
self.assertFormsetError(response, "formset", 0, "field", None)
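# Minimal URLconf modules used by OverrideSettingsTests below to verify that
# overriding ROOT_URLCONF clears the URL resolver cache between tests.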
class FirstUrls:
urlpatterns = [path("first/", empty_response, name="first")]
class SecondUrls:
urlpatterns = [path("second/", empty_response, name="second")]
class SetupTestEnvironmentTests(SimpleTestCase):
def test_setup_test_environment_calling_more_than_once(self):
with self.assertRaisesMessage(
RuntimeError, "setup_test_environment() was already called"
):
setup_test_environment()
def test_allowed_hosts(self):
for type_ in (list, tuple):
with self.subTest(type_=type_):
allowed_hosts = type_("*")
with mock.patch("django.test.utils._TestState") as x:
del x.saved_data
with self.settings(ALLOWED_HOSTS=allowed_hosts):
setup_test_environment()
self.assertEqual(settings.ALLOWED_HOSTS, ["*", "testserver"])
class OverrideSettingsTests(SimpleTestCase):
# #21518 -- If neither override_settings nor a setting_changed receiver
# clears the URL cache between tests, then one of test_first or
# test_second will fail.
@override_settings(ROOT_URLCONF=FirstUrls)
def test_urlconf_first(self):
reverse("first")
@override_settings(ROOT_URLCONF=SecondUrls)
def test_urlconf_second(self):
reverse("second")
def test_urlconf_cache(self):
with self.assertRaises(NoReverseMatch):
reverse("first")
with self.assertRaises(NoReverseMatch):
reverse("second")
with override_settings(ROOT_URLCONF=FirstUrls):
self.client.get(reverse("first"))
with self.assertRaises(NoReverseMatch):
reverse("second")
with override_settings(ROOT_URLCONF=SecondUrls):
with self.assertRaises(NoReverseMatch):
reverse("first")
self.client.get(reverse("second"))
self.client.get(reverse("first"))
with self.assertRaises(NoReverseMatch):
reverse("second")
with self.assertRaises(NoReverseMatch):
reverse("first")
with self.assertRaises(NoReverseMatch):
reverse("second")
def test_override_media_root(self):
"""
Overriding the MEDIA_ROOT setting should be reflected in the
base_location attribute of django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.base_location, "")
with self.settings(MEDIA_ROOT="test_value"):
self.assertEqual(default_storage.base_location, "test_value")
def test_override_media_url(self):
"""
Overriding the MEDIA_URL setting should be reflected in the
base_url attribute of django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.base_location, "")
with self.settings(MEDIA_URL="/test_value/"):
self.assertEqual(default_storage.base_url, "/test_value/")
def test_override_file_upload_permissions(self):
"""
Overriding the FILE_UPLOAD_PERMISSIONS setting should be reflected in
the file_permissions_mode attribute of
django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.file_permissions_mode, 0o644)
with self.settings(FILE_UPLOAD_PERMISSIONS=0o777):
self.assertEqual(default_storage.file_permissions_mode, 0o777)
def test_override_file_upload_directory_permissions(self):
"""
Overriding the FILE_UPLOAD_DIRECTORY_PERMISSIONS setting should be
reflected in the directory_permissions_mode attribute of
django.core.files.storage.default_storage.
"""
self.assertIsNone(default_storage.directory_permissions_mode)
with self.settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777):
self.assertEqual(default_storage.directory_permissions_mode, 0o777)
def test_override_database_routers(self):
"""
Overriding DATABASE_ROUTERS should update the base router.
"""
test_routers = [object()]
with self.settings(DATABASE_ROUTERS=test_routers):
self.assertEqual(router.routers, test_routers)
def test_override_static_url(self):
"""
Overriding the STATIC_URL setting should be reflected in the
base_url attribute of
django.contrib.staticfiles.storage.staticfiles_storage.
"""
with self.settings(STATIC_URL="/test/"):
self.assertEqual(staticfiles_storage.base_url, "/test/")
def test_override_static_root(self):
"""
Overriding the STATIC_ROOT setting should be reflected in the
location attribute of
django.contrib.staticfiles.storage.staticfiles_storage.
"""
with self.settings(STATIC_ROOT="/tmp/test"):
self.assertEqual(staticfiles_storage.location, os.path.abspath("/tmp/test"))
def test_override_staticfiles_storage(self):
"""
Overriding the STATICFILES_STORAGE setting should be reflected in
the value of django.contrib.staticfiles.storage.staticfiles_storage.
"""
new_class = "ManifestStaticFilesStorage"
new_storage = "django.contrib.staticfiles.storage." + new_class
with self.settings(STATICFILES_STORAGE=new_storage):
self.assertEqual(staticfiles_storage.__class__.__name__, new_class)
def test_override_staticfiles_finders(self):
"""
Overriding the STATICFILES_FINDERS setting should be reflected in
the return value of django.contrib.staticfiles.finders.get_finders.
"""
current = get_finders()
self.assertGreater(len(list(current)), 1)
finders = ["django.contrib.staticfiles.finders.FileSystemFinder"]
with self.settings(STATICFILES_FINDERS=finders):
self.assertEqual(len(list(get_finders())), len(finders))
def test_override_staticfiles_dirs(self):
"""
Overriding the STATICFILES_DIRS setting should be reflected in
the locations attribute of the
django.contrib.staticfiles.finders.FileSystemFinder instance.
"""
finder = get_finder("django.contrib.staticfiles.finders.FileSystemFinder")
test_path = "/tmp/test"
expected_location = ("", test_path)
self.assertNotIn(expected_location, finder.locations)
with self.settings(STATICFILES_DIRS=[test_path]):
finder = get_finder("django.contrib.staticfiles.finders.FileSystemFinder")
self.assertIn(expected_location, finder.locations)
class TestBadSetUpTestData(TestCase):
"""
An exception in setUpTestData() shouldn't leak a transaction which would
cascade across the rest of the test suite.
"""
class MyException(Exception):
pass
@classmethod
def setUpClass(cls):
try:
super().setUpClass()
except cls.MyException:
cls._in_atomic_block = connection.in_atomic_block
@classmethod
    def tearDownClass(cls):
        # Override to avoid a second cls._rollback_atomics() which would fail.
# Normal setUpClass() methods won't have exception handling so this
# method wouldn't typically be run.
pass
@classmethod
def setUpTestData(cls):
# Simulate a broken setUpTestData() method.
raise cls.MyException()
def test_failure_in_setUpTestData_should_rollback_transaction(self):
# setUpTestData() should call _rollback_atomics() so that the
# transaction doesn't leak.
self.assertFalse(self._in_atomic_block)
class CaptureOnCommitCallbacksTests(TestCase):
databases = {"default", "other"}
callback_called = False
def enqueue_callback(self, using="default"):
def hook():
self.callback_called = True
transaction.on_commit(hook, using=using)
def test_no_arguments(self):
with self.captureOnCommitCallbacks() as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, False)
callbacks[0]()
self.assertIs(self.callback_called, True)
def test_using(self):
with self.captureOnCommitCallbacks(using="other") as callbacks:
self.enqueue_callback(using="other")
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, False)
callbacks[0]()
self.assertIs(self.callback_called, True)
def test_different_using(self):
with self.captureOnCommitCallbacks(using="default") as callbacks:
self.enqueue_callback(using="other")
self.assertEqual(callbacks, [])
def test_execute(self):
with self.captureOnCommitCallbacks(execute=True) as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, True)
def test_pre_callback(self):
def pre_hook():
pass
transaction.on_commit(pre_hook, using="default")
with self.captureOnCommitCallbacks() as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertNotEqual(callbacks[0], pre_hook)
def test_with_rolled_back_savepoint(self):
with self.captureOnCommitCallbacks() as callbacks:
try:
with transaction.atomic():
self.enqueue_callback()
raise IntegrityError
except IntegrityError:
# Inner transaction.atomic() has been rolled back.
pass
self.assertEqual(callbacks, [])
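    # Callbacks enqueued while captured callbacks execute are captured (and,
    # with execute=True, executed) as well.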
def test_execute_recursive(self):
with self.captureOnCommitCallbacks(execute=True) as callbacks:
transaction.on_commit(self.enqueue_callback)
self.assertEqual(len(callbacks), 2)
self.assertIs(self.callback_called, True)
def test_execute_tree(self):
"""
A visualisation of the callback tree tested. Each node is expected to
be visited only once:
└─branch_1
├─branch_2
│ ├─leaf_1
│ └─leaf_2
└─leaf_3
"""
branch_1_call_counter = 0
branch_2_call_counter = 0
leaf_1_call_counter = 0
leaf_2_call_counter = 0
leaf_3_call_counter = 0
def leaf_1():
nonlocal leaf_1_call_counter
leaf_1_call_counter += 1
def leaf_2():
nonlocal leaf_2_call_counter
leaf_2_call_counter += 1
def leaf_3():
nonlocal leaf_3_call_counter
leaf_3_call_counter += 1
def branch_1():
nonlocal branch_1_call_counter
branch_1_call_counter += 1
transaction.on_commit(branch_2)
transaction.on_commit(leaf_3)
def branch_2():
nonlocal branch_2_call_counter
branch_2_call_counter += 1
transaction.on_commit(leaf_1)
transaction.on_commit(leaf_2)
with self.captureOnCommitCallbacks(execute=True) as callbacks:
transaction.on_commit(branch_1)
self.assertEqual(branch_1_call_counter, 1)
self.assertEqual(branch_2_call_counter, 1)
self.assertEqual(leaf_1_call_counter, 1)
self.assertEqual(leaf_2_call_counter, 1)
self.assertEqual(leaf_3_call_counter, 1)
self.assertEqual(callbacks, [branch_1, branch_2, leaf_3, leaf_1, leaf_2])
class DisallowedDatabaseQueriesTests(SimpleTestCase):
def test_disallowed_database_connections(self):
expected_message = (
"Database connections to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
connection.connect()
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
connection.temporary_connection()
def test_disallowed_database_queries(self):
expected_message = (
"Database queries to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
Car.objects.first()
def test_disallowed_database_chunked_cursor_queries(self):
expected_message = (
"Database queries to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
next(Car.objects.iterator())
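# Setting databases = {"default"} opts a SimpleTestCase subclass in to
# database access on that alias.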
class AllowedDatabaseQueriesTests(SimpleTestCase):
databases = {"default"}
def test_allowed_database_queries(self):
Car.objects.first()
def test_allowed_database_chunked_cursor_queries(self):
next(Car.objects.iterator(), None)
class DatabaseAliasTests(SimpleTestCase):
def setUp(self):
self.addCleanup(setattr, self.__class__, "databases", self.databases)
def test_no_close_match(self):
self.__class__.databases = {"void"}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'void' which is "
"not defined in settings.DATABASES."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_close_match(self):
self.__class__.databases = {"defualt"}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'defualt' which "
"is not defined in settings.DATABASES. Did you mean 'default'?"
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_match(self):
self.__class__.databases = {"default", "other"}
self.assertEqual(self._validate_databases(), frozenset({"default", "other"}))
def test_all(self):
self.__class__.databases = "__all__"
self.assertEqual(self._validate_databases(), frozenset(connections))
@isolate_apps("test_utils", attr_name="class_apps")
class IsolatedAppsTests(SimpleTestCase):
def test_installed_apps(self):
self.assertEqual(
[app_config.label for app_config in self.class_apps.get_app_configs()],
["test_utils"],
)
def test_class_decoration(self):
class ClassDecoration(models.Model):
pass
self.assertEqual(ClassDecoration._meta.apps, self.class_apps)
@isolate_apps("test_utils", kwarg_name="method_apps")
def test_method_decoration(self, method_apps):
class MethodDecoration(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
def test_context_manager(self):
with isolate_apps("test_utils") as context_apps:
class ContextManager(models.Model):
pass
self.assertEqual(ContextManager._meta.apps, context_apps)
@isolate_apps("test_utils", kwarg_name="method_apps")
def test_nested(self, method_apps):
class MethodDecoration(models.Model):
pass
with isolate_apps("test_utils") as context_apps:
class ContextManager(models.Model):
pass
with isolate_apps("test_utils") as nested_context_apps:
class NestedContextManager(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
self.assertEqual(ContextManager._meta.apps, context_apps)
self.assertEqual(NestedContextManager._meta.apps, nested_context_apps)
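# A no-op TestContextDecorator used to exercise its setup/teardown hooks.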
class DoNothingDecorator(TestContextDecorator):
def enable(self):
pass
def disable(self):
pass
class TestContextDecoratorTests(SimpleTestCase):
@mock.patch.object(DoNothingDecorator, "disable")
def test_exception_in_setup(self, mock_disable):
"""An exception is setUp() is reraised after disable() is called."""
class ExceptionInSetUp(unittest.TestCase):
def setUp(self):
raise NotImplementedError("reraised")
decorator = DoNothingDecorator()
decorated_test_class = decorator.__call__(ExceptionInSetUp)()
self.assertFalse(mock_disable.called)
with self.assertRaisesMessage(NotImplementedError, "reraised"):
decorated_test_class.setUp()
decorated_test_class.doCleanups()
self.assertTrue(mock_disable.called)
def test_cleanups_run_after_tearDown(self):
calls = []
class SaveCallsDecorator(TestContextDecorator):
def enable(self):
calls.append("enable")
def disable(self):
calls.append("disable")
class AddCleanupInSetUp(unittest.TestCase):
def setUp(self):
calls.append("setUp")
self.addCleanup(lambda: calls.append("cleanup"))
decorator = SaveCallsDecorator()
decorated_test_class = decorator.__call__(AddCleanupInSetUp)()
decorated_test_class.setUp()
decorated_test_class.tearDown()
decorated_test_class.doCleanups()
self.assertEqual(calls, ["enable", "setUp", "cleanup", "disable"])
09136585b9bd76d06cb11a9843033050addc38c036a521513660676794e2af44
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from unittest import mock
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Aggregate,
Avg,
Case,
Count,
DecimalField,
F,
IntegerField,
Max,
Q,
StdDev,
Sum,
Value,
Variance,
When,
)
from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from .models import (
Alfa,
Author,
Book,
Bravo,
Charlie,
Clues,
Entries,
HardbackBook,
ItemTag,
Publisher,
SelfRefFK,
Store,
WithManualPK,
)
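# Regression tests for aggregate() and annotate() edge cases.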
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
cls.a3 = Author.objects.create(name="Brad Dayley", age=45)
cls.a4 = Author.objects.create(name="James Bennett", age=29)
cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37)
cls.a6 = Author.objects.create(name="Paul Bissex", age=29)
cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25)
cls.a8 = Author.objects.create(name="Peter Norvig", age=57)
cls.a9 = Author.objects.create(name="Stuart Russell", age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name="Apress", num_awards=3)
cls.p2 = Publisher.objects.create(name="Sams", num_awards=1)
cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7)
cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn="159059725",
name="The Definitive Guide to Django: Web Development Done Right",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6),
)
cls.b2 = Book.objects.create(
isbn="067232959",
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
rating=3.0,
price=Decimal("23.09"),
contact=cls.a3,
publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3),
)
cls.b3 = Book.objects.create(
isbn="159059996",
name="Practical Django Projects",
pages=300,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a4,
publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23),
)
cls.b4 = Book.objects.create(
isbn="013235613",
name="Python Web Development with Django",
pages=350,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a5,
publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3),
)
cls.b5 = HardbackBook.objects.create(
isbn="013790395",
name="Artificial Intelligence: A Modern Approach",
pages=1132,
rating=4.0,
price=Decimal("82.80"),
contact=cls.a8,
publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15),
weight=4.5,
)
cls.b6 = HardbackBook.objects.create(
isbn="155860191",
name=(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp"
),
pages=946,
rating=5.0,
price=Decimal("75.00"),
contact=cls.a8,
publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15),
weight=3.7,
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name="Amazon.com",
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59),
)
s2 = Store.objects.create(
name="Books.com",
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59),
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30),
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
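        """Assert that each given attribute of obj has the expected value."""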
for attr, value in kwargs.items():
self.assertEqual(getattr(obj, attr), value)
def test_annotation_with_value(self):
values = (
Book.objects.filter(
name="Practical Django Projects",
)
.annotate(
discount_price=F("price") * 2,
)
.values(
"discount_price",
)
.annotate(sum_discount=Sum("discount_price"))
)
self.assertSequenceEqual(
values,
[{"discount_price": Decimal("59.38"), "sum_discount": Decimal("59.38")}],
)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
The subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values("contact").annotate(Max("id"))
qs = qs.order_by("contact").values_list("id__max", flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by("id")
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values("contact").annotate(Max("id"))
qs = qs.order_by("contact").values_list("id__max", flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by("id")
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
        # Oracle doesn't support subqueries in the GROUP BY clause.
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
        # Test that this query does not raise a DatabaseError due to the full
        # subselect being (erroneously) added to the GROUP BY parameters.
qs = Publisher.objects.extra(
select={
"name_of_shortest_book": shortest_book_sql,
}
).annotate(total_books=Count("book"))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)},
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum("pages"), Avg("pages")),
{"pages__sum": 3703, "pages__avg": Approximate(617.166, places=2)},
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum("pages"), Avg("pages")),
{"pages__sum": 3703, "pages__avg": Approximate(617.166, places=2)},
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={"price_per_page": "price / pages"}).aggregate(
Sum("pages")
),
{"pages__sum": 3703},
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"manufacture_cost": "price * .5"})
.get(pk=self.b2.pk)
)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn="067232959",
mean_auth_age=45.0,
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0,
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545")))
# Order of the annotate/extra in the query doesn't matter
obj = (
Book.objects.extra(select={"manufacture_cost": "price * .5"})
.annotate(mean_auth_age=Avg("authors__age"))
.get(pk=self.b2.pk)
)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn="067232959",
mean_auth_age=45.0,
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0,
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545")))
# Values queries can be combined with annotate and extra
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"manufacture_cost": "price * .5"})
.values()
.get(pk=self.b2.pk)
)
manufacture_cost = obj["manufacture_cost"]
self.assertIn(manufacture_cost, (11.545, Decimal("11.545")))
del obj["manufacture_cost"]
self.assertEqual(
obj,
{
"id": self.b2.id,
"contact_id": self.a3.id,
"isbn": "067232959",
"mean_auth_age": 45.0,
"name": "Sams Teach Yourself Django in 24 Hours",
"pages": 528,
"price": Decimal("23.09"),
"pubdate": datetime.date(2008, 3, 3),
"publisher_id": self.p2.id,
"rating": 3.0,
},
)
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = (
Book.objects.values()
.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"manufacture_cost": "price * .5"})
.get(pk=self.b2.pk)
)
manufacture_cost = obj["manufacture_cost"]
self.assertIn(manufacture_cost, (11.545, Decimal("11.545")))
del obj["manufacture_cost"]
self.assertEqual(
obj,
{
"id": self.b2.id,
"contact_id": self.a3.id,
"isbn": "067232959",
"mean_auth_age": 45.0,
"name": "Sams Teach Yourself Django in 24 Hours",
"pages": 528,
"price": Decimal("23.09"),
"pubdate": datetime.date(2008, 3, 3),
"publisher_id": self.p2.id,
"rating": 3.0,
},
)
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"price_per_page": "price / pages"})
.values("name")
.get(pk=self.b1.pk)
)
self.assertEqual(
obj,
{
"name": "The Definitive Guide to Django: Web Development Done Right",
},
)
obj = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"price_per_page": "price / pages"})
.values("name", "mean_auth_age")
.get(pk=self.b1.pk)
)
self.assertEqual(
obj,
{
"mean_auth_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
},
)
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = (
Book.objects.annotate(n_authors=Count("authors"))
.values("name")
.filter(n_authors__gt=2)
)
self.assertSequenceEqual(
qs,
[{"name": "Python Web Development with Django"}],
)
# The annotations are added to values output if values() precedes
# annotate()
obj = (
Book.objects.values("name")
.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"price_per_page": "price / pages"})
.get(pk=self.b1.pk)
)
self.assertEqual(
obj,
{
"mean_auth_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
},
)
        # All of the objects are counted (allow_nulls), and values() respects
        # the number of objects.
self.assertEqual(len(Author.objects.annotate(Avg("friends__age")).values()), 9)
# Consecutive calls to annotate accumulate in the query
qs = (
Book.objects.values("price")
.annotate(oldest=Max("authors__age"))
.order_by("oldest", "price")
.annotate(Max("publisher__num_awards"))
)
self.assertSequenceEqual(
qs,
[
{"price": Decimal("30"), "oldest": 35, "publisher__num_awards__max": 3},
{
"price": Decimal("29.69"),
"oldest": 37,
"publisher__num_awards__max": 7,
},
{
"price": Decimal("23.09"),
"oldest": 45,
"publisher__num_awards__max": 1,
},
{"price": Decimal("75"), "oldest": 57, "publisher__num_awards__max": 9},
{
"price": Decimal("82.8"),
"oldest": 57,
"publisher__num_awards__max": 7,
},
],
)
def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Max("pages"), Max("price"), Sum("num_authors"), Avg("num_authors")
)
self.assertEqual(
vals,
{
"num_authors__sum": 10,
"num_authors__avg": Approximate(1.666, places=2),
"pages__max": 1132,
"price__max": Decimal("82.80"),
},
)
# Regression for #15624 - Missing SELECT columns when using values, annotate
# and aggregate in a single query
self.assertEqual(
Book.objects.annotate(c=Count("authors")).values("c").aggregate(Max("c")),
{"c__max": 3},
)
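    # Sum(Case(When(cond, then=1))) emulates a conditional count: only rows
    # matching the condition contribute 1 to the sum.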
def test_conditional_aggregate(self):
# Conditional aggregation of a grouped queryset.
self.assertEqual(
Book.objects.annotate(c=Count("authors"))
.values("pk")
.aggregate(test=Sum(Case(When(c__gt=1, then=1))))["test"],
3,
)
def test_sliced_conditional_aggregate(self):
self.assertEqual(
Author.objects.all()[:5].aggregate(
test=Sum(Case(When(age__lte=35, then=1)))
)["test"],
3,
)
def test_annotated_conditional_aggregate(self):
annotated_qs = Book.objects.annotate(
discount_price=F("price") * Decimal("0.75")
)
self.assertAlmostEqual(
annotated_qs.aggregate(
test=Avg(
Case(
When(pages__lt=400, then="discount_price"),
output_field=DecimalField(),
)
)
)["test"],
Decimal("22.27"),
places=2,
)
def test_distinct_conditional_aggregate(self):
self.assertEqual(
Book.objects.distinct().aggregate(
test=Avg(
Case(
When(price=Decimal("29.69"), then="pages"),
output_field=IntegerField(),
)
)
)["test"],
325,
)
def test_conditional_aggregate_on_complex_condition(self):
self.assertEqual(
Book.objects.distinct().aggregate(
test=Avg(
Case(
When(
Q(price__gte=Decimal("29")) & Q(price__lt=Decimal("30")),
then="pages",
),
output_field=IntegerField(),
)
)
)["test"],
325,
)
def test_decimal_aggregate_annotation_filter(self):
"""
Filtering on an aggregate annotation with Decimal values should work.
Requires special handling on SQLite (#18247).
"""
self.assertEqual(
len(
Author.objects.annotate(sum=Sum("book_contact_set__price")).filter(
sum__gt=Decimal(40)
)
),
1,
)
self.assertEqual(
len(
Author.objects.annotate(sum=Sum("book_contact_set__price")).filter(
sum__lte=Decimal(40)
)
),
4,
)
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
msg = (
"Cannot resolve keyword 'foo' into field. Choices are: authors, "
"contact, contact_id, hardbackbook, id, isbn, name, pages, price, "
"pubdate, publisher, publisher_id, rating, store, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Book.objects.aggregate(num_authors=Count("foo"))
with self.assertRaisesMessage(FieldError, msg):
Book.objects.annotate(num_authors=Count("foo"))
msg = (
"Cannot resolve keyword 'foo' into field. Choices are: authors, "
"contact, contact_id, hardbackbook, id, isbn, name, num_authors, "
"pages, price, pubdate, publisher, publisher_id, rating, store, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Max("foo")
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
        self.assertEqual(
            Book.objects.annotate(num_authors=Count("authors")).count(), 6
        )
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count("authors")).aggregate(
Max("num_authors")
)
self.assertEqual(vals, {"num_authors__max": 3})
vals = Publisher.objects.annotate(avg_price=Avg("book__price")).aggregate(
Max("avg_price")
)
self.assertEqual(vals, {"avg_price__max": 75.0})
        # Aliases are quoted to protect aliases that might be reserved names.
vals = Book.objects.aggregate(number=Max("pages"), select=Max("pages"))
self.assertEqual(vals, {"number": 1132, "select": 1132})
# Regression for #10064: select_related() plays nice with aggregates
obj = (
Book.objects.select_related("publisher")
.annotate(num_authors=Count("authors"))
.values()
.get(isbn="013790395")
)
self.assertEqual(
obj,
{
"contact_id": self.a8.id,
"id": self.b5.id,
"isbn": "013790395",
"name": "Artificial Intelligence: A Modern Approach",
"num_authors": 2,
"pages": 1132,
"price": Decimal("82.8"),
"pubdate": datetime.date(1995, 1, 15),
"publisher_id": self.p3.id,
"rating": 4.0,
},
)
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(len(Book.objects.annotate(num_authors=Count("authors"))), 6)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors")).filter(
num_authors__gt=2
)
),
1,
)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors")).exclude(
num_authors__gt=2
)
),
5,
)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors"))
.filter(num_authors__lt=3)
.exclude(num_authors__lt=2)
),
2,
)
self.assertEqual(
len(
Book.objects.annotate(num_authors=Count("authors"))
.exclude(num_authors__lt=2)
.filter(num_authors__lt=3)
),
2,
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__lt=F("num_awards") / 2)
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 1, "name": "Morgan Kaufmann", "num_awards": 9},
{"num_books": 2, "name": "Prentice Hall", "num_awards": 7},
],
)
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.exclude(num_books__lt=F("num_awards") / 2)
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 2, "name": "Apress", "num_awards": 3},
{"num_books": 0, "name": "Jonno's House of Books", "num_awards": 0},
{"num_books": 1, "name": "Sams", "num_awards": 1},
],
)
# ... and where the F() references an aggregate
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_awards__gt=2 * F("num_books"))
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 1, "name": "Morgan Kaufmann", "num_awards": 9},
{"num_books": 2, "name": "Prentice Hall", "num_awards": 7},
],
)
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.exclude(num_books__lt=F("num_awards") / 2)
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 2, "name": "Apress", "num_awards": 3},
{"num_books": 0, "name": "Jonno's House of Books", "num_awards": 0},
{"num_books": 1, "name": "Sams", "num_awards": 1},
],
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values("EntryID__Entry").annotate(
Appearances=Count("EntryID"), Distinct_Clues=Count("Clue", distinct=True)
)
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count("clues__ID"))
self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
# Aggregates mixed up ordering of columns for backend's convert_values
# method. Refs #21126.
e = Entries.objects.create(Entry="foo")
c = Clues.objects.create(EntryID=e, Clue="bar")
qs = Clues.objects.select_related("EntryID").annotate(Count("ID"))
self.assertSequenceEqual(qs, [c])
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(Book.objects.filter(id__in=[]).count(), 0)
vals = Book.objects.filter(id__in=[]).aggregate(
num_authors=Count("authors"),
avg_authors=Avg("authors"),
max_authors=Max("authors"),
max_price=Max("price"),
max_rating=Max("rating"),
)
self.assertEqual(
vals,
{
"max_authors": None,
"max_rating": None,
"num_authors": 0,
"avg_authors": None,
"max_price": None,
},
)
qs = (
Publisher.objects.filter(name="Jonno's House of Books")
.annotate(
num_authors=Count("book__authors"),
avg_authors=Avg("book__authors"),
max_authors=Max("book__authors"),
max_price=Max("book__price"),
max_rating=Max("book__rating"),
)
.values()
)
self.assertSequenceEqual(
qs,
[
{
"max_authors": None,
"name": "Jonno's House of Books",
"num_awards": 0,
"max_price": None,
"num_authors": 0,
"max_rating": None,
"id": self.p5.id,
"avg_authors": None,
}
],
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count("authors")).order_by(
"publisher__name", "name"
),
[
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name,
)
# Regression for #10127 - Empty select_related() works with annotate
qs = (
Book.objects.filter(rating__lt=4.5)
.select_related()
.annotate(Avg("authors__age"))
.order_by("name")
)
self.assertQuerysetEqual(
qs,
[
(
"Artificial Intelligence: A Modern Approach",
51.5,
"Prentice Hall",
"Peter Norvig",
),
("Practical Django Projects", 29.0, "Apress", "James Bennett"),
(
"Python Web Development with Django",
Approximate(30.333, places=2),
"Prentice Hall",
"Jeffrey Forcier",
),
("Sams Teach Yourself Django in 24 Hours", 45.0, "Sams", "Brad Dayley"),
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name),
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = (
Book.objects.extra(select={"pub": "publisher_id"})
.values("pub")
.annotate(Count("id"))
.order_by("pub")
)
self.assertSequenceEqual(
qs,
[
{"pub": self.p1.id, "id__count": 2},
{"pub": self.p2.id, "id__count": 1},
{"pub": self.p3.id, "id__count": 2},
{"pub": self.p4.id, "id__count": 1},
],
)
qs = (
Book.objects.extra(select={"pub": "publisher_id", "foo": "pages"})
.values("pub")
.annotate(Count("id"))
.order_by("pub")
)
self.assertSequenceEqual(
qs,
[
{"pub": self.p1.id, "id__count": 2},
{"pub": self.p2.id, "id__count": 1},
{"pub": self.p3.id, "id__count": 2},
{"pub": self.p4.id, "id__count": 1},
],
)
        # Regression for #10182 - Queries with aggregate calls are correctly
        # re-aliased when used in a subquery.
ids = (
Book.objects.filter(pages__gt=100)
.annotate(n_authors=Count("authors"))
.filter(n_authors__gt=2)
.order_by("n_authors")
)
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids),
[
"Python Web Development with Django",
],
lambda b: b.name,
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qstr = str(
Book.objects.values("publisher")
.annotate(max_pages=Max("pages"))
.order_by()
.query
)
# There is just one GROUP BY clause (zero commas means at most one clause).
self.assertEqual(qstr[qstr.index("GROUP BY") :].count(", "), 0)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
msg = (
"The named annotation 'authors__age__avg' conflicts with "
"the default name for another annotation."
)
with self.assertRaisesMessage(ValueError, msg):
Book.objects.annotate(
Avg("authors__age"), authors__age__avg=Avg("authors__age")
)
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with a field name on the model raises ValueError
msg = "The annotation 'age' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(age=Avg("friends__age"))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with an m2m name on the model raises ValueError
msg = "The annotation 'friends' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(friends=Count("friends"))
def test_fk_attname_conflict(self):
msg = "The annotation 'contact_id' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Book.objects.annotate(contact_id=F("publisher_id"))
def test_values_queryset_non_conflict(self):
# If you're using a values query set, some potential conflicts are
# avoided.
        # age is a field on Author, so it normally couldn't be used as an
        # annotation name, but since age isn't included in values(), it's
        # allowed here.
results = (
Author.objects.values("name")
.annotate(age=Count("book_contact_set"))
.order_by("name")
)
self.assertEqual(len(results), 9)
self.assertEqual(results[0]["name"], "Adrian Holovaty")
self.assertEqual(results[0]["age"], 1)
# Same problem, but aggregating over m2m fields
results = (
Author.objects.values("name")
.annotate(age=Avg("friends__age"))
.order_by("name")
)
self.assertEqual(len(results), 9)
self.assertEqual(results[0]["name"], "Adrian Holovaty")
self.assertEqual(results[0]["age"], 32.0)
# Same problem, but colliding with an m2m field
results = (
Author.objects.values("name")
.annotate(friends=Count("friends"))
.order_by("name")
)
self.assertEqual(len(results), 9)
self.assertEqual(results[0]["name"], "Adrian Holovaty")
self.assertEqual(results[0]["friends"], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with a reverse-related name on the model raises ValueError
msg = "The annotation 'book_contact_set' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(book_contact_set=Avg("friends__age"))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count("authors"))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(),
[
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
"Practical Django Projects",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name,
)
# Regression for #10248 - Annotations work with dates()
qs = (
Book.objects.annotate(num_authors=Count("authors"))
.filter(num_authors=2)
.dates("pubdate", "day")
)
self.assertSequenceEqual(
qs,
[
datetime.date(1995, 1, 15),
datetime.date(2007, 12, 6),
],
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = (
Book.objects.annotate(mean_auth_age=Avg("authors__age"))
.extra(select={"sheets": "(pages + %s) / %s"}, select_params=[1, 2])
.order_by("sheets")
.values("sheets")
)
self.assertQuerysetEqual(
qs, [150, 175, 224, 264, 473, 566], lambda b: int(b["sheets"])
)
        # Regression for #10425 - annotations don't get in the way of a
        # count() clause.
self.assertEqual(
Book.objects.values("publisher").annotate(Count("publisher")).count(), 4
)
self.assertEqual(
Book.objects.annotate(Count("publisher")).values("publisher").count(), 6
)
# Note: intentionally no order_by(), that case needs tests, too.
publishers = Publisher.objects.filter(id__in=[self.p1.id, self.p2.id])
self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
publishers = publishers.annotate(n_books=Count("book"))
sorted_publishers = sorted(publishers, key=lambda x: x.name)
self.assertEqual(sorted_publishers[0].n_books, 2)
self.assertEqual(sorted_publishers[1].n_books, 1)
self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books,
[
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name,
)
self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
        # Regression for #10666 - inherited fields work with annotations and
        # aggregations.
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum("book_ptr__pages")),
{"n_pages": 2078},
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum("pages")),
{"n_pages": 2078},
)
qs = (
HardbackBook.objects.annotate(
n_authors=Count("book_ptr__authors"),
)
.values("name", "n_authors")
.order_by("name")
)
self.assertSequenceEqual(
qs,
[
{"n_authors": 2, "name": "Artificial Intelligence: A Modern Approach"},
{
"n_authors": 1,
"name": (
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp"
),
},
],
)
qs = (
HardbackBook.objects.annotate(n_authors=Count("authors"))
.values("name", "n_authors")
.order_by("name")
)
self.assertSequenceEqual(
qs,
[
{"n_authors": 2, "name": "Artificial Intelligence: A Modern Approach"},
{
"n_authors": 1,
"name": (
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp"
),
},
],
)
        # Regression for #10766 - Shouldn't be able to reference an aggregate
        # field in an aggregate() call.
msg = "Cannot compute Avg('mean_age'): 'mean_age' is an aggregate"
with self.assertRaisesMessage(FieldError, msg):
Book.objects.annotate(mean_age=Avg("authors__age")).annotate(
Avg("mean_age")
)
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(), 0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[])
.annotate(Count("friends"))
.aggregate(Count("pk")),
{"pk__count": 0},
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg("age")), {"age__avg": None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name"))
.exclude(friends__name="Joe")
.count(),
Author.objects.count(),
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = (
Book.objects.values("name")
.annotate(n_authors=Count("authors"))
.filter(pages__lt=F("n_authors") * 200)
.values_list("pk")
)
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs),
["Python Web Development with Django"],
attrgetter("name"),
)
def test_values_annotate_values(self):
qs = (
Book.objects.values("name")
.annotate(n_authors=Count("authors"))
.values_list("pk", flat=True)
.order_by("name")
)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
        # A field that occurs on the LHS of a HAVING clause appears correctly
        # in the GROUP BY clause.
qs = (
Book.objects.values_list("name")
.annotate(n_authors=Count("authors"))
.filter(pages__gt=F("n_authors"))
.values_list("name", flat=True)
.order_by("name")
)
# Results should be the same, all Books have more pages than authors
self.assertEqual(list(qs), list(Book.objects.values_list("name", flat=True)))
def test_values_list_annotation_args_ordering(self):
"""
Annotate *args ordering should be preserved in values_list results.
**kwargs comes after *args.
Regression test for #23659.
"""
books = (
Book.objects.values_list("publisher__name")
.annotate(
Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
)
.order_by("-publisher__name")
)
self.assertEqual(books[0], ("Sams", 1, Decimal("23.09"), 45.0, 528.0))
def test_annotation_disjunction(self):
qs = (
Book.objects.annotate(n_authors=Count("authors"))
.filter(Q(n_authors=2) | Q(name="Python Web Development with Django"))
.order_by("name")
)
self.assertQuerysetEqual(
qs,
[
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name"),
)
qs = (
Book.objects.annotate(n_authors=Count("authors")).filter(
Q(name="The Definitive Guide to Django: Web Development Done Right")
| (
Q(name="Artificial Intelligence: A Modern Approach")
& Q(n_authors=3)
)
)
).order_by("name")
self.assertQuerysetEqual(
qs,
[
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name"),
)
qs = (
Publisher.objects.annotate(
rating_sum=Sum("book__rating"), book_count=Count("book")
)
.filter(Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True))
.order_by("pk")
)
self.assertQuerysetEqual(
qs,
[
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name"),
)
qs = (
Publisher.objects.annotate(
rating_sum=Sum("book__rating"), book_count=Count("book")
)
.filter(Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None))
.order_by("num_awards")
)
self.assertQuerysetEqual(
qs,
[
"Jonno's House of Books",
"Sams",
"Apress",
"Prentice Hall",
"Morgan Kaufmann",
],
attrgetter("name"),
)
def test_quoting_aggregate_order_by(self):
qs = (
Book.objects.filter(name="Python Web Development with Django")
.annotate(authorCount=Count("authors"))
.order_by("authorCount")
)
self.assertQuerysetEqual(
qs,
[
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount),
)
def test_stddev(self):
self.assertEqual(
Book.objects.aggregate(StdDev("pages")),
{"pages__stddev": Approximate(311.46, 1)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("rating")),
{"rating__stddev": Approximate(0.60, 1)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("price")),
{"price__stddev": Approximate(Decimal("24.16"), 2)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("pages", sample=True)),
{"pages__stddev": Approximate(341.19, 2)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("rating", sample=True)),
{"rating__stddev": Approximate(0.66, 2)},
)
self.assertEqual(
Book.objects.aggregate(StdDev("price", sample=True)),
{"price__stddev": Approximate(Decimal("26.46"), 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("pages")),
{"pages__variance": Approximate(97010.80, 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("rating")),
{"rating__variance": Approximate(0.36, 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("price")),
{"price__variance": Approximate(Decimal("583.77"), 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("pages", sample=True)),
{"pages__variance": Approximate(116412.96, 1)},
)
self.assertEqual(
Book.objects.aggregate(Variance("rating", sample=True)),
{"rating__variance": Approximate(0.44, 2)},
)
self.assertEqual(
Book.objects.aggregate(Variance("price", sample=True)),
{"price__variance": Approximate(Decimal("700.53"), 2)},
)
def test_filtering_by_annotation_name(self):
# Regression test for #14476
        # The name of the explicitly provided annotation in this case poses
        # no problem.
qs = (
Author.objects.annotate(book_cnt=Count("book"))
.filter(book_cnt=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# Neither in this case
qs = (
Author.objects.annotate(book_count=Count("book"))
.filter(book_count=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = (
Author.objects.annotate(Count("book"))
.filter(book__count=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# Referencing the auto-generated name in an aggregate() also works.
self.assertEqual(
Author.objects.annotate(Count("book")).aggregate(Max("book__count")),
{"book__count__max": 2},
)
def test_annotate_joins(self):
"""
        The base table's join isn't promoted to LOUTER. This could cause the
        query generation to fail if there is an exclude() for an FK field in
        the query, too. Refs #19087.
"""
qs = Book.objects.annotate(n=Count("pk"))
self.assertIs(qs.query.alias_map["aggregation_regress_book"].join_type, None)
# The query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessAnyDBFeature("allows_group_by_pk", "allows_group_by_selected_pks")
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count("book_contact_set"))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
_, _, group_by = results.query.get_compiler(using="default").pre_sql_setup()
self.assertEqual(len(group_by), 1)
self.assertIn("id", group_by[0][0])
self.assertNotIn("name", group_by[0][0])
self.assertNotIn("age", group_by[0][0])
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by("name")],
[
("Adrian Holovaty", 1),
("Brad Dayley", 1),
("Jacob Kaplan-Moss", 0),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 0),
("Peter Norvig", 2),
("Stuart Russell", 0),
("Wesley J. Chun", 0),
],
)
@skipUnlessAnyDBFeature("allows_group_by_pk", "allows_group_by_selected_pks")
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only("id", "name").annotate(
num_contacts=Count("book_contact_set")
)
_, _, grouping = results.query.get_compiler(using="default").pre_sql_setup()
self.assertEqual(len(grouping), 1)
self.assertIn("id", grouping[0][0])
self.assertNotIn("name", grouping[0][0])
self.assertNotIn("age", grouping[0][0])
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by("name")],
[
("Adrian Holovaty", 1),
("Brad Dayley", 1),
("Jacob Kaplan-Moss", 0),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 0),
("Peter Norvig", 2),
("Stuart Russell", 0),
("Wesley J. Chun", 0),
],
)
@skipUnlessAnyDBFeature("allows_group_by_pk", "allows_group_by_selected_pks")
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related("contact").annotate(
num_authors=Count("authors")
)
_, _, grouping = results.query.get_compiler(using="default").pre_sql_setup()
# In the case of `group_by_selected_pks` we also group by contact.id
# because of the select_related.
self.assertEqual(
len(grouping), 1 if connection.features.allows_group_by_pk else 2
)
self.assertIn("id", grouping[0][0])
self.assertNotIn("name", grouping[0][0])
self.assertNotIn("contact", grouping[0][0])
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by("name")],
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 2),
],
)
@skipUnlessDBFeature("allows_group_by_selected_pks")
def test_aggregate_unmanaged_model_columns(self):
"""
Unmanaged models are sometimes used to represent database views which
may not allow grouping by selected primary key.
"""
def assertQuerysetResults(queryset):
self.assertEqual(
[(b.name, b.num_authors) for b in queryset.order_by("name")],
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 2),
],
)
queryset = Book.objects.select_related("contact").annotate(
num_authors=Count("authors")
)
# Unmanaged origin model.
with mock.patch.object(Book._meta, "managed", False):
_, _, grouping = queryset.query.get_compiler(
using="default"
).pre_sql_setup()
self.assertEqual(len(grouping), len(Book._meta.fields) + 1)
for index, field in enumerate(Book._meta.fields):
self.assertIn(field.name, grouping[index][0])
self.assertIn(Author._meta.pk.name, grouping[-1][0])
assertQuerysetResults(queryset)
# Unmanaged related model.
with mock.patch.object(Author._meta, "managed", False):
_, _, grouping = queryset.query.get_compiler(
using="default"
).pre_sql_setup()
self.assertEqual(len(grouping), len(Author._meta.fields) + 1)
self.assertIn(Book._meta.pk.name, grouping[0][0])
for index, field in enumerate(Author._meta.fields):
self.assertIn(field.name, grouping[index + 1][0])
assertQuerysetResults(queryset)
@skipUnlessDBFeature("allows_group_by_selected_pks")
def test_aggregate_unmanaged_model_as_tables(self):
qs = Book.objects.select_related("contact").annotate(
num_authors=Count("authors")
)
# Force treating unmanaged models as tables.
with mock.patch(
"django.db.connection.features.allows_group_by_selected_pks_on_model",
return_value=True,
):
with mock.patch.object(Book._meta, "managed", False), mock.patch.object(
Author._meta, "managed", False
):
_, _, grouping = qs.query.get_compiler(using="default").pre_sql_setup()
self.assertEqual(len(grouping), 2)
self.assertIn("id", grouping[0][0])
self.assertIn("id", grouping[1][0])
self.assertQuerysetEqual(
qs.order_by("name"),
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case "
"Studies in Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
(
"The Definitive Guide to Django: Web Development Done "
"Right",
2,
),
],
attrgetter("name", "num_authors"),
)
def test_reverse_join_trimming(self):
qs = Author.objects.annotate(Count("book_contact_set__contact"))
self.assertIn(" JOIN ", str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
"""
        Regression test for #10870: aggregates with joins used to ignore the
        extra filters provided by setup_joins(). Exercises aggregation over a
        generic reverse relation.
"""
django_book = Book.objects.get(name="Practical Django Projects")
ItemTag.objects.create(
object_id=django_book.id,
tag="intermediate",
content_type=ContentType.objects.get_for_model(django_book),
)
ItemTag.objects.create(
object_id=django_book.id,
tag="django",
content_type=ContentType.objects.get_for_model(django_book),
)
        # Assign a tag to a model with the same PK as the book above. If the
        # JOIN used in the aggregation doesn't include the content type in
        # its condition, the annotation will also count the 'hi mom' tag for
        # that book.
wmpk = WithManualPK.objects.create(id=django_book.pk)
ItemTag.objects.create(
object_id=wmpk.id,
tag="hi mom",
content_type=ContentType.objects.get_for_model(wmpk),
)
ai_book = Book.objects.get(
name__startswith="Paradigms of Artificial Intelligence"
)
ItemTag.objects.create(
object_id=ai_book.id,
tag="intermediate",
content_type=ContentType.objects.get_for_model(ai_book),
)
self.assertEqual(Book.objects.aggregate(Count("tags")), {"tags__count": 3})
results = Book.objects.annotate(Count("tags")).order_by("-tags__count", "name")
self.assertEqual(
[(b.name, b.tags__count) for b in results],
[
("Practical Django Projects", 2),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
1,
),
("Artificial Intelligence: A Modern Approach", 0),
("Python Web Development with Django", 0),
("Sams Teach Yourself Django in 24 Hours", 0),
("The Definitive Guide to Django: Web Development Done Right", 0),
],
)
def test_negated_aggregation(self):
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count("book")).filter(book_cnt=2)
).order_by("name")
expected_results = [a.name for a in expected_results]
qs = (
Author.objects.annotate(book_cnt=Count("book"))
.exclude(Q(book_cnt=2), Q(book_cnt=2))
.order_by("name")
)
self.assertQuerysetEqual(qs, expected_results, lambda b: b.name)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count("book")).filter(book_cnt=2)
).order_by("name")
expected_results = [a.name for a in expected_results]
qs = (
Author.objects.annotate(book_cnt=Count("book"))
.exclude(Q(book_cnt=2) | Q(book_cnt=2))
.order_by("name")
)
self.assertQuerysetEqual(qs, expected_results, lambda b: b.name)
def test_name_filters(self):
qs = (
Author.objects.annotate(Count("book"))
.filter(Q(book__count__exact=2) | Q(name="Adrian Holovaty"))
.order_by("name")
)
self.assertQuerysetEqual(
qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name
)
def test_name_expressions(self):
# Aggregates are spotted correctly from F objects.
# Note that Adrian's age is 34 in the fixtures, and he has one book
# so both conditions match one author.
qs = (
Author.objects.annotate(Count("book"))
.filter(Q(name="Peter Norvig") | Q(age=F("book__count") + 33))
.order_by("name")
)
self.assertQuerysetEqual(
qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name
)
def test_filter_aggregates_or_connector(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count("authors")).filter(q1 | q2).order_by("pk")
self.assertQuerysetEqual(
query,
[self.b1.pk, self.b4.pk, self.b5.pk, self.b6.pk],
attrgetter("pk"),
)
def test_filter_aggregates_negated_and_connector(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = (
Book.objects.annotate(Count("authors")).filter(~(q1 & q2)).order_by("pk")
)
self.assertQuerysetEqual(
query,
[self.b1.pk, self.b2.pk, self.b3.pk, self.b4.pk, self.b6.pk],
attrgetter("pk"),
)
def test_filter_aggregates_xor_connector(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count("authors")).filter(q1 ^ q2).order_by("pk")
self.assertQuerysetEqual(
query,
[self.b1.pk, self.b4.pk, self.b6.pk],
attrgetter("pk"),
)
def test_filter_aggregates_negated_xor_connector(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = (
Book.objects.annotate(Count("authors")).filter(~(q1 ^ q2)).order_by("pk")
)
self.assertQuerysetEqual(
query,
[self.b2.pk, self.b3.pk, self.b5.pk],
attrgetter("pk"),
)
def test_ticket_11293_q_immutable(self):
"""
Splitting a q object to parts for where/having doesn't alter
the original q-object.
"""
q1 = Q(isbn="")
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count("authors"))
query.filter(q1 | q2)
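        # q2 keeps its single original child: filter() split an internal copy
        # into WHERE/HAVING parts instead of mutating q2 in place.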
self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
"""
An F() object referring to related column works correctly in group by.
"""
qs = Book.objects.annotate(account=Count("authors")).filter(
account=F("publisher__num_awards")
)
self.assertQuerysetEqual(
qs, ["Sams Teach Yourself Django in 24 Hours"], lambda b: b.name
)
def test_annotate_reserved_word(self):
"""
Regression #18333 - Ensure annotated column name is properly quoted.
"""
vals = Book.objects.annotate(select=Count("authors__id")).aggregate(
Sum("select"), Avg("select")
)
self.assertEqual(
vals,
{
"select__sum": 10,
"select__avg": Approximate(1.666, places=2),
},
)
def test_annotate_on_relation(self):
book = Book.objects.annotate(
avg_price=Avg("price"), publisher_name=F("publisher__name")
).get(pk=self.b1.pk)
self.assertEqual(book.avg_price, 30.00)
self.assertEqual(book.publisher_name, "Apress")
def test_aggregate_on_relation(self):
# A query with an existing annotation aggregation on a relation should
# succeed.
qs = Book.objects.annotate(avg_price=Avg("price")).aggregate(
publisher_awards=Sum("publisher__num_awards")
)
self.assertEqual(qs["publisher_awards"], 30)
def test_annotate_distinct_aggregate(self):
        # There are three books with a rating of 4.0, and two of them have
        # the same price. Hence, distinct() removes one rating of 4.0
        # from the results.
vals1 = (
Book.objects.values("rating", "price")
.distinct()
.aggregate(result=Sum("rating"))
)
vals2 = Book.objects.aggregate(result=Sum("rating") - Value(4.0))
self.assertEqual(vals1, vals2)
def test_annotate_values_list_flat(self):
"""Find ages that are shared by at least two authors."""
qs = (
Author.objects.values_list("age", flat=True)
.annotate(age_count=Count("age"))
.filter(age_count__gt=1)
)
self.assertSequenceEqual(qs, [29])
def test_allow_distinct(self):
class MyAggregate(Aggregate):
pass
with self.assertRaisesMessage(TypeError, "MyAggregate does not allow distinct"):
MyAggregate("foo", distinct=True)
class DistinctAggregate(Aggregate):
allow_distinct = True
DistinctAggregate("foo", distinct=True)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_having_subquery_select(self):
authors = Author.objects.filter(pk=self.a1.pk)
books = Book.objects.annotate(Count("authors")).filter(
Q(authors__in=authors) | Q(authors__count__gt=2)
)
self.assertEqual(set(books), {self.b1, self.b4})
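# "Join promotion" in the tests below refers to the ORM upgrading an INNER
# JOIN to a LEFT OUTER JOIN when aggregating over a nullable relation, so
# that rows without a related object still appear with a count of zero.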
class JoinPromotionTests(TestCase):
def test_ticket_21150(self):
b = Bravo.objects.create()
c = Charlie.objects.create(bravo=b)
qs = Charlie.objects.select_related("alfa").annotate(Count("bravo__charlie"))
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].alfa, None)
a = Alfa.objects.create()
c.alfa = a
c.save()
# Force re-evaluation
qs = qs.all()
self.assertSequenceEqual(qs, [c])
self.assertEqual(qs[0].alfa, a)
def test_existing_join_not_promoted(self):
# No promotion for existing joins
qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(
Count("alfa__name")
)
self.assertIn(" INNER JOIN ", str(qs.query))
        # Also, the existing join is unpromoted when doing filtering for an
        # already promoted join.
qs = Charlie.objects.annotate(Count("alfa__name")).filter(
alfa__name__isnull=False
)
self.assertIn(" INNER JOIN ", str(qs.query))
        # But, as the join is nullable, its first use by annotate() will be
        # LOUTER.
qs = Charlie.objects.annotate(Count("alfa__name"))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = Book.objects.annotate(Count("contact__name"))
self.assertIn(" INNER JOIN ", str(qs.query))
class SelfReferentialFKTests(TestCase):
def test_ticket_24748(self):
t1 = SelfRefFK.objects.create(name="t1")
SelfRefFK.objects.create(name="t2", parent=t1)
SelfRefFK.objects.create(name="t3", parent=t1)
self.assertQuerysetEqual(
SelfRefFK.objects.annotate(num_children=Count("children")).order_by("name"),
[("t1", 2), ("t2", 0), ("t3", 0)],
lambda x: (x.name, x.num_children),
)
|
091c92a5cc4edef97f031bc82e5255e9ea5e95b15cefcd2ca7a3aa7491a0ccec | import os
from django.apps import AppConfig, apps
from django.apps.registry import Apps
from django.contrib.admin.models import LogEntry
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.db import models
from django.test import SimpleTestCase, override_settings
from django.test.utils import extend_sys_path, isolate_apps
from .models import SoAlternative, TotallyNormal, new_apps
from .one_config_app.apps import OneConfig
from .two_configs_one_default_app.apps import TwoConfig
# Small list with a variety of cases for tests that iterate on installed apps.
# Intentionally not in alphabetical order to check if the order is preserved.
SOME_INSTALLED_APPS = [
"apps.apps.MyAdmin",
"apps.apps.MyAuth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
SOME_INSTALLED_APPS_NAMES = [
"django.contrib.admin",
"django.contrib.auth",
] + SOME_INSTALLED_APPS[2:]
HERE = os.path.dirname(__file__)
class AppsTests(SimpleTestCase):
def test_singleton_main(self):
"""
Only one main registry can exist.
"""
with self.assertRaises(RuntimeError):
Apps(installed_apps=None)
def test_ready(self):
"""
Tests the ready property of the main registry.
"""
# The main app registry is always ready when the tests run.
self.assertIs(apps.ready, True)
# Non-main app registries are populated in __init__.
self.assertIs(Apps().ready, True)
# The condition is set when apps are ready
self.assertIs(apps.ready_event.is_set(), True)
self.assertIs(Apps().ready_event.is_set(), True)
def test_bad_app_config(self):
"""
Tests when INSTALLED_APPS contains an incorrect app config.
"""
msg = "'apps.apps.BadConfig' must supply a name attribute."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
with self.settings(INSTALLED_APPS=["apps.apps.BadConfig"]):
pass
def test_not_an_app_config(self):
"""
Tests when INSTALLED_APPS contains a class that isn't an app config.
"""
msg = "'apps.apps.NotAConfig' isn't a subclass of AppConfig."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
with self.settings(INSTALLED_APPS=["apps.apps.NotAConfig"]):
pass
def test_no_such_app(self):
"""
Tests when INSTALLED_APPS contains an app that doesn't exist, either
directly or via an app config.
"""
with self.assertRaises(ImportError):
with self.settings(INSTALLED_APPS=["there is no such app"]):
pass
msg = (
"Cannot import 'there is no such app'. Check that "
"'apps.apps.NoSuchApp.name' is correct."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
with self.settings(INSTALLED_APPS=["apps.apps.NoSuchApp"]):
pass
def test_no_such_app_config(self):
msg = "Module 'apps' does not contain a 'NoSuchConfig' class."
with self.assertRaisesMessage(ImportError, msg):
with self.settings(INSTALLED_APPS=["apps.NoSuchConfig"]):
pass
def test_no_such_app_config_with_choices(self):
msg = (
"Module 'apps.apps' does not contain a 'NoSuchConfig' class. "
"Choices are: 'BadConfig', 'ModelPKAppsConfig', 'MyAdmin', "
"'MyAuth', 'NoSuchApp', 'PlainAppsConfig', 'RelabeledAppsConfig'."
)
with self.assertRaisesMessage(ImportError, msg):
with self.settings(INSTALLED_APPS=["apps.apps.NoSuchConfig"]):
pass
def test_no_config_app(self):
"""Load an app that doesn't provide an AppConfig class."""
with self.settings(INSTALLED_APPS=["apps.no_config_app"]):
config = apps.get_app_config("no_config_app")
self.assertIsInstance(config, AppConfig)
def test_one_config_app(self):
"""Load an app that provides an AppConfig class."""
with self.settings(INSTALLED_APPS=["apps.one_config_app"]):
config = apps.get_app_config("one_config_app")
self.assertIsInstance(config, OneConfig)
def test_two_configs_app(self):
"""Load an app that provides two AppConfig classes."""
with self.settings(INSTALLED_APPS=["apps.two_configs_app"]):
config = apps.get_app_config("two_configs_app")
self.assertIsInstance(config, AppConfig)
def test_two_default_configs_app(self):
"""Load an app that provides two default AppConfig classes."""
msg = (
"'apps.two_default_configs_app.apps' declares more than one "
"default AppConfig: 'TwoConfig', 'TwoConfigBis'."
)
with self.assertRaisesMessage(RuntimeError, msg):
with self.settings(INSTALLED_APPS=["apps.two_default_configs_app"]):
pass
def test_two_configs_one_default_app(self):
"""
Load an app that provides two AppConfig classes, one being the default.
"""
with self.settings(INSTALLED_APPS=["apps.two_configs_one_default_app"]):
config = apps.get_app_config("two_configs_one_default_app")
self.assertIsInstance(config, TwoConfig)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_app_configs(self):
"""
Tests apps.get_app_configs().
"""
app_configs = apps.get_app_configs()
self.assertEqual(
[app_config.name for app_config in app_configs], SOME_INSTALLED_APPS_NAMES
)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_app_config(self):
"""
Tests apps.get_app_config().
"""
app_config = apps.get_app_config("admin")
self.assertEqual(app_config.name, "django.contrib.admin")
app_config = apps.get_app_config("staticfiles")
self.assertEqual(app_config.name, "django.contrib.staticfiles")
with self.assertRaises(LookupError):
apps.get_app_config("admindocs")
msg = "No installed app with label 'django.contrib.auth'. Did you mean 'myauth'"
with self.assertRaisesMessage(LookupError, msg):
apps.get_app_config("django.contrib.auth")
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_is_installed(self):
"""
Tests apps.is_installed().
"""
self.assertIs(apps.is_installed("django.contrib.admin"), True)
self.assertIs(apps.is_installed("django.contrib.auth"), True)
self.assertIs(apps.is_installed("django.contrib.staticfiles"), True)
self.assertIs(apps.is_installed("django.contrib.admindocs"), False)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_model(self):
"""
Tests apps.get_model().
"""
self.assertEqual(apps.get_model("admin", "LogEntry"), LogEntry)
with self.assertRaises(LookupError):
apps.get_model("admin", "LogExit")
# App label is case-sensitive, Model name is case-insensitive.
self.assertEqual(apps.get_model("admin", "loGentrY"), LogEntry)
with self.assertRaises(LookupError):
apps.get_model("Admin", "LogEntry")
# A single argument is accepted.
self.assertEqual(apps.get_model("admin.LogEntry"), LogEntry)
with self.assertRaises(LookupError):
apps.get_model("admin.LogExit")
with self.assertRaises(ValueError):
apps.get_model("admin_LogEntry")
@override_settings(INSTALLED_APPS=["apps.apps.RelabeledAppsConfig"])
def test_relabeling(self):
self.assertEqual(apps.get_app_config("relabeled").name, "apps")
def test_duplicate_labels(self):
with self.assertRaisesMessage(
ImproperlyConfigured, "Application labels aren't unique"
):
with self.settings(INSTALLED_APPS=["apps.apps.PlainAppsConfig", "apps"]):
pass
def test_duplicate_names(self):
with self.assertRaisesMessage(
ImproperlyConfigured, "Application names aren't unique"
):
with self.settings(
INSTALLED_APPS=["apps.apps.RelabeledAppsConfig", "apps"]
):
pass
def test_import_exception_is_not_masked(self):
"""
App discovery should preserve stack traces. Regression test for #22920.
"""
with self.assertRaisesMessage(ImportError, "Oops"):
with self.settings(INSTALLED_APPS=["import_error_package"]):
pass
def test_models_py(self):
"""
The models in the models.py file were loaded correctly.
"""
self.assertEqual(apps.get_model("apps", "TotallyNormal"), TotallyNormal)
with self.assertRaises(LookupError):
apps.get_model("apps", "SoAlternative")
with self.assertRaises(LookupError):
new_apps.get_model("apps", "TotallyNormal")
self.assertEqual(new_apps.get_model("apps", "SoAlternative"), SoAlternative)
def test_models_not_loaded(self):
"""
apps.get_models() raises an exception if apps.models_ready isn't True.
"""
apps.models_ready = False
try:
# The cache must be cleared to trigger the exception.
apps.get_models.cache_clear()
with self.assertRaisesMessage(
AppRegistryNotReady, "Models aren't loaded yet."
):
apps.get_models()
finally:
apps.models_ready = True
def test_dynamic_load(self):
"""
Makes a new model at runtime and ensures it goes into the right place.
"""
old_models = list(apps.get_app_config("apps").get_models())
# Construct a new model in a new app registry
body = {}
new_apps = Apps(["apps"])
meta_contents = {
"app_label": "apps",
"apps": new_apps,
}
meta = type("Meta", (), meta_contents)
body["Meta"] = meta
body["__module__"] = TotallyNormal.__module__
temp_model = type("SouthPonies", (models.Model,), body)
# Make sure it appeared in the right place!
self.assertEqual(list(apps.get_app_config("apps").get_models()), old_models)
with self.assertRaises(LookupError):
apps.get_model("apps", "SouthPonies")
self.assertEqual(new_apps.get_model("apps", "SouthPonies"), temp_model)
def test_model_clash(self):
"""
Test for behavior when two models clash in the app registry.
"""
new_apps = Apps(["apps"])
meta_contents = {
"app_label": "apps",
"apps": new_apps,
}
body = {}
body["Meta"] = type("Meta", (), meta_contents)
body["__module__"] = TotallyNormal.__module__
type("SouthPonies", (models.Model,), body)
        # When __name__ and __module__ match, we assume the module was
        # reloaded and issue a warning. This use case arises in the REPL.
        # Refs #23621.
body = {}
body["Meta"] = type("Meta", (), meta_contents)
body["__module__"] = TotallyNormal.__module__
msg = (
"Model 'apps.southponies' was already registered. "
"Reloading models is not advised as it can lead to inconsistencies, "
"most notably with related models."
)
with self.assertRaisesMessage(RuntimeWarning, msg):
type("SouthPonies", (models.Model,), body)
# If it doesn't appear to be a reloaded module then we expect
# a RuntimeError.
body = {}
body["Meta"] = type("Meta", (), meta_contents)
body["__module__"] = TotallyNormal.__module__ + ".whatever"
with self.assertRaisesMessage(
RuntimeError, "Conflicting 'southponies' models in application 'apps':"
):
type("SouthPonies", (models.Model,), body)
def test_get_containing_app_config_apps_not_ready(self):
"""
apps.get_containing_app_config() should raise an exception if
apps.apps_ready isn't True.
"""
apps.apps_ready = False
try:
with self.assertRaisesMessage(
AppRegistryNotReady, "Apps aren't loaded yet"
):
apps.get_containing_app_config("foo")
finally:
apps.apps_ready = True
@isolate_apps("apps", kwarg_name="apps")
def test_lazy_model_operation(self, apps):
"""
Tests apps.lazy_model_operation().
"""
model_classes = []
initial_pending = set(apps._pending_operations)
def test_func(*models):
model_classes[:] = models
class LazyA(models.Model):
pass
# Test models appearing twice, and models appearing consecutively
model_keys = [
("apps", model_name)
for model_name in ["lazya", "lazyb", "lazyb", "lazyc", "lazya"]
]
apps.lazy_model_operation(test_func, *model_keys)
        # LazyA shouldn't be waited on since it's already registered, and
        # LazyC shouldn't be waited on until LazyB exists.
self.assertEqual(
set(apps._pending_operations) - initial_pending, {("apps", "lazyb")}
)
# Multiple operations can wait on the same model
apps.lazy_model_operation(test_func, ("apps", "lazyb"))
class LazyB(models.Model):
pass
self.assertEqual(model_classes, [LazyB])
        # Now we are just waiting on LazyC.
self.assertEqual(
set(apps._pending_operations) - initial_pending, {("apps", "lazyc")}
)
class LazyC(models.Model):
pass
# Everything should be loaded - make sure the callback was executed properly.
self.assertEqual(model_classes, [LazyA, LazyB, LazyB, LazyC, LazyA])
class Stub:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class AppConfigTests(SimpleTestCase):
"""Unit tests for AppConfig class."""
def test_path_set_explicitly(self):
"""If subclass sets path as class attr, no module attributes needed."""
class MyAppConfig(AppConfig):
path = "foo"
ac = MyAppConfig("label", Stub())
self.assertEqual(ac.path, "foo")
def test_explicit_path_overrides(self):
"""If path set as class attr, overrides __path__ and __file__."""
class MyAppConfig(AppConfig):
path = "foo"
ac = MyAppConfig("label", Stub(__path__=["a"], __file__="b/__init__.py"))
self.assertEqual(ac.path, "foo")
def test_dunder_path(self):
"""If single element in __path__, use it (in preference to __file__)."""
ac = AppConfig("label", Stub(__path__=["a"], __file__="b/__init__.py"))
self.assertEqual(ac.path, "a")
def test_no_dunder_path_fallback_to_dunder_file(self):
"""If there is no __path__ attr, use __file__."""
ac = AppConfig("label", Stub(__file__="b/__init__.py"))
self.assertEqual(ac.path, "b")
def test_empty_dunder_path_fallback_to_dunder_file(self):
"""If the __path__ attr is empty, use __file__ if set."""
ac = AppConfig("label", Stub(__path__=[], __file__="b/__init__.py"))
self.assertEqual(ac.path, "b")
def test_multiple_dunder_path_fallback_to_dunder_file(self):
"""If the __path__ attr is length>1, use __file__ if set."""
ac = AppConfig("label", Stub(__path__=["a", "b"], __file__="c/__init__.py"))
self.assertEqual(ac.path, "c")
def test_no_dunder_path_or_dunder_file(self):
"""If there is no __path__ or __file__, raise ImproperlyConfigured."""
with self.assertRaises(ImproperlyConfigured):
AppConfig("label", Stub())
def test_empty_dunder_path_no_dunder_file(self):
"""If the __path__ attr is empty and there is no __file__, raise."""
with self.assertRaises(ImproperlyConfigured):
AppConfig("label", Stub(__path__=[]))
def test_multiple_dunder_path_no_dunder_file(self):
"""If the __path__ attr is length>1 and there is no __file__, raise."""
with self.assertRaises(ImproperlyConfigured):
AppConfig("label", Stub(__path__=["a", "b"]))
def test_duplicate_dunder_path_no_dunder_file(self):
"""
If the __path__ attr contains duplicate paths and there is no
        __file__, the duplicates should be deduplicated (#25246).
"""
ac = AppConfig("label", Stub(__path__=["a", "a"]))
self.assertEqual(ac.path, "a")
def test_repr(self):
ac = AppConfig("label", Stub(__path__=["a"]))
self.assertEqual(repr(ac), "<AppConfig: label>")
def test_invalid_label(self):
class MyAppConfig(AppConfig):
label = "invalid.label"
msg = "The app label 'invalid.label' is not a valid Python identifier."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
MyAppConfig("test_app", Stub())
@override_settings(
INSTALLED_APPS=["apps.apps.ModelPKAppsConfig"],
DEFAULT_AUTO_FIELD="django.db.models.SmallAutoField",
)
def test_app_default_auto_field(self):
apps_config = apps.get_app_config("apps")
self.assertEqual(
apps_config.default_auto_field,
"django.db.models.BigAutoField",
)
self.assertIs(apps_config._is_default_auto_field_overridden, True)
@override_settings(
INSTALLED_APPS=["apps.apps.PlainAppsConfig"],
DEFAULT_AUTO_FIELD="django.db.models.SmallAutoField",
)
def test_default_auto_field_setting(self):
apps_config = apps.get_app_config("apps")
self.assertEqual(
apps_config.default_auto_field,
"django.db.models.SmallAutoField",
)
self.assertIs(apps_config._is_default_auto_field_overridden, False)
class NamespacePackageAppTests(SimpleTestCase):
# We need nsapp to be top-level so our multiple-paths tests can add another
    # location for it (if it's inside a normal package with an __init__.py,
    # that isn't possible). In order to avoid cluttering the already-full tests/ dir
# (which is on sys.path), we add these new entries to sys.path temporarily.
base_location = os.path.join(HERE, "namespace_package_base")
other_location = os.path.join(HERE, "namespace_package_other_base")
app_path = os.path.join(base_location, "nsapp")
def test_single_path(self):
"""
A Py3.3+ namespace package can be an app if it has only one path.
"""
with extend_sys_path(self.base_location):
with self.settings(INSTALLED_APPS=["nsapp"]):
app_config = apps.get_app_config("nsapp")
self.assertEqual(app_config.path, self.app_path)
def test_multiple_paths(self):
"""
A Py3.3+ namespace package with multiple locations cannot be an app.
(Because then we wouldn't know where to load its templates, static
assets, etc. from.)
"""
# Temporarily add two directories to sys.path that both contain
# components of the "nsapp" package.
with extend_sys_path(self.base_location, self.other_location):
with self.assertRaises(ImproperlyConfigured):
with self.settings(INSTALLED_APPS=["nsapp"]):
pass
def test_multiple_paths_explicit_path(self):
"""
        Multiple locations are OK only if the app config has an explicit path.
"""
# Temporarily add two directories to sys.path that both contain
# components of the "nsapp" package.
with extend_sys_path(self.base_location, self.other_location):
with self.settings(INSTALLED_APPS=["nsapp.apps.NSAppConfig"]):
app_config = apps.get_app_config("nsapp")
self.assertEqual(app_config.path, self.app_path)
|
caf7febf2fd8f8d59da7fd4c272c4834c7f3321d679a7b60d34af6804d543f14 | from django.db.models import Q
from django.test import TestCase
from .models import Number
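# QuerySets and Q objects support ^ (XOR). For two operands it matches rows
# where exactly one condition holds; for chains of three or more, Django
# versions have differed between "exactly one true" and "an odd number
# true", but no case below ever has more than two conditions true at once,
# so the expected results hold either way.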
class XorLookupsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.numbers = [Number.objects.create(num=i) for i in range(10)]
def test_filter(self):
self.assertCountEqual(
Number.objects.filter(num__lte=7) ^ Number.objects.filter(num__gte=3),
self.numbers[:3] + self.numbers[8:],
)
self.assertCountEqual(
Number.objects.filter(Q(num__lte=7) ^ Q(num__gte=3)),
self.numbers[:3] + self.numbers[8:],
)
def test_filter_negated(self):
self.assertCountEqual(
Number.objects.filter(Q(num__lte=7) ^ ~Q(num__lt=3)),
self.numbers[:3] + self.numbers[8:],
)
self.assertCountEqual(
Number.objects.filter(~Q(num__gt=7) ^ ~Q(num__lt=3)),
self.numbers[:3] + self.numbers[8:],
)
self.assertCountEqual(
Number.objects.filter(Q(num__lte=7) ^ ~Q(num__lt=3) ^ Q(num__lte=1)),
[self.numbers[2]] + self.numbers[8:],
)
self.assertCountEqual(
Number.objects.filter(~(Q(num__lte=7) ^ ~Q(num__lt=3) ^ Q(num__lte=1))),
self.numbers[:2] + self.numbers[3:8],
)
def test_exclude(self):
self.assertCountEqual(
Number.objects.exclude(Q(num__lte=7) ^ Q(num__gte=3)),
self.numbers[3:8],
)
def test_stages(self):
numbers = Number.objects.all()
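        # Every row matches both num__gte=0 and num__lte=11, so their XOR is
        # empty; only num=0 satisfies exactly one of num__gt=0 and num__lt=11.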
self.assertSequenceEqual(
numbers.filter(num__gte=0) ^ numbers.filter(num__lte=11),
[],
)
self.assertSequenceEqual(
numbers.filter(num__gt=0) ^ numbers.filter(num__lt=11),
[self.numbers[0]],
)
def test_pk_q(self):
self.assertCountEqual(
Number.objects.filter(Q(pk=self.numbers[0].pk) ^ Q(pk=self.numbers[1].pk)),
self.numbers[:2],
)
def test_empty_in(self):
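        # Q(pk__in=[]) can never match, so the XOR reduces to the num__gte=5
        # condition alone.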
self.assertCountEqual(
Number.objects.filter(Q(pk__in=[]) ^ Q(num__gte=5)),
self.numbers[5:],
)
|
278d26dc977c6d170c051abc6e2eecec8230accfc7011dcc915de84beecc4351 | from django.db import models
class Number(models.Model):
num = models.IntegerField()
def __str__(self):
return str(self.num)
|
2ad0157ad0f61de51ddbfeda08bc53eca9eac725d6daca8afdafb73fb9011c5c | from datetime import timedelta
from django.core import signing
from django.http import HttpRequest, HttpResponse
from django.test import SimpleTestCase, override_settings
from django.test.utils import freeze_time
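# set_signed_cookie() serializes the value with a TimestampSigner, producing
# "value:timestamp:signature". The payload stays readable on the client, but
# any modification (or a wrong salt) invalidates the signature.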
class SignedCookieTest(SimpleTestCase):
def test_can_set_and_read_signed_cookies(self):
response = HttpResponse()
response.set_signed_cookie("c", "hello")
self.assertIn("c", response.cookies)
self.assertTrue(response.cookies["c"].value.startswith("hello:"))
request = HttpRequest()
request.COOKIES["c"] = response.cookies["c"].value
value = request.get_signed_cookie("c")
self.assertEqual(value, "hello")
def test_can_use_salt(self):
response = HttpResponse()
response.set_signed_cookie("a", "hello", salt="one")
request = HttpRequest()
request.COOKIES["a"] = response.cookies["a"].value
value = request.get_signed_cookie("a", salt="one")
self.assertEqual(value, "hello")
with self.assertRaises(signing.BadSignature):
request.get_signed_cookie("a", salt="two")
def test_detects_tampering(self):
response = HttpResponse()
response.set_signed_cookie("c", "hello")
request = HttpRequest()
request.COOKIES["c"] = response.cookies["c"].value[:-2] + "$$"
with self.assertRaises(signing.BadSignature):
request.get_signed_cookie("c")
def test_default_argument_suppresses_exceptions(self):
response = HttpResponse()
response.set_signed_cookie("c", "hello")
request = HttpRequest()
request.COOKIES["c"] = response.cookies["c"].value[:-2] + "$$"
self.assertIsNone(request.get_signed_cookie("c", default=None))
def test_max_age_argument(self):
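        # The cookie is signed at t=123456789 and read back 11 seconds later,
        # so max_age=11 is the boundary that still validates while max_age=10
        # has already expired.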
value = "hello"
with freeze_time(123456789):
response = HttpResponse()
response.set_signed_cookie("c", value)
request = HttpRequest()
request.COOKIES["c"] = response.cookies["c"].value
self.assertEqual(request.get_signed_cookie("c"), value)
with freeze_time(123456800):
self.assertEqual(request.get_signed_cookie("c", max_age=12), value)
self.assertEqual(request.get_signed_cookie("c", max_age=11), value)
self.assertEqual(
request.get_signed_cookie("c", max_age=timedelta(seconds=11)), value
)
with self.assertRaises(signing.SignatureExpired):
request.get_signed_cookie("c", max_age=10)
with self.assertRaises(signing.SignatureExpired):
request.get_signed_cookie("c", max_age=timedelta(seconds=10))
def test_set_signed_cookie_max_age_argument(self):
response = HttpResponse()
response.set_signed_cookie("c", "value", max_age=100)
self.assertEqual(response.cookies["c"]["max-age"], 100)
response.set_signed_cookie("d", "value", max_age=timedelta(hours=2))
self.assertEqual(response.cookies["d"]["max-age"], 7200)
@override_settings(SECRET_KEY=b"\xe7")
def test_signed_cookies_with_binary_key(self):
response = HttpResponse()
response.set_signed_cookie("c", "hello")
request = HttpRequest()
request.COOKIES["c"] = response.cookies["c"].value
self.assertEqual(request.get_signed_cookie("c"), "hello")
|
949cf71421a9bd1e5f53e3b10d12f366fa999d91fcd0701c8167829f66b86273 | import json
import os
import shutil
import sys
import tempfile
import unittest
from io import StringIO
from pathlib import Path
from unittest import mock
from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.contrib.staticfiles.management.commands.collectstatic import (
Command as CollectstaticCommand,
)
from django.core.management import call_command
from django.test import SimpleTestCase, override_settings
from .cases import CollectionTestCase
from .settings import TEST_ROOT
def hashed_file_path(test, path):
fullpath = test.render_template(test.static_template_snippet(path))
return fullpath.replace(settings.STATIC_URL, "")
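# The storages exercised below embed a content hash in static file names
# (e.g. styles.css -> styles.5e0040571e1a.css) and rewrite url()/@import
# references inside CSS, as well as sourceMappingURL comments, to point at
# the hashed names.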
class TestHashedFiles:
hashed_file_path = hashed_file_path
def tearDown(self):
# Clear hashed files to avoid side effects among tests.
storage.staticfiles_storage.hashed_files.clear()
def assertPostCondition(self):
"""
Assert post conditions for a test are met. Must be manually called at
the end of each test.
"""
pass
def test_template_tag_return(self):
self.assertStaticRaises(
ValueError, "does/not/exist.png", "/static/does/not/exist.png"
)
self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt")
self.assertStaticRenders(
"test/file.txt", "/static/test/file.dad0999e4f8f.txt", asvar=True
)
self.assertStaticRenders(
"cached/styles.css", "/static/cached/styles.5e0040571e1a.css"
)
self.assertStaticRenders("path/", "/static/path/")
self.assertStaticRenders("path/?query", "/static/path/?query")
self.assertPostCondition()
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.5e0040571e1a.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
self.assertPostCondition()
def test_path_ignored_completely(self):
relpath = self.hashed_file_path("cached/css/ignored.css")
self.assertEqual(relpath, "cached/css/ignored.554da52152af.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b"#foobar", content)
self.assertIn(b"http:foobar", content)
self.assertIn(b"https:foobar", content)
self.assertIn(b"data:foobar", content)
self.assertIn(b"chrome:foobar", content)
self.assertIn(b"//foobar", content)
self.assertPostCondition()
def test_path_with_querystring(self):
relpath = self.hashed_file_path("cached/styles.css?spam=eggs")
self.assertEqual(relpath, "cached/styles.5e0040571e1a.css?spam=eggs")
with storage.staticfiles_storage.open(
"cached/styles.5e0040571e1a.css"
) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
self.assertPostCondition()
def test_path_with_fragment(self):
relpath = self.hashed_file_path("cached/styles.css#eggs")
self.assertEqual(relpath, "cached/styles.5e0040571e1a.css#eggs")
with storage.staticfiles_storage.open(
"cached/styles.5e0040571e1a.css"
) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
self.assertPostCondition()
def test_path_with_querystring_and_fragment(self):
relpath = self.hashed_file_path("cached/css/fragments.css")
self.assertEqual(relpath, "cached/css/fragments.a60c0e74834f.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b"fonts/font.b9b105392eb8.eot?#iefix", content)
self.assertIn(b"fonts/font.b8d603e42714.svg#webfontIyfZbseF", content)
self.assertIn(
b"fonts/font.b8d603e42714.svg#path/to/../../fonts/font.svg", content
)
self.assertIn(
b"data:font/woff;charset=utf-8;"
b"base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA",
content,
)
self.assertIn(b"#default#VML", content)
self.assertPostCondition()
def test_template_tag_absolute(self):
relpath = self.hashed_file_path("cached/absolute.css")
self.assertEqual(relpath, "cached/absolute.eb04def9f9a4.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"/static/cached/styles.css", content)
self.assertIn(b"/static/cached/styles.5e0040571e1a.css", content)
self.assertNotIn(b"/static/styles_root.css", content)
self.assertIn(b"/static/styles_root.401f2509a628.css", content)
self.assertIn(b"/static/cached/img/relative.acae32e4532b.png", content)
self.assertPostCondition()
def test_template_tag_absolute_root(self):
"""
Like test_template_tag_absolute, but for a file in STATIC_ROOT (#26249).
"""
relpath = self.hashed_file_path("absolute_root.css")
self.assertEqual(relpath, "absolute_root.f821df1b64f7.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"/static/styles_root.css", content)
self.assertIn(b"/static/styles_root.401f2509a628.css", content)
self.assertPostCondition()
def test_template_tag_relative(self):
relpath = self.hashed_file_path("cached/relative.css")
self.assertEqual(relpath, "cached/relative.c3e9e1ea6f2e.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"../cached/styles.css", content)
self.assertNotIn(b'@import "styles.css"', content)
self.assertNotIn(b"url(img/relative.png)", content)
self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
self.assertIn(b"../cached/styles.5e0040571e1a.css", content)
self.assertPostCondition()
def test_import_replacement(self):
"See #18050"
relpath = self.hashed_file_path("cached/import.css")
self.assertEqual(relpath, "cached/import.f53576679e5a.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"""import url("styles.5e0040571e1a.css")""", relfile.read())
self.assertPostCondition()
def test_template_tag_deep_relative(self):
relpath = self.hashed_file_path("cached/css/window.css")
self.assertEqual(relpath, "cached/css/window.5d5c10836967.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"url(img/window.png)", content)
self.assertIn(b'url("img/window.acae32e4532b.png")', content)
self.assertPostCondition()
def test_template_tag_url(self):
relpath = self.hashed_file_path("cached/url.css")
self.assertEqual(relpath, "cached/url.902310b73412.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"https://", relfile.read())
self.assertPostCondition()
@override_settings(
STATICFILES_DIRS=[os.path.join(TEST_ROOT, "project", "loop")],
STATICFILES_FINDERS=["django.contrib.staticfiles.finders.FileSystemFinder"],
)
def test_import_loop(self):
finders.get_finder.cache_clear()
err = StringIO()
with self.assertRaisesMessage(RuntimeError, "Max post-process passes exceeded"):
call_command("collectstatic", interactive=False, verbosity=0, stderr=err)
self.assertEqual("Post-processing 'All' failed!\n\n", err.getvalue())
self.assertPostCondition()
def test_post_processing(self):
"""
post_processing behaves correctly.
Files that are alterable should always be post-processed; files that
aren't should be skipped.
        collectstatic has already been called once in setUp() for this test
        case, so the check is performed by verifying behavior on a second run.
"""
collectstatic_args = {
"interactive": False,
"verbosity": 0,
"link": False,
"clear": False,
"dry_run": False,
"post_process": True,
"use_default_ignore_patterns": True,
"ignore_patterns": ["*.ignoreme"],
}
collectstatic_cmd = CollectstaticCommand()
collectstatic_cmd.set_options(**collectstatic_args)
stats = collectstatic_cmd.collect()
self.assertIn(
os.path.join("cached", "css", "window.css"), stats["post_processed"]
)
self.assertIn(
os.path.join("cached", "css", "img", "window.png"), stats["unmodified"]
)
self.assertIn(os.path.join("test", "nonascii.css"), stats["post_processed"])
# No file should be yielded twice.
self.assertCountEqual(stats["post_processed"], set(stats["post_processed"]))
self.assertPostCondition()
def test_css_import_case_insensitive(self):
relpath = self.hashed_file_path("cached/styles_insensitive.css")
self.assertEqual(relpath, "cached/styles_insensitive.3fa427592a53.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
self.assertPostCondition()
def test_css_source_map(self):
relpath = self.hashed_file_path("cached/source_map.css")
self.assertEqual(relpath, "cached/source_map.b2fceaf426aa.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"/*# sourceMappingURL=source_map.css.map*/", content)
self.assertIn(
b"/*# sourceMappingURL=source_map.css.99914b932bd3.map */",
content,
)
self.assertPostCondition()
def test_css_source_map_tabs(self):
relpath = self.hashed_file_path("cached/source_map_tabs.css")
self.assertEqual(relpath, "cached/source_map_tabs.b2fceaf426aa.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"/*#\tsourceMappingURL=source_map.css.map\t*/", content)
self.assertIn(
b"/*# sourceMappingURL=source_map.css.99914b932bd3.map */",
content,
)
self.assertPostCondition()
def test_css_source_map_sensitive(self):
relpath = self.hashed_file_path("cached/source_map_sensitive.css")
self.assertEqual(relpath, "cached/source_map_sensitive.456683f2106f.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b"/*# sOuRcEMaPpInGURL=source_map.css.map */", content)
self.assertNotIn(
b"/*# sourceMappingURL=source_map.css.99914b932bd3.map */",
content,
)
self.assertPostCondition()
def test_js_source_map(self):
relpath = self.hashed_file_path("cached/source_map.js")
self.assertEqual(relpath, "cached/source_map.cd45b8534a87.js")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"//# sourceMappingURL=source_map.js.map", content)
self.assertIn(
b"//# sourceMappingURL=source_map.js.99914b932bd3.map",
content,
)
self.assertPostCondition()
def test_js_source_map_sensitive(self):
relpath = self.hashed_file_path("cached/source_map_sensitive.js")
self.assertEqual(relpath, "cached/source_map_sensitive.5da96fdd3cb3.js")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b"//# sOuRcEMaPpInGURL=source_map.js.map", content)
self.assertNotIn(
b"//# sourceMappingURL=source_map.js.99914b932bd3.map",
content,
)
self.assertPostCondition()
@override_settings(
STATICFILES_DIRS=[os.path.join(TEST_ROOT, "project", "faulty")],
STATICFILES_FINDERS=["django.contrib.staticfiles.finders.FileSystemFinder"],
)
def test_post_processing_failure(self):
"""
post_processing indicates the origin of the error when it fails.
"""
finders.get_finder.cache_clear()
err = StringIO()
with self.assertRaises(Exception):
call_command("collectstatic", interactive=False, verbosity=0, stderr=err)
self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())
self.assertPostCondition()
@override_settings(STATICFILES_STORAGE="staticfiles_tests.storage.ExtraPatternsStorage")
class TestExtraPatternsStorage(CollectionTestCase):
def setUp(self):
storage.staticfiles_storage.hashed_files.clear() # avoid cache interference
super().setUp()
def cached_file_path(self, path):
fullpath = self.render_template(self.static_template_snippet(path))
return fullpath.replace(settings.STATIC_URL, "")
def test_multi_extension_patterns(self):
"""
With storage classes having several file extension patterns, only the
files matching a specific file pattern should be affected by the
substitution (#19670).
"""
# CSS files shouldn't be touched by JS patterns.
relpath = self.cached_file_path("cached/import.css")
self.assertEqual(relpath, "cached/import.f53576679e5a.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b'import url("styles.5e0040571e1a.css")', relfile.read())
# Confirm JS patterns have been applied to JS files.
relpath = self.cached_file_path("cached/test.js")
self.assertEqual(relpath, "cached/test.388d7a790d46.js")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b'JS_URL("import.f53576679e5a.css")', relfile.read())
@override_settings(
STATICFILES_STORAGE="django.contrib.staticfiles.storage.ManifestStaticFilesStorage",
)
class TestCollectionManifestStorage(TestHashedFiles, CollectionTestCase):
"""
Tests for the Cache busting storage
"""
def setUp(self):
super().setUp()
temp_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(temp_dir, "test"))
self._clear_filename = os.path.join(temp_dir, "test", "cleared.txt")
with open(self._clear_filename, "w") as f:
f.write("to be deleted in one test")
self.patched_settings = self.settings(
STATICFILES_DIRS=settings.STATICFILES_DIRS + [temp_dir],
)
self.patched_settings.enable()
self.addCleanup(shutil.rmtree, temp_dir)
self._manifest_strict = storage.staticfiles_storage.manifest_strict
def tearDown(self):
self.patched_settings.disable()
if os.path.exists(self._clear_filename):
os.unlink(self._clear_filename)
storage.staticfiles_storage.manifest_strict = self._manifest_strict
super().tearDown()
def assertPostCondition(self):
hashed_files = storage.staticfiles_storage.hashed_files
# The in-memory version of the manifest matches the one on disk
# since a properly created manifest should cover all filenames.
if hashed_files:
manifest = storage.staticfiles_storage.load_manifest()
self.assertEqual(hashed_files, manifest)
def test_manifest_exists(self):
filename = storage.staticfiles_storage.manifest_name
path = storage.staticfiles_storage.path(filename)
self.assertTrue(os.path.exists(path))
def test_manifest_does_not_exist(self):
storage.staticfiles_storage.manifest_name = "does.not.exist.json"
self.assertIsNone(storage.staticfiles_storage.read_manifest())
def test_manifest_does_not_ignore_permission_error(self):
with mock.patch("builtins.open", side_effect=PermissionError):
with self.assertRaises(PermissionError):
storage.staticfiles_storage.read_manifest()
def test_loaded_cache(self):
self.assertNotEqual(storage.staticfiles_storage.hashed_files, {})
manifest_content = storage.staticfiles_storage.read_manifest()
self.assertIn(
'"version": "%s"' % storage.staticfiles_storage.manifest_version,
manifest_content,
)
def test_parse_cache(self):
hashed_files = storage.staticfiles_storage.hashed_files
manifest = storage.staticfiles_storage.load_manifest()
self.assertEqual(hashed_files, manifest)
def test_clear_empties_manifest(self):
cleared_file_name = storage.staticfiles_storage.clean_name(
os.path.join("test", "cleared.txt")
)
# collect the additional file
self.run_collectstatic()
hashed_files = storage.staticfiles_storage.hashed_files
self.assertIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertIn(cleared_file_name, manifest_content)
original_path = storage.staticfiles_storage.path(cleared_file_name)
self.assertTrue(os.path.exists(original_path))
        # delete the original file from the app, collect with clear
os.unlink(self._clear_filename)
self.run_collectstatic(clear=True)
self.assertFileNotFound(original_path)
hashed_files = storage.staticfiles_storage.hashed_files
self.assertNotIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertNotIn(cleared_file_name, manifest_content)
def test_missing_entry(self):
missing_file_name = "cached/missing.css"
configured_storage = storage.staticfiles_storage
self.assertNotIn(missing_file_name, configured_storage.hashed_files)
# File name not found in manifest
with self.assertRaisesMessage(
ValueError,
"Missing staticfiles manifest entry for '%s'" % missing_file_name,
):
self.hashed_file_path(missing_file_name)
configured_storage.manifest_strict = False
# File doesn't exist on disk
err_msg = "The file '%s' could not be found with %r." % (
missing_file_name,
configured_storage._wrapped,
)
with self.assertRaisesMessage(ValueError, err_msg):
self.hashed_file_path(missing_file_name)
content = StringIO()
content.write("Found")
configured_storage.save(missing_file_name, content)
# File exists on disk
self.hashed_file_path(missing_file_name)
def test_intermediate_files(self):
cached_files = os.listdir(os.path.join(settings.STATIC_ROOT, "cached"))
# Intermediate files shouldn't be created for reference.
self.assertEqual(
len(
[
cached_file
for cached_file in cached_files
if cached_file.startswith("relative.")
]
),
2,
)
@override_settings(STATICFILES_STORAGE="staticfiles_tests.storage.NoneHashStorage")
class TestCollectionNoneHashStorage(CollectionTestCase):
hashed_file_path = hashed_file_path
def test_hashed_name(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.css")
@override_settings(
STATICFILES_STORAGE="staticfiles_tests.storage.NoPostProcessReplacedPathStorage"
)
class TestCollectionNoPostProcessReplacedPaths(CollectionTestCase):
run_collectstatic_in_setUp = False
def test_collectstatistic_no_post_process_replaced_paths(self):
stdout = StringIO()
self.run_collectstatic(verbosity=1, stdout=stdout)
self.assertIn("post-processed", stdout.getvalue())
@override_settings(STATICFILES_STORAGE="staticfiles_tests.storage.SimpleStorage")
class TestCollectionSimpleStorage(CollectionTestCase):
hashed_file_path = hashed_file_path
def setUp(self):
storage.staticfiles_storage.hashed_files.clear() # avoid cache interference
super().setUp()
def test_template_tag_return(self):
self.assertStaticRaises(
ValueError, "does/not/exist.png", "/static/does/not/exist.png"
)
self.assertStaticRenders("test/file.txt", "/static/test/file.deploy12345.txt")
self.assertStaticRenders(
"cached/styles.css", "/static/cached/styles.deploy12345.css"
)
self.assertStaticRenders("path/", "/static/path/")
self.assertStaticRenders("path/?query", "/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.deploy12345.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.deploy12345.css", content)
class CustomManifestStorage(storage.ManifestStaticFilesStorage):
def __init__(self, *args, manifest_storage=None, **kwargs):
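        # Ignore any caller-supplied manifest_storage and keep the manifest
        # in a separate location from the collected static files.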
manifest_storage = storage.StaticFilesStorage(
location=kwargs.pop("manifest_location"),
)
super().__init__(*args, manifest_storage=manifest_storage, **kwargs)
class TestCustomManifestStorage(SimpleTestCase):
def setUp(self):
self.manifest_path = Path(tempfile.mkdtemp())
self.addCleanup(shutil.rmtree, self.manifest_path)
self.staticfiles_storage = CustomManifestStorage(
manifest_location=self.manifest_path,
)
self.manifest_file = self.manifest_path / self.staticfiles_storage.manifest_name
# Manifest without paths.
self.manifest = {"version": self.staticfiles_storage.manifest_version}
with self.manifest_file.open("w") as manifest_file:
json.dump(self.manifest, manifest_file)
def test_read_manifest(self):
self.assertEqual(
self.staticfiles_storage.read_manifest(),
json.dumps(self.manifest),
)
def test_read_manifest_nonexistent(self):
os.remove(self.manifest_file)
self.assertIsNone(self.staticfiles_storage.read_manifest())
def test_save_manifest_override(self):
self.assertIs(self.manifest_file.exists(), True)
self.staticfiles_storage.save_manifest()
self.assertIs(self.manifest_file.exists(), True)
new_manifest = json.loads(self.staticfiles_storage.read_manifest())
self.assertIn("paths", new_manifest)
self.assertNotEqual(new_manifest, self.manifest)
def test_save_manifest_create(self):
os.remove(self.manifest_file)
self.staticfiles_storage.save_manifest()
self.assertIs(self.manifest_file.exists(), True)
new_manifest = json.loads(self.staticfiles_storage.read_manifest())
self.assertIn("paths", new_manifest)
self.assertNotEqual(new_manifest, self.manifest)
class CustomStaticFilesStorage(storage.StaticFilesStorage):
"""
Used in TestStaticFilePermissions
"""
def __init__(self, *args, **kwargs):
kwargs["file_permissions_mode"] = 0o640
kwargs["directory_permissions_mode"] = 0o740
super().__init__(*args, **kwargs)
@unittest.skipIf(sys.platform == "win32", "Windows only partially supports chmod.")
class TestStaticFilePermissions(CollectionTestCase):
command_params = {
"interactive": False,
"verbosity": 0,
"ignore_patterns": ["*.ignoreme"],
}
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
super().setUp()
def tearDown(self):
os.umask(self.old_umask)
super().tearDown()
# Don't run collectstatic command in this test class.
def run_collectstatic(self, **kwargs):
pass
@override_settings(
FILE_UPLOAD_PERMISSIONS=0o655,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
)
def test_collect_static_files_permissions(self):
call_command("collectstatic", **self.command_params)
static_root = Path(settings.STATIC_ROOT)
test_file = static_root / "test.txt"
file_mode = test_file.stat().st_mode & 0o777
self.assertEqual(file_mode, 0o655)
tests = [
static_root / "subdir",
static_root / "nested",
static_root / "nested" / "css",
]
for directory in tests:
with self.subTest(directory=directory):
dir_mode = directory.stat().st_mode & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(
FILE_UPLOAD_PERMISSIONS=None,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=None,
)
def test_collect_static_files_default_permissions(self):
call_command("collectstatic", **self.command_params)
static_root = Path(settings.STATIC_ROOT)
test_file = static_root / "test.txt"
file_mode = test_file.stat().st_mode & 0o777
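        # With umask 0o027, files get 0o666 & ~0o027 == 0o640 and
        # directories get 0o777 & ~0o027 == 0o750.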
self.assertEqual(file_mode, 0o666 & ~self.umask)
tests = [
static_root / "subdir",
static_root / "nested",
static_root / "nested" / "css",
]
for directory in tests:
with self.subTest(directory=directory):
dir_mode = directory.stat().st_mode & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
@override_settings(
FILE_UPLOAD_PERMISSIONS=0o655,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
STATICFILES_STORAGE="staticfiles_tests.test_storage.CustomStaticFilesStorage",
)
def test_collect_static_files_subclass_of_static_storage(self):
call_command("collectstatic", **self.command_params)
static_root = Path(settings.STATIC_ROOT)
test_file = static_root / "test.txt"
file_mode = test_file.stat().st_mode & 0o777
self.assertEqual(file_mode, 0o640)
tests = [
static_root / "subdir",
static_root / "nested",
static_root / "nested" / "css",
]
for directory in tests:
with self.subTest(directory=directory):
dir_mode = directory.stat().st_mode & 0o777
self.assertEqual(dir_mode, 0o740)
@override_settings(
STATICFILES_STORAGE="django.contrib.staticfiles.storage.ManifestStaticFilesStorage",
)
class TestCollectionHashedFilesCache(CollectionTestCase):
"""
Files referenced from CSS use the correct final hashed name regardless of
the order in which the files are post-processed.
"""
hashed_file_path = hashed_file_path
def setUp(self):
super().setUp()
self._temp_dir = temp_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(temp_dir, "test"))
self.addCleanup(shutil.rmtree, temp_dir)
def _get_filename_path(self, filename):
return os.path.join(self._temp_dir, "test", filename)
def test_file_change_after_collectstatic(self):
# Create initial static files.
file_contents = (
("foo.png", "foo"),
("bar.css", 'url("foo.png")\nurl("xyz.png")'),
("xyz.png", "xyz"),
)
for filename, content in file_contents:
with open(self._get_filename_path(filename), "w") as f:
f.write(content)
with self.modify_settings(STATICFILES_DIRS={"append": self._temp_dir}):
finders.get_finder.cache_clear()
err = StringIO()
# First collectstatic run.
call_command("collectstatic", interactive=False, verbosity=0, stderr=err)
relpath = self.hashed_file_path("test/bar.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b"foo.acbd18db4cc2.png", content)
self.assertIn(b"xyz.d16fb36f0911.png", content)
# Change the contents of the png files.
for filename in ("foo.png", "xyz.png"):
with open(self._get_filename_path(filename), "w+b") as f:
f.write(b"new content of file to change its hash")
# The hashes of the png files in the CSS file are updated after
# a second collectstatic.
call_command("collectstatic", interactive=False, verbosity=0, stderr=err)
relpath = self.hashed_file_path("test/bar.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b"foo.57a5cb9ba68d.png", content)
self.assertIn(b"xyz.57a5cb9ba68d.png", content)
|
762aba01f536600ff746754e54c831fa6f02dbb500d5645624203e1103f30db7 | import time
from datetime import datetime, timedelta
from http import cookies
from django.http import HttpResponse
from django.test import SimpleTestCase
from django.test.utils import freeze_time
from django.utils.http import http_date
from django.utils.timezone import utc
class SetCookieTests(SimpleTestCase):
def test_near_expiration(self):
"""Cookie will expire when a near expiration time is provided."""
response = HttpResponse()
        # There's a timing weakness in this test; the expected result for
# max-age requires that there be a very slight difference between the
# evaluated expiration time and the time evaluated in set_cookie(). If
# this difference doesn't exist, the cookie time will be 1 second
# larger. The sleep guarantees that there will be a time difference.
expires = datetime.now(tz=utc).replace(tzinfo=None) + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie("datetime", expires=expires)
datetime_cookie = response.cookies["datetime"]
self.assertEqual(datetime_cookie["max-age"], 10)
def test_aware_expiration(self):
"""set_cookie() accepts an aware datetime as expiration time."""
response = HttpResponse()
expires = datetime.now(tz=utc) + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie("datetime", expires=expires)
datetime_cookie = response.cookies["datetime"]
self.assertEqual(datetime_cookie["max-age"], 10)
def test_create_cookie_after_deleting_cookie(self):
"""Setting a cookie after deletion clears the expiry date."""
response = HttpResponse()
response.set_cookie("c", "old-value")
self.assertEqual(response.cookies["c"]["expires"], "")
response.delete_cookie("c")
self.assertEqual(
response.cookies["c"]["expires"], "Thu, 01 Jan 1970 00:00:00 GMT"
)
response.set_cookie("c", "new-value")
self.assertEqual(response.cookies["c"]["expires"], "")
def test_far_expiration(self):
"""Cookie will expire when a distant expiration time is provided."""
response = HttpResponse()
response.set_cookie("datetime", expires=datetime(2038, 1, 1, 4, 5, 6))
datetime_cookie = response.cookies["datetime"]
self.assertIn(
datetime_cookie["expires"],
# assertIn accounts for slight time dependency (#23450)
("Fri, 01 Jan 2038 04:05:06 GMT", "Fri, 01 Jan 2038 04:05:07 GMT"),
)
def test_max_age_expiration(self):
"""Cookie will expire if max_age is provided."""
response = HttpResponse()
set_cookie_time = time.time()
with freeze_time(set_cookie_time):
response.set_cookie("max_age", max_age=10)
max_age_cookie = response.cookies["max_age"]
self.assertEqual(max_age_cookie["max-age"], 10)
self.assertEqual(max_age_cookie["expires"], http_date(set_cookie_time + 10))
def test_max_age_int(self):
response = HttpResponse()
response.set_cookie("max_age", max_age=10.6)
self.assertEqual(response.cookies["max_age"]["max-age"], 10)
def test_max_age_timedelta(self):
response = HttpResponse()
response.set_cookie("max_age", max_age=timedelta(hours=1))
self.assertEqual(response.cookies["max_age"]["max-age"], 3600)
def test_max_age_with_expires(self):
response = HttpResponse()
msg = "'expires' and 'max_age' can't be used together."
with self.assertRaisesMessage(ValueError, msg):
response.set_cookie(
"max_age", expires=datetime(2000, 1, 1), max_age=timedelta(hours=1)
)
def test_httponly_cookie(self):
response = HttpResponse()
response.set_cookie("example", httponly=True)
example_cookie = response.cookies["example"]
self.assertIn(
"; %s" % cookies.Morsel._reserved["httponly"], str(example_cookie)
)
self.assertIs(example_cookie["httponly"], True)
def test_unicode_cookie(self):
"""HttpResponse.set_cookie() works with Unicode data."""
response = HttpResponse()
cookie_value = "清風"
response.set_cookie("test", cookie_value)
self.assertEqual(response.cookies["test"].value, cookie_value)
def test_samesite(self):
response = HttpResponse()
response.set_cookie("example", samesite="None")
self.assertEqual(response.cookies["example"]["samesite"], "None")
response.set_cookie("example", samesite="Lax")
self.assertEqual(response.cookies["example"]["samesite"], "Lax")
response.set_cookie("example", samesite="strict")
self.assertEqual(response.cookies["example"]["samesite"], "strict")
def test_invalid_samesite(self):
msg = 'samesite must be "lax", "none", or "strict".'
with self.assertRaisesMessage(ValueError, msg):
HttpResponse().set_cookie("example", samesite="invalid")
class DeleteCookieTests(SimpleTestCase):
def test_default(self):
response = HttpResponse()
response.delete_cookie("c")
cookie = response.cookies["c"]
self.assertEqual(cookie["expires"], "Thu, 01 Jan 1970 00:00:00 GMT")
self.assertEqual(cookie["max-age"], 0)
self.assertEqual(cookie["path"], "/")
self.assertEqual(cookie["secure"], "")
self.assertEqual(cookie["domain"], "")
self.assertEqual(cookie["samesite"], "")
def test_delete_cookie_secure_prefix(self):
"""
delete_cookie() sets the secure flag if the cookie name starts with
__Host- or __Secure- (without that, browsers ignore cookies with those
prefixes).
"""
response = HttpResponse()
for prefix in ("Secure", "Host"):
with self.subTest(prefix=prefix):
cookie_name = "__%s-c" % prefix
response.delete_cookie(cookie_name)
self.assertIs(response.cookies[cookie_name]["secure"], True)
def test_delete_cookie_secure_samesite_none(self):
# delete_cookie() sets the secure flag if samesite='none'.
response = HttpResponse()
response.delete_cookie("c", samesite="none")
self.assertIs(response.cookies["c"]["secure"], True)
def test_delete_cookie_samesite(self):
response = HttpResponse()
response.delete_cookie("c", samesite="lax")
self.assertEqual(response.cookies["c"]["samesite"], "lax")
|
071aa4cdead4fc0918350f97ba80486837a6e073ede593c3781b2ef24da231f2 | import io
import itertools
import os
import sys
import tempfile
from unittest import skipIf
from django.core.files.base import ContentFile
from django.http import FileResponse
from django.test import SimpleTestCase
class UnseekableBytesIO(io.BytesIO):
def seekable(self):
return False
class FileResponseTests(SimpleTestCase):
def test_content_length_file(self):
response = FileResponse(open(__file__, "rb"))
response.close()
self.assertEqual(
response.headers["Content-Length"], str(os.path.getsize(__file__))
)
def test_content_length_buffer(self):
response = FileResponse(io.BytesIO(b"binary content"))
self.assertEqual(response.headers["Content-Length"], "14")
def test_content_length_nonzero_starting_position_file(self):
file = open(__file__, "rb")
file.seek(10)
response = FileResponse(file)
response.close()
self.assertEqual(
response.headers["Content-Length"], str(os.path.getsize(__file__) - 10)
)
def test_content_length_nonzero_starting_position_buffer(self):
test_tuples = (
("BytesIO", io.BytesIO),
("UnseekableBytesIO", UnseekableBytesIO),
)
for buffer_class_name, BufferClass in test_tuples:
with self.subTest(buffer_class_name=buffer_class_name):
buffer = BufferClass(b"binary content")
buffer.seek(10)
response = FileResponse(buffer)
self.assertEqual(response.headers["Content-Length"], "4")
def test_content_length_nonzero_starting_position_file_seekable_no_tell(self):
class TestFile:
def __init__(self, path, *args, **kwargs):
self._file = open(path, *args, **kwargs)
def read(self, n_bytes=-1):
return self._file.read(n_bytes)
def seek(self, offset, whence=io.SEEK_SET):
return self._file.seek(offset, whence)
def seekable(self):
return True
@property
def name(self):
return self._file.name
def close(self):
if self._file:
self._file.close()
self._file = None
def __enter__(self):
return self
def __exit__(self, e_type, e_val, e_tb):
self.close()
file = TestFile(__file__, "rb")
file.seek(10)
response = FileResponse(file)
response.close()
self.assertEqual(
response.headers["Content-Length"], str(os.path.getsize(__file__) - 10)
)
def test_content_type_file(self):
response = FileResponse(open(__file__, "rb"))
response.close()
self.assertIn(response.headers["Content-Type"], ["text/x-python", "text/plain"])
def test_content_type_buffer(self):
response = FileResponse(io.BytesIO(b"binary content"))
self.assertEqual(response.headers["Content-Type"], "application/octet-stream")
def test_content_type_buffer_explicit(self):
response = FileResponse(
io.BytesIO(b"binary content"), content_type="video/webm"
)
self.assertEqual(response.headers["Content-Type"], "video/webm")
def test_content_type_buffer_explicit_default(self):
response = FileResponse(
io.BytesIO(b"binary content"), content_type="text/html; charset=utf-8"
)
self.assertEqual(response.headers["Content-Type"], "text/html; charset=utf-8")
def test_content_type_buffer_named(self):
test_tuples = (
(__file__, ["text/x-python", "text/plain"]),
(__file__ + "nosuchfile", ["application/octet-stream"]),
("test_fileresponse.py", ["text/x-python", "text/plain"]),
("test_fileresponse.pynosuchfile", ["application/octet-stream"]),
)
for filename, content_types in test_tuples:
with self.subTest(filename=filename):
buffer = io.BytesIO(b"binary content")
buffer.name = filename
response = FileResponse(buffer)
self.assertIn(response.headers["Content-Type"], content_types)
def test_content_disposition_file(self):
filenames = (
("", "test_fileresponse.py"),
("custom_name.py", "custom_name.py"),
)
dispositions = (
(False, "inline"),
(True, "attachment"),
)
for (filename, header_filename), (
as_attachment,
header_disposition,
) in itertools.product(filenames, dispositions):
with self.subTest(filename=filename, disposition=header_disposition):
response = FileResponse(
open(__file__, "rb"), filename=filename, as_attachment=as_attachment
)
response.close()
self.assertEqual(
response.headers["Content-Disposition"],
'%s; filename="%s"' % (header_disposition, header_filename),
)
def test_content_disposition_buffer(self):
response = FileResponse(io.BytesIO(b"binary content"))
self.assertFalse(response.has_header("Content-Disposition"))
def test_content_disposition_buffer_attachment(self):
response = FileResponse(io.BytesIO(b"binary content"), as_attachment=True)
self.assertEqual(response.headers["Content-Disposition"], "attachment")
def test_content_disposition_buffer_explicit_filename(self):
dispositions = (
(False, "inline"),
(True, "attachment"),
)
for as_attachment, header_disposition in dispositions:
response = FileResponse(
io.BytesIO(b"binary content"),
as_attachment=as_attachment,
filename="custom_name.py",
)
self.assertEqual(
response.headers["Content-Disposition"],
'%s; filename="custom_name.py"' % header_disposition,
)
def test_response_buffer(self):
response = FileResponse(io.BytesIO(b"binary content"))
self.assertEqual(list(response), [b"binary content"])
def test_response_nonzero_starting_position(self):
test_tuples = (
("BytesIO", io.BytesIO),
("UnseekableBytesIO", UnseekableBytesIO),
)
for buffer_class_name, BufferClass in test_tuples:
with self.subTest(buffer_class_name=buffer_class_name):
buffer = BufferClass(b"binary content")
buffer.seek(10)
response = FileResponse(buffer)
self.assertEqual(list(response), [b"tent"])
def test_buffer_explicit_absolute_filename(self):
"""
Headers are set correctly with a buffer when an absolute filename is
provided.
"""
response = FileResponse(io.BytesIO(b"binary content"), filename=__file__)
self.assertEqual(response.headers["Content-Length"], "14")
self.assertEqual(
response.headers["Content-Disposition"],
'inline; filename="test_fileresponse.py"',
)
@skipIf(sys.platform == "win32", "Named pipes are Unix-only.")
def test_file_from_named_pipe_response(self):
with tempfile.TemporaryDirectory() as temp_dir:
pipe_file = os.path.join(temp_dir, "named_pipe")
os.mkfifo(pipe_file)
pipe_for_read = os.open(pipe_file, os.O_RDONLY | os.O_NONBLOCK)
with open(pipe_file, "wb") as pipe_for_write:
pipe_for_write.write(b"binary content")
response = FileResponse(os.fdopen(pipe_for_read, mode="rb"))
response_content = list(response)
response.close()
self.assertEqual(response_content, [b"binary content"])
self.assertFalse(response.has_header("Content-Length"))
def test_compressed_response(self):
"""
If compressed responses are served with the uncompressed Content-Type
and a compression Content-Encoding, browsers might automatically
uncompress the file, which is most probably not wanted.
"""
test_tuples = (
(".tar.gz", "application/gzip"),
(".tar.bz2", "application/x-bzip"),
(".tar.xz", "application/x-xz"),
)
for extension, mimetype in test_tuples:
with self.subTest(ext=extension):
with tempfile.NamedTemporaryFile(suffix=extension) as tmp:
response = FileResponse(tmp)
self.assertEqual(response.headers["Content-Type"], mimetype)
self.assertFalse(response.has_header("Content-Encoding"))
def test_unicode_attachment(self):
response = FileResponse(
ContentFile(b"binary content", name="祝您平安.odt"),
as_attachment=True,
content_type="application/vnd.oasis.opendocument.text",
)
self.assertEqual(
response.headers["Content-Type"],
"application/vnd.oasis.opendocument.text",
)
self.assertEqual(
response.headers["Content-Disposition"],
"attachment; filename*=utf-8''%E7%A5%9D%E6%82%A8%E5%B9%B3%E5%AE%89.odt",
)
def test_repr(self):
response = FileResponse(io.BytesIO(b"binary content"))
self.assertEqual(
repr(response),
'<FileResponse status_code=200, "application/octet-stream">',
)
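# Editor's hedged note (an addition, not part of the original module): the
# Content-Type defaults exercised above follow the standard library's
# mimetypes guess for the file's *name*, with "application/octet-stream" as
# the fallback for unknown extensions.
import mimetypes
assert mimetypes.guess_type("test_fileresponse.py")[0] in ("text/x-python", "text/plain")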
|
8f172b2dfabd14e64c4c8837f9688ed2a1dde64c0c71923e73dcdbefae04d04e | from importlib import import_module
from django.apps import apps
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.db import DEFAULT_DB_ALIAS, connections
from django.test import TransactionTestCase
remove_content_type_name = import_module(
"django.contrib.contenttypes.migrations.0002_remove_content_type_name"
)
class MultiDBRemoveContentTypeNameTests(TransactionTestCase):
databases = {"default", "other"}
available_apps = ["django.contrib.auth", "django.contrib.contenttypes"]
def test_add_legacy_name_other_database(self):
        # add_legacy_name() should update ContentType objects in the specified
        # database. Remove ContentTypes from the default database to make it
        # clear which database they are fetched from.
Permission.objects.all().delete()
ContentType.objects.all().delete()
# ContentType.name in the current version is a property and cannot be
# set, so an AttributeError is raised with the other database.
with self.assertRaises(AttributeError):
with connections["other"].schema_editor() as editor:
remove_content_type_name.add_legacy_name(apps, editor)
        # ContentTypes were removed from the default database, so the same
        # call succeeds there because there is nothing to update.
with connections[DEFAULT_DB_ALIAS].schema_editor() as editor:
remove_content_type_name.add_legacy_name(apps, editor)
|
df1a0637358bf90fcd69d6b81faf71eecd685d4edb83bea167cc731836662c8f | import datetime
import sys
import unittest
from pathlib import Path
from unittest import mock
from urllib.parse import quote_plus
from django.test import SimpleTestCase
from django.utils.encoding import (
DjangoUnicodeDecodeError,
escape_uri_path,
filepath_to_uri,
force_bytes,
force_str,
get_system_encoding,
iri_to_uri,
repercent_broken_unicode,
smart_bytes,
smart_str,
uri_to_iri,
)
from django.utils.functional import SimpleLazyObject
from django.utils.translation import gettext_lazy
class TestEncodingUtils(SimpleTestCase):
def test_force_str_exception(self):
"""
Broken __str__ actually raises an error.
"""
class MyString:
def __str__(self):
return b"\xc3\xb6\xc3\xa4\xc3\xbc"
# str(s) raises a TypeError if the result is not a text type.
with self.assertRaises(TypeError):
force_str(MyString())
def test_force_str_lazy(self):
s = SimpleLazyObject(lambda: "x")
self.assertIs(type(force_str(s)), str)
def test_force_str_DjangoUnicodeDecodeError(self):
msg = (
"'utf-8' codec can't decode byte 0xff in position 0: invalid "
"start byte. You passed in b'\\xff' (<class 'bytes'>)"
)
with self.assertRaisesMessage(DjangoUnicodeDecodeError, msg):
force_str(b"\xff")
def test_force_bytes_exception(self):
"""
        force_bytes knows how to convert an exception containing non-ASCII
        characters in its args to bytes.
"""
error_msg = "This is an exception, voilà"
exc = ValueError(error_msg)
self.assertEqual(force_bytes(exc), error_msg.encode())
self.assertEqual(
force_bytes(exc, encoding="ascii", errors="ignore"),
b"This is an exception, voil",
)
def test_force_bytes_strings_only(self):
today = datetime.date.today()
self.assertEqual(force_bytes(today, strings_only=True), today)
def test_force_bytes_encoding(self):
error_msg = "This is an exception, voilà".encode()
result = force_bytes(error_msg, encoding="ascii", errors="ignore")
self.assertEqual(result, b"This is an exception, voil")
def test_force_bytes_memory_view(self):
data = b"abc"
result = force_bytes(memoryview(data))
# Type check is needed because memoryview(bytes) == bytes.
self.assertIs(type(result), bytes)
self.assertEqual(result, data)
def test_smart_bytes(self):
class Test:
def __str__(self):
return "ŠĐĆŽćžšđ"
lazy_func = gettext_lazy("x")
self.assertIs(smart_bytes(lazy_func), lazy_func)
self.assertEqual(
smart_bytes(Test()),
b"\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91",
)
self.assertEqual(smart_bytes(1), b"1")
self.assertEqual(smart_bytes("foo"), b"foo")
def test_smart_str(self):
class Test:
def __str__(self):
return "ŠĐĆŽćžšđ"
lazy_func = gettext_lazy("x")
self.assertIs(smart_str(lazy_func), lazy_func)
self.assertEqual(
smart_str(Test()), "\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111"
)
self.assertEqual(smart_str(1), "1")
self.assertEqual(smart_str("foo"), "foo")
def test_get_default_encoding(self):
with mock.patch("locale.getlocale", side_effect=Exception):
self.assertEqual(get_system_encoding(), "ascii")
def test_repercent_broken_unicode_recursion_error(self):
# Prepare a string long enough to force a recursion error if the tested
# function uses recursion.
data = b"\xfc" * sys.getrecursionlimit()
try:
self.assertEqual(
repercent_broken_unicode(data), b"%FC" * sys.getrecursionlimit()
)
except RecursionError:
self.fail("Unexpected RecursionError raised.")
class TestRFC3987IEncodingUtils(unittest.TestCase):
def test_filepath_to_uri(self):
self.assertIsNone(filepath_to_uri(None))
self.assertEqual(
filepath_to_uri("upload\\чубака.mp4"),
"upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4",
)
self.assertEqual(filepath_to_uri(Path("upload/test.png")), "upload/test.png")
self.assertEqual(filepath_to_uri(Path("upload\\test.png")), "upload/test.png")
def test_iri_to_uri(self):
cases = [
# Valid UTF-8 sequences are encoded.
("red%09rosé#red", "red%09ros%C3%A9#red"),
("/blog/for/Jürgen Münster/", "/blog/for/J%C3%BCrgen%20M%C3%BCnster/"),
(
"locations/%s" % quote_plus("Paris & Orléans"),
"locations/Paris+%26+Orl%C3%A9ans",
),
# Reserved chars remain unescaped.
("%&", "%&"),
("red&♥ros%#red", "red&%E2%99%A5ros%#red"),
(gettext_lazy("red&♥ros%#red"), "red&%E2%99%A5ros%#red"),
]
for iri, uri in cases:
with self.subTest(iri):
self.assertEqual(iri_to_uri(iri), uri)
# Test idempotency.
self.assertEqual(iri_to_uri(iri_to_uri(iri)), uri)
def test_uri_to_iri(self):
cases = [
(None, None),
# Valid UTF-8 sequences are decoded.
("/%e2%89%Ab%E2%99%a5%E2%89%aB/", "/≫♥≫/"),
("/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93", "/♥♥/?utf8=✓"),
("/%41%5a%6B/", "/AZk/"),
# Reserved and non-URL valid ASCII chars are not decoded.
("/%25%20%02%41%7b/", "/%25%20%02A%7b/"),
# Broken UTF-8 sequences remain escaped.
("/%AAd%AAj%AAa%AAn%AAg%AAo%AA/", "/%AAd%AAj%AAa%AAn%AAg%AAo%AA/"),
("/%E2%99%A5%E2%E2%99%A5/", "/♥%E2♥/"),
("/%E2%99%A5%E2%99%E2%99%A5/", "/♥%E2%99♥/"),
("/%E2%E2%99%A5%E2%99%A5%99/", "/%E2♥♥%99/"),
(
"/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93",
"/♥♥/?utf8=%9C%93✓%9C%93",
),
]
for uri, iri in cases:
with self.subTest(uri):
self.assertEqual(uri_to_iri(uri), iri)
# Test idempotency.
self.assertEqual(uri_to_iri(uri_to_iri(uri)), iri)
def test_complementarity(self):
cases = [
(
"/blog/for/J%C3%BCrgen%20M%C3%BCnster/",
"/blog/for/J\xfcrgen%20M\xfcnster/",
),
("%&", "%&"),
("red&%E2%99%A5ros%#red", "red&♥ros%#red"),
("/%E2%99%A5%E2%99%A5/", "/♥♥/"),
("/%E2%99%A5%E2%99%A5/?utf8=%E2%9C%93", "/♥♥/?utf8=✓"),
("/%25%20%02%7b/", "/%25%20%02%7b/"),
("/%AAd%AAj%AAa%AAn%AAg%AAo%AA/", "/%AAd%AAj%AAa%AAn%AAg%AAo%AA/"),
("/%E2%99%A5%E2%E2%99%A5/", "/♥%E2♥/"),
("/%E2%99%A5%E2%99%E2%99%A5/", "/♥%E2%99♥/"),
("/%E2%E2%99%A5%E2%99%A5%99/", "/%E2♥♥%99/"),
(
"/%E2%99%A5%E2%99%A5/?utf8=%9C%93%E2%9C%93%9C%93",
"/♥♥/?utf8=%9C%93✓%9C%93",
),
]
for uri, iri in cases:
with self.subTest(uri):
self.assertEqual(iri_to_uri(uri_to_iri(uri)), uri)
self.assertEqual(uri_to_iri(iri_to_uri(iri)), iri)
def test_escape_uri_path(self):
cases = [
(
"/;some/=awful/?path/:with/@lots/&of/+awful/chars",
"/%3Bsome/%3Dawful/%3Fpath/:with/@lots/&of/+awful/chars",
),
("/foo#bar", "/foo%23bar"),
("/foo?bar", "/foo%3Fbar"),
]
for uri, expected in cases:
with self.subTest(uri):
self.assertEqual(escape_uri_path(uri), expected)
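# Editor's hedged demo (an addition, not part of the original module): one
# concrete instance of the round-trip property exercised by
# test_complementarity above.
assert iri_to_uri(uri_to_iri("/%E2%99%A5%E2%99%A5/")) == "/%E2%99%A5%E2%99%A5/"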
|
1fcb693fa020121323a920dac0e37e8fb0e22094955576d6e787f43aafa0cb99 | import os
from datetime import datetime
from django.test import SimpleTestCase
from django.utils.functional import lazystr
from django.utils.html import (
conditional_escape,
escape,
escapejs,
format_html,
html_safe,
json_script,
linebreaks,
smart_urlquote,
strip_spaces_between_tags,
strip_tags,
urlize,
)
from django.utils.safestring import mark_safe
class TestUtilsHtml(SimpleTestCase):
def check_output(self, function, value, output=None):
"""
        Assert that function(value) equals output, or equals value when
        output is None.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
        items = (
            ("&", "&amp;"),
            ("<", "&lt;"),
            (">", "&gt;"),
            ('"', "&quot;"),
            ("'", "&#x27;"),
        )
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
with self.subTest(value=value, output=output):
for pattern in patterns:
with self.subTest(value=value, output=output, pattern=pattern):
self.check_output(escape, pattern % value, pattern % output)
self.check_output(
escape, lazystr(pattern % value), pattern % output
)
# Check repeated values.
self.check_output(escape, value * 2, output * 2)
# Verify it doesn't double replace &.
self.check_output(escape, "<&", "<&")
def test_format_html(self):
self.assertEqual(
format_html(
"{} {} {third} {fourth}",
"< Dangerous >",
mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=mark_safe("<i>safe again</i>"),
),
"< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>",
)
def test_linebreaks(self):
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
(
"para1\nsub1\rsub2\n\npara2",
"<p>para1<br>sub1<br>sub2</p>\n\n<p>para2</p>",
),
(
"para1\r\n\r\npara2\rsub1\r\rpara4",
"<p>para1</p>\n\n<p>para2<br>sub1</p>\n\n<p>para4</p>",
),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(linebreaks, value, output)
self.check_output(linebreaks, lazystr(value), output)
def test_strip_tags(self):
items = (
            (
                "<p>See: &#39;&eacute; is an apostrophe followed by e acute</p>",
                "See: &#39;&eacute; is an apostrophe followed by e acute",
            ),
            (
                "<p>See: &#x27;&eacute; is an apostrophe followed by e acute</p>",
                "See: &#x27;&eacute; is an apostrophe followed by e acute",
            ),
("<adf>a", "a"),
("</adf>a", "a"),
("<asdf><asdf>e", "e"),
("hi, <f x", "hi, <f x"),
("234<235, right?", "234<235, right?"),
("a4<a5 right?", "a4<a5 right?"),
("b7>b2!", "b7>b2!"),
("</fe", "</fe"),
("<x>b<y>", "b"),
("a<p onclick=\"alert('<test>')\">b</p>c", "abc"),
("a<p a >b</p>c", "abc"),
("d<a:b c:d>e</p>f", "def"),
('<strong>foo</strong><a href="http://example.com">bar</a>', "foobar"),
# caused infinite loop on Pythons not patched with
# https://bugs.python.org/issue20288
("&gotcha&#;<>", "&gotcha&#;<>"),
("<sc<!-- -->ript>test<<!-- -->/script>", "ript>test"),
("<script>alert()</script>&h", "alert()h"),
("><!" + ("&" * 16000) + "D", "><!" + ("&" * 16000) + "D"),
("X<<<<br>br>br>br>X", "XX"),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(strip_tags, value, output)
self.check_output(strip_tags, lazystr(value), output)
def test_strip_tags_files(self):
# Test with more lengthy content (also catching performance regressions)
for filename in ("strip_tags1.html", "strip_tags2.txt"):
with self.subTest(filename=filename):
path = os.path.join(os.path.dirname(__file__), "files", filename)
with open(path) as fp:
content = fp.read()
start = datetime.now()
stripped = strip_tags(content)
elapsed = datetime.now() - start
self.assertEqual(elapsed.seconds, 0)
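                # elapsed.seconds == 0 asserts that the call finished in under
                # one second; a pathologically slow strip_tags() fails here.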
self.assertIn("Test string that has not been stripped.", stripped)
self.assertNotIn("<", stripped)
def test_strip_spaces_between_tags(self):
# Strings that should come out untouched.
items = (" <adf>", "<adf> ", " </adf> ", " <f> x</f>")
for value in items:
with self.subTest(value=value):
self.check_output(strip_spaces_between_tags, value)
self.check_output(strip_spaces_between_tags, lazystr(value))
# Strings that have spaces to strip.
items = (
("<d> </d>", "<d></d>"),
("<p>hello </p>\n<p> world</p>", "<p>hello </p><p> world</p>"),
("\n<p>\t</p>\n<p> </p>\n", "\n<p></p><p></p>\n"),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(strip_spaces_between_tags, value, output)
self.check_output(strip_spaces_between_tags, lazystr(value), output)
def test_escapejs(self):
items = (
(
"\"double quotes\" and 'single quotes'",
"\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027",
),
(r"\ : backslashes, too", "\\u005C : backslashes, too"),
(
"and lots of whitespace: \r\n\t\v\f\b",
"and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008",
),
(
r"<script>and this</script>",
"\\u003Cscript\\u003Eand this\\u003C/script\\u003E",
),
(
"paragraph separator:\u2029and line separator:\u2028",
"paragraph separator:\\u2029and line separator:\\u2028",
),
("`", "\\u0060"),
)
for value, output in items:
with self.subTest(value=value, output=output):
self.check_output(escapejs, value, output)
self.check_output(escapejs, lazystr(value), output)
def test_json_script(self):
tests = (
# "<", ">" and "&" are quoted inside JSON strings
(
(
"&<>",
'<script id="test_id" type="application/json">'
'"\\u0026\\u003C\\u003E"</script>',
)
),
# "<", ">" and "&" are quoted inside JSON objects
(
{"a": "<script>test&ing</script>"},
'<script id="test_id" type="application/json">'
'{"a": "\\u003Cscript\\u003Etest\\u0026ing\\u003C/script\\u003E"}'
"</script>",
),
# Lazy strings are quoted
(
lazystr("&<>"),
'<script id="test_id" type="application/json">"\\u0026\\u003C\\u003E"'
"</script>",
),
(
{"a": lazystr("<script>test&ing</script>")},
'<script id="test_id" type="application/json">'
'{"a": "\\u003Cscript\\u003Etest\\u0026ing\\u003C/script\\u003E"}'
"</script>",
),
)
for arg, expected in tests:
with self.subTest(arg=arg):
self.assertEqual(json_script(arg, "test_id"), expected)
def test_json_script_without_id(self):
self.assertHTMLEqual(
json_script({"key": "value"}),
'<script type="application/json">{"key": "value"}</script>',
)
def test_smart_urlquote(self):
items = (
("http://öäü.com/", "http://xn--4ca9at.com/"),
("http://öäü.com/öäü/", "http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/"),
# Everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered
# safe as per RFC.
(
"http://example.com/path/öäü/",
"http://example.com/path/%C3%B6%C3%A4%C3%BC/",
),
("http://example.com/%C3%B6/ä/", "http://example.com/%C3%B6/%C3%A4/"),
("http://example.com/?x=1&y=2+3&z=", "http://example.com/?x=1&y=2+3&z="),
("http://example.com/?x=<>\"'", "http://example.com/?x=%3C%3E%22%27"),
(
"http://example.com/?q=http://example.com/?x=1%26q=django",
"http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3D"
"django",
),
(
"http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3D"
"django",
"http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3D"
"django",
),
("http://.www.f oo.bar/", "http://.www.f%20oo.bar/"),
)
# IDNs are properly quoted
for value, output in items:
with self.subTest(value=value, output=output):
self.assertEqual(smart_urlquote(value), output)
def test_conditional_escape(self):
s = "<h1>interop</h1>"
        self.assertEqual(conditional_escape(s), "&lt;h1&gt;interop&lt;/h1&gt;")
self.assertEqual(conditional_escape(mark_safe(s)), s)
self.assertEqual(conditional_escape(lazystr(mark_safe(s))), s)
def test_html_safe(self):
@html_safe
class HtmlClass:
def __str__(self):
return "<h1>I'm a html class!</h1>"
html_obj = HtmlClass()
self.assertTrue(hasattr(HtmlClass, "__html__"))
self.assertTrue(hasattr(html_obj, "__html__"))
self.assertEqual(str(html_obj), html_obj.__html__())
def test_html_safe_subclass(self):
class BaseClass:
def __html__(self):
# defines __html__ on its own
return "some html content"
def __str__(self):
return "some non html content"
@html_safe
class Subclass(BaseClass):
def __str__(self):
# overrides __str__ and is marked as html_safe
return "some html safe content"
subclass_obj = Subclass()
self.assertEqual(str(subclass_obj), subclass_obj.__html__())
def test_html_safe_defines_html_error(self):
msg = "can't apply @html_safe to HtmlClass because it defines __html__()."
with self.assertRaisesMessage(ValueError, msg):
@html_safe
class HtmlClass:
def __html__(self):
return "<h1>I'm a html class!</h1>"
def test_html_safe_doesnt_define_str(self):
msg = "can't apply @html_safe to HtmlClass because it doesn't define __str__()."
with self.assertRaisesMessage(ValueError, msg):
@html_safe
class HtmlClass:
pass
def test_urlize(self):
tests = (
(
"Search for google.com/?q=! and see.",
'Search for <a href="http://google.com/?q=">google.com/?q=</a>! and '
"see.",
),
(
"Search for google.com/?q=1<! and see.",
'Search for <a href="http://google.com/?q=1%3C">google.com/?q=1<'
"</a>! and see.",
),
(
lazystr("Search for google.com/?q=!"),
'Search for <a href="http://google.com/?q=">google.com/?q=</a>!',
),
("[email protected]", '<a href="mailto:[email protected]">[email protected]</a>'),
)
for value, output in tests:
with self.subTest(value=value):
self.assertEqual(urlize(value), output)
def test_urlize_unchanged_inputs(self):
tests = (
("a" + "@a" * 50000) + "a", # simple_email_re catastrophic test
("a" + "." * 1000000) + "a", # trailing_punctuation catastrophic test
"foo@",
"@foo.com",
"[email protected]",
"foo@localhost",
"foo@localhost.",
)
for value in tests:
with self.subTest(value=value):
self.assertEqual(urlize(value), value)
|
6f919466c706d6f9b70eccb193bd172d5e78f7aa2c09010347ad31834bf00071 | from django.db.models import BooleanField, Exists, F, OuterRef, Q
from django.db.models.expressions import RawSQL
from django.test import SimpleTestCase
from .models import Tag
class QTests(SimpleTestCase):
def test_combine_and_empty(self):
q = Q(x=1)
self.assertEqual(q & Q(), q)
self.assertEqual(Q() & q, q)
q = Q(x__in={}.keys())
self.assertEqual(q & Q(), q)
self.assertEqual(Q() & q, q)
def test_combine_and_both_empty(self):
self.assertEqual(Q() & Q(), Q())
def test_combine_or_empty(self):
q = Q(x=1)
self.assertEqual(q | Q(), q)
self.assertEqual(Q() | q, q)
q = Q(x__in={}.keys())
self.assertEqual(q | Q(), q)
self.assertEqual(Q() | q, q)
def test_combine_xor_empty(self):
q = Q(x=1)
self.assertEqual(q ^ Q(), q)
self.assertEqual(Q() ^ q, q)
q = Q(x__in={}.keys())
self.assertEqual(q ^ Q(), q)
self.assertEqual(Q() ^ q, q)
def test_combine_empty_copy(self):
base_q = Q(x=1)
tests = [
base_q | Q(),
Q() | base_q,
base_q & Q(),
Q() & base_q,
base_q ^ Q(),
Q() ^ base_q,
]
for i, q in enumerate(tests):
with self.subTest(i=i):
self.assertEqual(q, base_q)
self.assertIsNot(q, base_q)
def test_combine_or_both_empty(self):
self.assertEqual(Q() | Q(), Q())
def test_combine_xor_both_empty(self):
self.assertEqual(Q() ^ Q(), Q())
def test_combine_not_q_object(self):
obj = object()
q = Q(x=1)
with self.assertRaisesMessage(TypeError, str(obj)):
q | obj
with self.assertRaisesMessage(TypeError, str(obj)):
q & obj
with self.assertRaisesMessage(TypeError, str(obj)):
q ^ obj
def test_combine_negated_boolean_expression(self):
tagged = Tag.objects.filter(category=OuterRef("pk"))
tests = [
Q() & ~Exists(tagged),
Q() | ~Exists(tagged),
Q() ^ ~Exists(tagged),
]
for q in tests:
with self.subTest(q=q):
self.assertIs(q.negated, True)
def test_deconstruct(self):
q = Q(price__gt=F("discounted_price"))
path, args, kwargs = q.deconstruct()
self.assertEqual(path, "django.db.models.Q")
self.assertEqual(args, (("price__gt", F("discounted_price")),))
self.assertEqual(kwargs, {})
def test_deconstruct_negated(self):
q = ~Q(price__gt=F("discounted_price"))
path, args, kwargs = q.deconstruct()
self.assertEqual(args, (("price__gt", F("discounted_price")),))
self.assertEqual(kwargs, {"_negated": True})
def test_deconstruct_or(self):
q1 = Q(price__gt=F("discounted_price"))
q2 = Q(price=F("discounted_price"))
q = q1 | q2
path, args, kwargs = q.deconstruct()
self.assertEqual(
args,
(
("price__gt", F("discounted_price")),
("price", F("discounted_price")),
),
)
self.assertEqual(kwargs, {"_connector": "OR"})
def test_deconstruct_xor(self):
q1 = Q(price__gt=F("discounted_price"))
q2 = Q(price=F("discounted_price"))
q = q1 ^ q2
path, args, kwargs = q.deconstruct()
self.assertEqual(
args,
(
("price__gt", F("discounted_price")),
("price", F("discounted_price")),
),
)
self.assertEqual(kwargs, {"_connector": "XOR"})
def test_deconstruct_and(self):
q1 = Q(price__gt=F("discounted_price"))
q2 = Q(price=F("discounted_price"))
q = q1 & q2
path, args, kwargs = q.deconstruct()
self.assertEqual(
args,
(
("price__gt", F("discounted_price")),
("price", F("discounted_price")),
),
)
self.assertEqual(kwargs, {})
def test_deconstruct_multiple_kwargs(self):
q = Q(price__gt=F("discounted_price"), price=F("discounted_price"))
path, args, kwargs = q.deconstruct()
self.assertEqual(
args,
(
("price", F("discounted_price")),
("price__gt", F("discounted_price")),
),
)
self.assertEqual(kwargs, {})
def test_deconstruct_nested(self):
q = Q(Q(price__gt=F("discounted_price")))
path, args, kwargs = q.deconstruct()
self.assertEqual(args, (Q(price__gt=F("discounted_price")),))
self.assertEqual(kwargs, {})
def test_deconstruct_boolean_expression(self):
expr = RawSQL("1 = 1", BooleanField())
q = Q(expr)
_, args, kwargs = q.deconstruct()
self.assertEqual(args, (expr,))
self.assertEqual(kwargs, {})
def test_reconstruct(self):
q = Q(price__gt=F("discounted_price"))
path, args, kwargs = q.deconstruct()
self.assertEqual(Q(*args, **kwargs), q)
def test_reconstruct_negated(self):
q = ~Q(price__gt=F("discounted_price"))
path, args, kwargs = q.deconstruct()
self.assertEqual(Q(*args, **kwargs), q)
def test_reconstruct_or(self):
q1 = Q(price__gt=F("discounted_price"))
q2 = Q(price=F("discounted_price"))
q = q1 | q2
path, args, kwargs = q.deconstruct()
self.assertEqual(Q(*args, **kwargs), q)
def test_reconstruct_xor(self):
q1 = Q(price__gt=F("discounted_price"))
q2 = Q(price=F("discounted_price"))
q = q1 ^ q2
path, args, kwargs = q.deconstruct()
self.assertEqual(Q(*args, **kwargs), q)
def test_reconstruct_and(self):
q1 = Q(price__gt=F("discounted_price"))
q2 = Q(price=F("discounted_price"))
q = q1 & q2
path, args, kwargs = q.deconstruct()
self.assertEqual(Q(*args, **kwargs), q)
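# Editor's hedged demo (an addition, not part of the original module):
# deconstruct() exists so Q objects can be serialized into migration files,
# and its output can be fed straight back into Q(), as the test_reconstruct_*
# cases verify. The _demo_* names are made up for illustration.
_demo_q = Q(x=1) | Q(y=2)
_demo_path, _demo_args, _demo_kwargs = _demo_q.deconstruct()
assert Q(*_demo_args, **_demo_kwargs) == _demo_q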
|
187821cbb62330b8723010acd76d1a6b18212a1be076d24cb8a24f1d4ab30f6b | import datetime
import pickle
import sys
import unittest
from operator import attrgetter
from threading import Lock
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import Count, Exists, F, Max, OuterRef, Q
from django.db.models.expressions import RawSQL
from django.db.models.sql.constants import LOUTER
from django.db.models.sql.where import NothingNode, WhereNode
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext, ignore_warnings
from django.utils.deprecation import RemovedInDjango50Warning
from .models import (
FK1,
Annotation,
Article,
Author,
BaseA,
BaseUser,
Book,
CategoryItem,
CategoryRelationship,
Celebrity,
Channel,
Chapter,
Child,
ChildObjectA,
Classroom,
CommonMixedCaseForeignKeys,
Company,
Cover,
CustomPk,
CustomPkTag,
DateTimePK,
Detail,
DumbCategory,
Eaten,
Employment,
ExtraInfo,
Fan,
Food,
Identifier,
Individual,
Item,
Job,
JobResponsibilities,
Join,
LeafA,
LeafB,
LoopX,
LoopZ,
ManagedModel,
Member,
MixedCaseDbColumnCategoryItem,
MixedCaseFieldCategoryItem,
ModelA,
ModelB,
ModelC,
ModelD,
MyObject,
NamedCategory,
Node,
Note,
NullableName,
Number,
ObjectA,
ObjectB,
ObjectC,
OneToOneCategory,
Order,
OrderItem,
Page,
Paragraph,
Person,
Plaything,
PointerA,
Program,
ProxyCategory,
ProxyObjectA,
ProxyObjectB,
Ranking,
Related,
RelatedIndividual,
RelatedObject,
Report,
ReportComment,
ReservedName,
Responsibility,
School,
SharedConnection,
SimpleCategory,
SingleObject,
SpecialCategory,
Staff,
StaffUser,
Student,
Tag,
Task,
Teacher,
Ticket21203Child,
Ticket21203Parent,
Ticket23605A,
Ticket23605B,
Ticket23605C,
TvChef,
Valid,
X,
)
class Queries1Tests(TestCase):
@classmethod
def setUpTestData(cls):
cls.nc1 = generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
cls.t2 = Tag.objects.create(name="t2", parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name="t3", parent=cls.t1)
cls.t4 = Tag.objects.create(name="t4", parent=cls.t3)
cls.t5 = Tag.objects.create(name="t5", parent=cls.t3)
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.n2 = Note.objects.create(note="n2", misc="bar", id=2)
cls.n3 = Note.objects.create(note="n3", misc="foo", id=3, negate=False)
cls.ann1 = Annotation.objects.create(name="a1", tag=cls.t1)
cls.ann1.notes.add(cls.n1)
ann2 = Annotation.objects.create(name="a2", tag=cls.t4)
ann2.notes.add(cls.n2, cls.n3)
        # Create these out of order so that sorting by 'id' will be different
        # from sorting by 'info'. This helps detect some problems later.
cls.e2 = ExtraInfo.objects.create(
info="e2", note=cls.n2, value=41, filterable=False
)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1, value=42)
cls.a1 = Author.objects.create(name="a1", num=1001, extra=e1)
cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1)
cls.a3 = Author.objects.create(name="a3", num=3003, extra=cls.e2)
cls.a4 = Author.objects.create(name="a4", num=4004, extra=cls.e2)
cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
cls.i1 = Item.objects.create(
name="one",
created=cls.time1,
modified=cls.time1,
creator=cls.a1,
note=cls.n3,
)
cls.i1.tags.set([cls.t1, cls.t2])
cls.i2 = Item.objects.create(
name="two", created=cls.time2, creator=cls.a2, note=cls.n2
)
cls.i2.tags.set([cls.t1, cls.t3])
cls.i3 = Item.objects.create(
name="three", created=time3, creator=cls.a2, note=cls.n3
)
cls.i4 = Item.objects.create(
name="four", created=time4, creator=cls.a4, note=cls.n3
)
cls.i4.tags.set([cls.t4])
cls.r1 = Report.objects.create(name="r1", creator=cls.a1)
cls.r2 = Report.objects.create(name="r2", creator=cls.a3)
cls.r3 = Report.objects.create(name="r3")
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
# will be rank3, rank2, rank1.
cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
cls.c1 = Cover.objects.create(title="first", item=cls.i4)
cls.c2 = Cover.objects.create(title="second", item=cls.i2)
def test_subquery_condition(self):
qs1 = Tag.objects.filter(pk__lte=0)
qs2 = Tag.objects.filter(parent__in=qs1)
qs3 = Tag.objects.filter(parent__in=qs2)
self.assertEqual(qs3.query.subq_aliases, {"T", "U", "V"})
self.assertIn("v0", str(qs3.query).lower())
qs4 = qs3.filter(parent__in=qs1)
self.assertEqual(qs4.query.subq_aliases, {"T", "U", "V"})
# It is possible to reuse U for the second subquery, no need to use W.
self.assertNotIn("w0", str(qs4.query).lower())
# So, 'U0."id"' is referenced in SELECT and WHERE twice.
self.assertEqual(str(qs4.query).lower().count("u0."), 4)
def test_ticket1050(self):
self.assertSequenceEqual(
Item.objects.filter(tags__isnull=True),
[self.i3],
)
self.assertSequenceEqual(
Item.objects.filter(tags__id__isnull=True),
[self.i3],
)
def test_ticket1801(self):
self.assertSequenceEqual(
Author.objects.filter(item=self.i2),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(item=self.i3),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
[self.a2],
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).order_by("name"),
[self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(
Q(creator__name="fred") | Q(tags=self.t2)
),
[self.i1],
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)), []
)
self.assertSequenceEqual(
Item.objects.filter(
Q(tags=self.t1), Q(creator__name="fred") | Q(tags=self.t2)
),
[],
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertSequenceEqual(list(qs), [self.a2])
        self.assertEqual(qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertSequenceEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2])
.distinct()
.order_by("name"),
[self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
[self.i2],
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by("name")[:3],
[self.i1, self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(tags__in=[self.t1, self.t2])
.distinct()
.order_by("name")[:3],
[self.i1, self.i2],
)
def test_tickets_2080_3592(self):
self.assertSequenceEqual(
Author.objects.filter(item__name="one") | Author.objects.filter(name="a3"),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(item__name="one") | Q(name="a3")),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(name="a3") | Q(item__name="one")),
[self.a1, self.a3],
)
self.assertSequenceEqual(
Author.objects.filter(Q(item__name="three") | Q(report__name="r3")),
[self.a2],
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertSequenceEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertSequenceEqual(Author.objects.filter(Q(id__in=[]) | Q(id__in=[])), [])
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values("creator").distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name="four", created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name="two")
.values("creator", "name")
.distinct()
.count(),
4,
)
self.assertEqual(
(
Item.objects.exclude(name="two")
.extra(select={"foo": "%s"}, select_params=(1,))
.values("creator", "name", "foo")
.distinct()
.count()
),
4,
)
self.assertEqual(
(
Item.objects.exclude(name="two")
.extra(select={"foo": "%s"}, select_params=(1,))
.values("creator", "name")
.distinct()
.count()
),
4,
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values("creator", "name").count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by("name")
q2 = Item.objects.filter(id=self.i1.id)
self.assertSequenceEqual(q1, [self.i4, self.i1, self.i3, self.i2])
self.assertSequenceEqual(q2, [self.i1])
self.assertSequenceEqual(
(q1 | q2).order_by("name"),
[self.i4, self.i1, self.i3, self.i2],
)
self.assertSequenceEqual((q1 & q2).order_by("name"), [self.i1])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertSequenceEqual(
((q1 & q2) | q3).order_by("name"),
[self.i4, self.i1],
)
def test_order_by_tables(self):
q1 = Item.objects.order_by("name")
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by("name").query
self.assertEqual(
len(
[
t
for t in combined_query.alias_map
if combined_query.alias_refcount[t]
]
),
1,
)
def test_order_by_join_unref(self):
"""
This test is related to the above one, testing that there aren't
old JOINs in the query.
"""
qs = Celebrity.objects.order_by("greatest_fan__fan_of")
self.assertIn("OUTER JOIN", str(qs.query))
qs = qs.order_by("id")
self.assertNotIn("OUTER JOIN", str(qs.query))
def test_get_clears_ordering(self):
"""
get() should clear ordering for optimization purposes.
"""
with CaptureQueriesContext(connection) as captured_queries:
Author.objects.order_by("name").get(pk=self.a1.pk)
self.assertNotIn("order by", captured_queries[0]["sql"].lower())
def test_tickets_4088_4306(self):
self.assertSequenceEqual(Report.objects.filter(creator=1001), [self.r1])
self.assertSequenceEqual(Report.objects.filter(creator__num=1001), [self.r1])
self.assertSequenceEqual(Report.objects.filter(creator__id=1001), [])
self.assertSequenceEqual(
Report.objects.filter(creator__id=self.a1.id), [self.r1]
)
self.assertSequenceEqual(Report.objects.filter(creator__name="a1"), [self.r1])
def test_ticket4510(self):
self.assertSequenceEqual(
Author.objects.filter(report__name="r1"),
[self.a1],
)
def test_ticket7378(self):
self.assertSequenceEqual(self.a1.report_set.all(), [self.r1])
def test_tickets_5324_6704(self):
self.assertSequenceEqual(
Item.objects.filter(tags__name="t4"),
[self.i4],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t4").order_by("name").distinct(),
[self.i1, self.i3, self.i2],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t4").order_by("name").distinct().reverse(),
[self.i2, self.i3, self.i1],
)
self.assertSequenceEqual(
Author.objects.exclude(item__name="one").distinct().order_by("name"),
[self.a2, self.a3, self.a4],
)
# Excluding across a m2m relation when there is more than one related
# object associated was problematic.
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1").order_by("name"),
[self.i4, self.i3],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1").exclude(tags__name="t4"),
[self.i3],
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
)
self.assertEqual(
len(
[
x
for x in qs.query.alias_map.values()
if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]
]
),
1,
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertSequenceEqual(
Tag.objects.filter(parent__isnull=True).order_by("name"), [self.t1]
)
self.assertSequenceEqual(
Tag.objects.exclude(parent__isnull=True).order_by("name"),
[self.t2, self.t3, self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__name="t1") | Q(parent__isnull=True)).order_by(
"name"
),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name="t1")).order_by(
"name"
),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by("name"),
[self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by("name"),
[self.t4, self.t5],
)
def test_ticket2091(self):
t = Tag.objects.get(name="t4")
self.assertSequenceEqual(Item.objects.filter(tags__in=[t]), [self.i4])
def test_avoid_infinite_loop_on_too_many_subqueries(self):
x = Tag.objects.filter(pk=1)
local_recursion_limit = sys.getrecursionlimit() // 16
msg = "Maximum recursion depth exceeded: too many subqueries."
with self.assertRaisesMessage(RecursionError, msg):
for i in range(local_recursion_limit + 2):
x = Tag.objects.filter(pk__in=x)
def test_reasonable_number_of_subq_aliases(self):
x = Tag.objects.filter(pk=1)
for _ in range(20):
x = Tag.objects.filter(pk__in=x)
self.assertEqual(
x.query.subq_aliases,
{
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"AA",
"AB",
"AC",
"AD",
"AE",
"AF",
"AG",
"AH",
"AI",
"AJ",
"AK",
"AL",
"AM",
"AN",
},
)
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
msg = "Cannot combine queries on two different base models."
with self.assertRaisesMessage(TypeError, msg):
Author.objects.all() & Tag.objects.all()
with self.assertRaisesMessage(TypeError, msg):
Author.objects.all() | Tag.objects.all()
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={"foo": "1"}).count(), 4)
self.assertEqual(
Author.objects.extra(select={"foo": "%s"}, select_params=(1,)).count(), 4
)
def test_ticket2400(self):
self.assertSequenceEqual(
Author.objects.filter(item__isnull=True),
[self.a3],
)
self.assertSequenceEqual(
Tag.objects.filter(item__isnull=True),
[self.t5],
)
def test_ticket2496(self):
self.assertSequenceEqual(
Item.objects.extra(tables=["queries_author"])
.select_related()
.order_by("name")[:1],
[self.i4],
)
def test_error_raised_on_filter_with_dictionary(self):
with self.assertRaisesMessage(FieldError, "Cannot parse keyword query as dict"):
Note.objects.filter({"note": "n1", "misc": "foo"})
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertSequenceEqual(
Item.objects.order_by("note__note", "name"),
[self.i2, self.i4, self.i1, self.i3],
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertSequenceEqual(
Author.objects.order_by("extra", "-name"),
[self.a2, self.a1, self.a4, self.a3],
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertSequenceEqual(Cover.objects.all(), [self.c1, self.c2])
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertSequenceEqual(
Item.objects.order_by("creator", "name"),
[self.i1, self.i3, self.i2, self.i4],
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
self.assertSequenceEqual(
Item.objects.filter(tags__isnull=False).order_by("tags", "id"),
[self.i1, self.i2, self.i1, self.i2, self.i4],
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by("name")
self.assertSequenceEqual(qs, [self.i4, self.i1, self.i3, self.i2])
self.assertEqual(len(qs.query.alias_map), 1)
def test_tickets_2874_3002(self):
qs = Item.objects.select_related().order_by("note__note", "name")
self.assertQuerysetEqual(qs, [self.i2, self.i4, self.i1, self.i3])
# This is also a good select_related() test because there are multiple
# Note entries in the SQL. The two Note items should be different.
self.assertEqual(repr(qs[0].note), "<Note: n2>")
self.assertEqual(repr(qs[0].creator.extra.note), "<Note: n1>")
def test_ticket3037(self):
self.assertSequenceEqual(
Item.objects.filter(
Q(creator__name="a3", name="two") | Q(creator__name="a4", name="four")
),
[self.i4],
)
def test_tickets_5321_7070(self):
# Ordering columns must be included in the output columns. Note that
# this means results that might otherwise be distinct are not (if there
# are multiple values in the ordering cols), as in this example. This
# isn't a bug; it's a warning to be careful with the selection of
# ordering columns.
self.assertSequenceEqual(
Note.objects.values("misc").distinct().order_by("note", "-misc"),
[{"misc": "foo"}, {"misc": "bar"}, {"misc": "foo"}],
)
def test_ticket4358(self):
# If you don't pass any fields to values(), relation fields are
# returned as "foo_id" keys, not "foo". For consistency, you should be
# able to pass "foo_id" in the fields list and have it work, too. We
# actually allow both "foo" and "foo_id".
# The *_id version is returned by default.
self.assertIn("note_id", ExtraInfo.objects.values()[0])
# You can also pass it in explicitly.
self.assertSequenceEqual(
ExtraInfo.objects.values("note_id"), [{"note_id": 1}, {"note_id": 2}]
)
# ...or use the field name.
self.assertSequenceEqual(
ExtraInfo.objects.values("note"), [{"note": 1}, {"note": 2}]
)
def test_ticket6154(self):
# Multiple filter statements are joined using "AND" all the time.
self.assertSequenceEqual(
Author.objects.filter(id=self.a1.id).filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
),
[self.a1],
)
self.assertSequenceEqual(
Author.objects.filter(
Q(extra__note=self.n1) | Q(item__note=self.n3)
).filter(id=self.a1.id),
[self.a1],
)
def test_ticket6981(self):
self.assertSequenceEqual(
Tag.objects.select_related("parent").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
def test_ticket9926(self):
self.assertSequenceEqual(
Tag.objects.select_related("parent", "category").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
self.assertSequenceEqual(
Tag.objects.select_related("parent", "parent__category").order_by("name"),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
def test_tickets_6180_6203(self):
# Dates with limits and/or counts
self.assertEqual(Item.objects.count(), 4)
self.assertEqual(Item.objects.datetimes("created", "month").count(), 1)
self.assertEqual(Item.objects.datetimes("created", "day").count(), 2)
self.assertEqual(len(Item.objects.datetimes("created", "day")), 2)
self.assertEqual(
Item.objects.datetimes("created", "day")[0],
datetime.datetime(2007, 12, 19, 0, 0),
)
def test_tickets_7087_12242(self):
# Dates with extra select columns
self.assertSequenceEqual(
Item.objects.datetimes("created", "day").extra(select={"a": 1}),
[
datetime.datetime(2007, 12, 19, 0, 0),
datetime.datetime(2007, 12, 20, 0, 0),
],
)
self.assertSequenceEqual(
Item.objects.extra(select={"a": 1}).datetimes("created", "day"),
[
datetime.datetime(2007, 12, 19, 0, 0),
datetime.datetime(2007, 12, 20, 0, 0),
],
)
name = "one"
self.assertSequenceEqual(
Item.objects.datetimes("created", "day").extra(
where=["name=%s"], params=[name]
),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
self.assertSequenceEqual(
Item.objects.extra(where=["name=%s"], params=[name]).datetimes(
"created", "day"
),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
def test_ticket7155(self):
# Nullable dates
self.assertSequenceEqual(
Item.objects.datetimes("modified", "day"),
[datetime.datetime(2007, 12, 19, 0, 0)],
)
def test_order_by_rawsql(self):
self.assertSequenceEqual(
Item.objects.values("note__note").order_by(
RawSQL("queries_note.note", ()),
"id",
),
[
{"note__note": "n2"},
{"note__note": "n3"},
{"note__note": "n3"},
{"note__note": "n3"},
],
)
def test_ticket7096(self):
# Make sure exclude() with multiple conditions continues to work.
self.assertSequenceEqual(
Tag.objects.filter(parent=self.t1, name="t3").order_by("name"),
[self.t3],
)
self.assertSequenceEqual(
Tag.objects.exclude(parent=self.t1, name="t3").order_by("name"),
[self.t1, self.t2, self.t4, self.t5],
)
self.assertSequenceEqual(
Item.objects.exclude(tags__name="t1", name="one")
.order_by("name")
.distinct(),
[self.i4, self.i3, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(name__in=["three", "four"])
.exclude(tags__name="t1")
.order_by("name"),
[self.i4, self.i3],
)
# More twisted cases, involving nested negations.
self.assertSequenceEqual(
Item.objects.exclude(~Q(tags__name="t1", name="one")),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(~Q(tags__name="t1", name="one"), name="two"),
[self.i2],
)
self.assertSequenceEqual(
Item.objects.exclude(~Q(tags__name="t1", name="one"), name="two"),
[self.i4, self.i1, self.i3],
)
def test_tickets_7204_7506(self):
# Make sure querysets with related fields can be pickled. If this
# doesn't crash, it's a Good Thing.
pickle.dumps(Item.objects.all())
def test_ticket7813(self):
# We should also be able to pickle things that use select_related().
# The only tricky thing here is to ensure that we do the related
# selections properly after unpickling.
qs = Item.objects.select_related()
query = qs.query.get_compiler(qs.db).as_sql()[0]
query2 = pickle.loads(pickle.dumps(qs.query))
self.assertEqual(query2.get_compiler(qs.db).as_sql()[0], query)
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer("name", "creator")
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
def test_ticket7277(self):
self.assertSequenceEqual(
self.n1.annotation_set.filter(
Q(tag=self.t5)
| Q(tag__children=self.t5)
| Q(tag__children__children=self.t5)
),
[self.ann1],
)
def test_tickets_7448_7707(self):
# Complex objects should be converted to strings before being used in
# lookups.
self.assertSequenceEqual(
Item.objects.filter(created__in=[self.time1, self.time2]),
[self.i1, self.i2],
)
def test_ticket7235(self):
# An EmptyQuerySet should not raise exceptions if it is filtered.
Eaten.objects.create(meal="m")
q = Eaten.objects.none()
with self.assertNumQueries(0):
self.assertQuerysetEqual(q.all(), [])
self.assertQuerysetEqual(q.filter(meal="m"), [])
self.assertQuerysetEqual(q.exclude(meal="m"), [])
self.assertQuerysetEqual(q.complex_filter({"pk": 1}), [])
self.assertQuerysetEqual(q.select_related("food"), [])
self.assertQuerysetEqual(q.annotate(Count("food")), [])
self.assertQuerysetEqual(q.order_by("meal", "food"), [])
self.assertQuerysetEqual(q.distinct(), [])
self.assertQuerysetEqual(q.extra(select={"foo": "1"}), [])
self.assertQuerysetEqual(q.reverse(), [])
q.query.low_mark = 1
msg = "Cannot change a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
q.extra(select={"foo": "1"})
self.assertQuerysetEqual(q.defer("meal"), [])
self.assertQuerysetEqual(q.only("meal"), [])
def test_ticket7791(self):
# There were "issues" when ordering and distinct-ing on fields related
# via ForeignKeys.
self.assertEqual(len(Note.objects.order_by("extrainfo__info").distinct()), 3)
# Pickling of QuerySets using datetimes() should work.
qs = Item.objects.datetimes("created", "month")
pickle.loads(pickle.dumps(qs))
def test_ticket9997(self):
        # If a values_list() or values() queryset is passed as an inner
        # query, we make sure it's only requesting a single value and use
        # that as the thing to select.
self.assertSequenceEqual(
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values("name")
),
[self.t2, self.t3],
)
# Multi-valued values() and values_list() querysets should raise errors.
with self.assertRaisesMessage(
TypeError, "Cannot use multi-field values as a filter value."
):
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values("name", "id")
)
with self.assertRaisesMessage(
TypeError, "Cannot use multi-field values as a filter value."
):
Tag.objects.filter(
name__in=Tag.objects.filter(parent=self.t1).values_list("name", "id")
)
def test_ticket9985(self):
# qs.values_list(...).values(...) combinations should work.
self.assertSequenceEqual(
Note.objects.values_list("note", flat=True).values("id").order_by("id"),
[{"id": 1}, {"id": 2}, {"id": 3}],
)
self.assertSequenceEqual(
Annotation.objects.filter(
notes__in=Note.objects.filter(note="n1")
.values_list("note")
.values("id")
),
[self.ann1],
)
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue
# properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
def test_ticket10432(self):
# Testing an empty "__in" filter with a generator as the value.
def f():
return iter([])
n_obj = Note.objects.all()[0]
def g():
yield n_obj.pk
self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
def test_ticket10742(self):
        # Querysets used in an __in clause aren't evaluated separately; they
        # run as subqueries of the outer query.
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.filter(pk__in=subq)
self.assertSequenceEqual(qs, [self.a1, self.a2])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.exclude(pk__in=subq)
self.assertSequenceEqual(qs, [self.a3, self.a4])
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
subq = Author.objects.filter(num__lt=3000)
self.assertSequenceEqual(
Author.objects.filter(Q(pk__in=subq) & Q(name="a1")),
[self.a1],
)
# The subquery result cache should not be populated
self.assertIsNone(subq._result_cache)
def test_ticket7076(self):
# Excluding shouldn't eliminate NULL entries.
self.assertSequenceEqual(
Item.objects.exclude(modified=self.time1).order_by("name"),
[self.i4, self.i3, self.i2],
)
self.assertSequenceEqual(
Tag.objects.exclude(parent__name=self.t1.name),
[self.t1, self.t4, self.t5],
)
def test_ticket7181(self):
# Ordering by related tables should accommodate nullable fields (this
# test is a little tricky, since NULL ordering is database dependent.
# Instead, we just count the number of results).
self.assertEqual(len(Tag.objects.order_by("parent__name")), 5)
# Empty querysets can be merged with others.
self.assertSequenceEqual(
Note.objects.none() | Note.objects.all(),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(
Note.objects.all() | Note.objects.none(),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(Note.objects.none() & Note.objects.all(), [])
self.assertSequenceEqual(Note.objects.all() & Note.objects.none(), [])
def test_ticket8439(self):
# Complex combinations of conjunctions, disjunctions and nullable
# relations.
self.assertSequenceEqual(
Author.objects.filter(
Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name="xyz")
),
[self.a2],
)
self.assertSequenceEqual(
Author.objects.filter(
Q(report=self.r1, name="xyz") | Q(item__note__extrainfo=self.e2)
),
[self.a2],
)
self.assertSequenceEqual(
Annotation.objects.filter(
Q(tag__parent=self.t1) | Q(notes__note="n1", name="a1")
),
[self.ann1],
)
xx = ExtraInfo.objects.create(info="xx", note=self.n3)
self.assertSequenceEqual(
Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
[self.n1, self.n3],
)
q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
self.assertEqual(
len(
[
x
for x in q.alias_map.values()
if x.join_type == LOUTER and q.alias_refcount[x.table_alias]
]
),
1,
)
def test_ticket17429(self):
"""
Meta.ordering=None works the same as Meta.ordering=[]
"""
original_ordering = Tag._meta.ordering
Tag._meta.ordering = None
try:
self.assertCountEqual(
Tag.objects.all(),
[self.t1, self.t2, self.t3, self.t4, self.t5],
)
finally:
Tag._meta.ordering = original_ordering
def test_exclude(self):
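        # exclude(cond) should return the same rows as filter(~Q(cond)),
        # including when the condition is a combined Q object.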
self.assertQuerysetEqual(
Item.objects.exclude(tags__name="t4"),
Item.objects.filter(~Q(tags__name="t4")),
)
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name="t4") | Q(tags__name="t3")),
Item.objects.filter(~(Q(tags__name="t4") | Q(tags__name="t3"))),
)
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name="t4") | ~Q(tags__name="t3")),
Item.objects.filter(~(Q(tags__name="t4") | ~Q(tags__name="t3"))),
)
def test_nested_exclude(self):
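        # Excluding a negated Q is a double negation, equivalent to
        # filtering on the condition itself.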
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name="t4")),
Item.objects.filter(~~Q(tags__name="t4")),
)
def test_double_exclude(self):
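        # Explicit double negation should cancel out, whether spelled
        # ~~Q(...) or ~Q(~Q(...)).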
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name="t4")),
Item.objects.filter(~~Q(tags__name="t4")),
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name="t4")),
Item.objects.filter(~Q(~Q(tags__name="t4"))),
)
def test_exclude_in(self):
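        # The same filter()/exclude() symmetry should hold for __in lookups
        # across a multi-valued relation.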
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name__in=["t4", "t3"])),
Item.objects.filter(~Q(tags__name__in=["t4", "t3"])),
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name__in=["t4", "t3"])),
Item.objects.filter(~~Q(tags__name__in=["t4", "t3"])),
)
def test_ticket_10790_1(self):
# Querying direct fields with isnull should trim the left outer join.
# It also should not create INNER JOIN.
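        # e.g. roughly SELECT ... FROM queries_tag WHERE parent_id IS NULL,
        # with no JOIN clause at all.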
q = Tag.objects.filter(parent__isnull=True)
self.assertSequenceEqual(q, [self.t1])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.filter(parent__isnull=False)
self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__isnull=True)
self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__isnull=False)
self.assertSequenceEqual(q, [self.t1])
self.assertNotIn("JOIN", str(q.query))
q = Tag.objects.exclude(parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertNotIn("INNER JOIN", str(q.query))
def test_ticket_10790_2(self):
# Querying across several tables should strip only the last outer join,
# while preserving the preceding inner joins.
q = Tag.objects.filter(parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.t4, self.t5])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
# Querying without isnull should not convert anything to left outer join.
q = Tag.objects.filter(parent__parent=self.t1)
self.assertSequenceEqual(q, [self.t4, self.t5])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
def test_ticket_10790_3(self):
# Querying via indirect fields should populate the left outer join
q = NamedCategory.objects.filter(tag__isnull=True)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
# join to dumbcategory ptr_id
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
self.assertSequenceEqual(q, [])
# Querying across several tables should strip only the last join, while
# preserving the preceding left outer joins.
q = NamedCategory.objects.filter(tag__parent__isnull=True)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertSequenceEqual(q, [self.nc1])
def test_ticket_10790_4(self):
# Querying across m2m field should not strip the m2m table from join.
q = Author.objects.filter(item__tags__isnull=True)
self.assertSequenceEqual(q, [self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 2)
self.assertNotIn("INNER JOIN", str(q.query))
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3)
self.assertNotIn("INNER JOIN", str(q.query))
def test_ticket_10790_5(self):
# Querying with isnull=False across m2m field should not create outer joins
q = Author.objects.filter(item__tags__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a1, self.a2, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 2)
q = Author.objects.filter(item__tags__parent__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 3)
q = Author.objects.filter(item__tags__parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 4)
def test_ticket_10790_6(self):
# Querying with isnull=True across m2m field should not create inner joins
# and strip last outer join
q = Author.objects.filter(item__tags__parent__parent__isnull=True)
self.assertSequenceEqual(
q,
[self.a1, self.a1, self.a2, self.a2, self.a2, self.a3],
)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 4)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
def test_ticket_10790_7(self):
# Reverse querying with isnull should not strip the join
q = Author.objects.filter(item__isnull=True)
self.assertSequenceEqual(q, [self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
q = Author.objects.filter(item__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
def test_ticket_10790_8(self):
# Querying with combined q-objects should also strip the left outer join
q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
def test_ticket_10790_combine(self):
# Combining queries should not re-populate the left outer join
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__isnull=False)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3, self.t4, self.t5])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q1 & q2
self.assertSequenceEqual(q3, [])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q2 = Tag.objects.filter(parent=self.t1)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q2 | q1
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__parent__isnull=True)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q2 | q1
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
def test_ticket19672(self):
self.assertSequenceEqual(
Report.objects.filter(
Q(creator__isnull=False) & ~Q(creator__extra__value=41)
),
[self.r1],
)
def test_ticket_20250(self):
# A negated Q along with an annotated queryset failed in Django 1.4
qs = Author.objects.annotate(Count("item"))
qs = qs.filter(~Q(extra__value=0)).order_by("name")
self.assertIn("SELECT", str(qs.query))
self.assertSequenceEqual(qs, [self.a1, self.a2, self.a3, self.a4])
def test_lookup_constraint_fielderror(self):
msg = (
"Cannot resolve keyword 'unknown_field' into field. Choices are: "
"annotation, category, category_id, children, id, item, "
"managedmodel, name, note, parent, parent_id"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(unknown_field__name="generic")
def test_common_mixed_case_foreign_keys(self):
"""
Valid query should be generated when fields fetched from joined tables
include FKs whose names only differ by case.
"""
c1 = SimpleCategory.objects.create(name="c1")
c2 = SimpleCategory.objects.create(name="c2")
c3 = SimpleCategory.objects.create(name="c3")
category = CategoryItem.objects.create(category=c1)
mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(
CaTeGoRy=c2
)
mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(
category=c3
)
CommonMixedCaseForeignKeys.objects.create(
category=category,
mixed_case_field_category=mixed_case_field_category,
mixed_case_db_column_category=mixed_case_db_column_category,
)
qs = CommonMixedCaseForeignKeys.objects.values(
"category",
"mixed_case_field_category",
"mixed_case_db_column_category",
"category__category",
"mixed_case_field_category__CaTeGoRy",
"mixed_case_db_column_category__category",
)
self.assertTrue(qs.first())
def test_excluded_intermediary_m2m_table_joined(self):
self.assertSequenceEqual(
Note.objects.filter(~Q(tag__annotation__name=F("note"))),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(
Note.objects.filter(tag__annotation__name="a1").filter(
~Q(tag__annotation__name=F("note"))
),
[],
)
def test_field_with_filterable(self):
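        # Filtering on a relation to a model that defines a field named
        # "filterable" should still work; presumably the field must not be
        # confused with the internal Expression.filterable flag.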
self.assertSequenceEqual(
Author.objects.filter(extra=self.e2),
[self.a3, self.a4],
)
def test_negate_field(self):
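        # A model field named "negate" should be usable in filter() and
        # exclude() without clashing with the internal negation handling.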
self.assertSequenceEqual(
Note.objects.filter(negate=True),
[self.n1, self.n2],
)
self.assertSequenceEqual(Note.objects.exclude(negate=True), [self.n3])
class Queries2Tests(TestCase):
@classmethod
def setUpTestData(cls):
cls.num4 = Number.objects.create(num=4)
cls.num8 = Number.objects.create(num=8)
cls.num12 = Number.objects.create(num=12)
def test_ticket4289(self):
        # A slight variation on restricting the filtering choices by the
        # lookup constraints.
self.assertSequenceEqual(Number.objects.filter(num__lt=4), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
self.assertSequenceEqual(
Number.objects.filter(num__gt=8, num__lt=13),
[self.num12],
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)), []
)
self.assertSequenceEqual(
Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
[self.num8],
)
def test_ticket12239(self):
# Custom lookups are registered to round float values correctly on gte
# and lt IntegerField queries.
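        # e.g. num__gt=11.9 still matches num=12, while num__gte=12.1
        # matches nothing.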
self.assertSequenceEqual(
Number.objects.filter(num__gt=11.9),
[self.num12],
)
self.assertSequenceEqual(Number.objects.filter(num__gt=12), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=12.0), [])
self.assertSequenceEqual(Number.objects.filter(num__gt=12.1), [])
self.assertCountEqual(
Number.objects.filter(num__lt=12),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lt=12.0),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lt=12.1),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=11.9),
[self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=12),
[self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__gte=12.0),
[self.num12],
)
self.assertSequenceEqual(Number.objects.filter(num__gte=12.1), [])
self.assertSequenceEqual(Number.objects.filter(num__gte=12.9), [])
self.assertCountEqual(
Number.objects.filter(num__lte=11.9),
[self.num4, self.num8],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.0),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.1),
[self.num4, self.num8, self.num12],
)
self.assertCountEqual(
Number.objects.filter(num__lte=12.9),
[self.num4, self.num8, self.num12],
)
def test_ticket7759(self):
# Count should work with a partially read result set.
count = Number.objects.count()
qs = Number.objects.all()
def run():
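            # Calling count() while iterating over qs must not disturb the
            # partially read result set.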
for obj in qs:
return qs.count() == count
self.assertTrue(run())
class Queries3Tests(TestCase):
def test_ticket7107(self):
# This shouldn't create an infinite loop.
self.assertQuerysetEqual(Valid.objects.all(), [])
def test_datetimes_invalid_field(self):
# An error should be raised when QuerySet.datetimes() is passed the
# wrong type of field.
msg = "'name' isn't a DateField, TimeField, or DateTimeField."
with self.assertRaisesMessage(TypeError, msg):
Item.objects.datetimes("name", "month")
def test_ticket22023(self):
with self.assertRaisesMessage(
TypeError, "Cannot call only() after .values() or .values_list()"
):
Valid.objects.values().only()
with self.assertRaisesMessage(
TypeError, "Cannot call defer() after .values() or .values_list()"
):
Valid.objects.values().defer()
class Queries4Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
n1 = Note.objects.create(note="n1", misc="foo")
n2 = Note.objects.create(note="n2", misc="bar")
e1 = ExtraInfo.objects.create(info="e1", note=n1)
e2 = ExtraInfo.objects.create(info="e2", note=n2)
cls.a1 = Author.objects.create(name="a1", num=1001, extra=e1)
cls.a3 = Author.objects.create(name="a3", num=3003, extra=e2)
cls.r1 = Report.objects.create(name="r1", creator=cls.a1)
cls.r2 = Report.objects.create(name="r2", creator=cls.a3)
cls.r3 = Report.objects.create(name="r3")
cls.i1 = Item.objects.create(
name="i1", created=datetime.datetime.now(), note=n1, creator=cls.a1
)
cls.i2 = Item.objects.create(
name="i2", created=datetime.datetime.now(), note=n1, creator=cls.a3
)
def test_ticket24525(self):
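        # Combining related-manager querysets with & should respect an
        # exclude() across the m2m relation (#24525).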
tag = Tag.objects.create()
anth100 = tag.note_set.create(note="ANTH", misc="100")
math101 = tag.note_set.create(note="MATH", misc="101")
s1 = tag.annotation_set.create(name="1")
s2 = tag.annotation_set.create(name="2")
s1.notes.set([math101, anth100])
s2.notes.set([math101])
result = math101.annotation_set.all() & tag.annotation_set.exclude(
notes__in=[anth100]
)
self.assertEqual(list(result), [s2])
def test_ticket11811(self):
unsaved_category = NamedCategory(name="Other")
msg = (
"Unsaved model instance <NamedCategory: Other> cannot be used in an ORM "
"query."
)
with self.assertRaisesMessage(ValueError, msg):
Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)
def test_ticket14876(self):
# Note: when combining the query we need to have information available
# about the join type of the trimmed "creator__isnull" join. If we
# don't have that information, then the join is created as INNER JOIN
# and results will be incorrect.
q1 = Report.objects.filter(
Q(creator__isnull=True) | Q(creator__extra__info="e1")
)
q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(
Q(creator__extra__info="e1")
)
self.assertCountEqual(q1, [self.r1, self.r3])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Report.objects.filter(
Q(creator__extra__info="e1") | Q(creator__isnull=True)
)
q2 = Report.objects.filter(
Q(creator__extra__info="e1")
) | Report.objects.filter(Q(creator__isnull=True))
self.assertCountEqual(q1, [self.r1, self.r3])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(
Q(creator=self.a1) | Q(creator__report__name="r1")
).order_by()
q2 = (
Item.objects.filter(Q(creator=self.a1)).order_by()
| Item.objects.filter(Q(creator__report__name="r1")).order_by()
)
self.assertCountEqual(q1, [self.i1])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(
Q(creator__report__name="e1") | Q(creator=self.a1)
).order_by()
q2 = (
Item.objects.filter(Q(creator__report__name="e1")).order_by()
| Item.objects.filter(Q(creator=self.a1)).order_by()
)
self.assertCountEqual(q1, [self.i1])
self.assertEqual(str(q1.query), str(q2.query))
def test_combine_join_reuse(self):
# Joins having identical connections are correctly recreated in the
        # rhs query when the querysets are ORed together (#18748).
Report.objects.create(name="r4", creator=self.a1)
q1 = Author.objects.filter(report__name="r5")
q2 = Author.objects.filter(report__name="r4").filter(report__name="r1")
combined = q1 | q2
self.assertEqual(str(combined.query).count("JOIN"), 2)
self.assertEqual(len(combined), 1)
self.assertEqual(combined[0].name, "a1")
def test_combine_or_filter_reuse(self):
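        # get() on an OR-combined queryset should reuse the combined filters
        # without raising.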
combined = Author.objects.filter(name="a1") | Author.objects.filter(name="a3")
self.assertEqual(combined.get(name="a1"), self.a1)
def test_join_reuse_order(self):
# Join aliases are reused in order. This shouldn't raise AssertionError
# because change_map contains a circular reference (#26522).
s1 = School.objects.create()
s2 = School.objects.create()
s3 = School.objects.create()
t1 = Teacher.objects.create()
otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1)
qs1 = otherteachers.filter(schools=s1).filter(schools=s2)
qs2 = otherteachers.filter(schools=s1).filter(schools=s3)
self.assertQuerysetEqual(qs1 | qs2, [])
def test_ticket7095(self):
# Updates that are filtered on the model being updated are somewhat
# tricky in MySQL.
ManagedModel.objects.create(data="mm1", tag=self.t1, public=True)
self.assertEqual(ManagedModel.objects.update(data="mm"), 1)
# A values() or values_list() query across joined models must use outer
# joins appropriately.
# Note: In Oracle, we expect a null CharField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_charfield_repr = ""
else:
expected_null_charfield_repr = None
self.assertSequenceEqual(
Report.objects.values_list("creator__extra__info", flat=True).order_by(
"name"
),
["e1", "e2", expected_null_charfield_repr],
)
# Similarly for select_related(), joins beyond an initial nullable join
# must use outer joins so that all results are included.
self.assertSequenceEqual(
Report.objects.select_related("creator", "creator__extra").order_by("name"),
[self.r1, self.r2, self.r3],
)
# When there are multiple paths to a table from another table, we have
# to be careful not to accidentally reuse an inappropriate join when
# using select_related(). We used to return the parent's Detail record
# here by mistake.
d1 = Detail.objects.create(data="d1")
d2 = Detail.objects.create(data="d2")
m1 = Member.objects.create(name="m1", details=d1)
m2 = Member.objects.create(name="m2", details=d2)
Child.objects.create(person=m2, parent=m1)
obj = m1.children.select_related("person__details")[0]
self.assertEqual(obj.person.details.data, "d2")
def test_order_by_resetting(self):
# Calling order_by() with no parameters removes any existing ordering on the
# model. But it should still be possible to add new ordering after that.
qs = Author.objects.order_by().order_by("name")
self.assertIn("ORDER BY", qs.query.get_compiler(qs.db).as_sql()[0])
def test_order_by_reverse_fk(self):
        # It is possible to order by a reverse foreign key, although that
        # can lead to duplicate results.
c1 = SimpleCategory.objects.create(name="category1")
c2 = SimpleCategory.objects.create(name="category2")
CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c1)
self.assertSequenceEqual(
SimpleCategory.objects.order_by("categoryitem", "pk"), [c1, c2, c1]
)
def test_filter_reverse_non_integer_pk(self):
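        # Filtering through a reverse relation should work when the related
        # model's primary key isn't an integer.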
date_obj = DateTimePK.objects.create()
extra_obj = ExtraInfo.objects.create(info="extra", date=date_obj)
self.assertEqual(
DateTimePK.objects.filter(extrainfo=extra_obj).get(),
date_obj,
)
def test_ticket10181(self):
# Avoid raising an EmptyResultSet if an inner query is probably
# empty (and hence, not executed).
self.assertQuerysetEqual(
Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])), []
)
def test_ticket15316_filter_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_exclude_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_filter_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
ci1 = CategoryItem.objects.create(category=c1)
CategoryItem.objects.create(category=c2)
CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_exclude_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(
name="named category1", special_name="special1"
)
c3 = SpecialCategory.objects.create(
name="named category2", special_name="special2"
)
CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_filter_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(
category__onetoonecategory__isnull=False
).order_by("pk")
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
def test_ticket15316_one2one_exclude_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_filter_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
CategoryItem.objects.create(category=c0)
CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(qs, [ci1])
def test_ticket15316_one2one_exclude_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
OneToOneCategory.objects.create(category=c1, new_name="new1")
OneToOneCategory.objects.create(category=c0, new_name="new2")
CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(
category__onetoonecategory__isnull=True
).order_by("pk")
self.assertEqual(qs.count(), 2)
self.assertSequenceEqual(qs, [ci2, ci3])
class Queries5Tests(TestCase):
@classmethod
def setUpTestData(cls):
        # Ordering by 'rank' gives us rank1, rank2, rank3. Ordering by the
        # Meta.ordering will be rank3, rank2, rank1.
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.n2 = Note.objects.create(note="n2", misc="bar", id=2)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
e2 = ExtraInfo.objects.create(info="e2", note=cls.n2)
a1 = Author.objects.create(name="a1", num=1001, extra=e1)
a2 = Author.objects.create(name="a2", num=2002, extra=e1)
a3 = Author.objects.create(name="a3", num=3003, extra=e2)
cls.rank2 = Ranking.objects.create(rank=2, author=a2)
cls.rank1 = Ranking.objects.create(rank=1, author=a3)
cls.rank3 = Ranking.objects.create(rank=3, author=a1)
def test_ordering(self):
# Cross model ordering is possible in Meta, too.
self.assertSequenceEqual(
Ranking.objects.all(),
[self.rank3, self.rank2, self.rank1],
)
self.assertSequenceEqual(
Ranking.objects.order_by("rank"),
[self.rank1, self.rank2, self.rank3],
)
        # Ordering of extra() pieces is possible, too, and you can mix extra
        # fields and model fields in the ordering.
self.assertSequenceEqual(
Ranking.objects.extra(
tables=["django_site"], order_by=["-django_site.id", "rank"]
),
[self.rank1, self.rank2, self.rank3],
)
sql = "case when %s > 2 then 1 else 0 end" % connection.ops.quote_name("rank")
qs = Ranking.objects.extra(select={"good": sql})
self.assertEqual(
[o.good for o in qs.extra(order_by=("-good",))], [True, False, False]
)
self.assertSequenceEqual(
qs.extra(order_by=("-good", "id")),
[self.rank3, self.rank2, self.rank1],
)
# Despite having some extra aliases in the query, we can still omit
# them in a values() query.
dicts = qs.values("id", "rank").order_by("id")
self.assertEqual([d["rank"] for d in dicts], [2, 1, 3])
def test_ticket7256(self):
# An empty values() call includes all aliases, including those from an
# extra()
sql = "case when %s > 2 then 1 else 0 end" % connection.ops.quote_name("rank")
qs = Ranking.objects.extra(select={"good": sql})
dicts = qs.values().order_by("id")
for d in dicts:
del d["id"]
del d["author_id"]
self.assertEqual(
[sorted(d.items()) for d in dicts],
[
[("good", 0), ("rank", 2)],
[("good", 0), ("rank", 1)],
[("good", 1), ("rank", 3)],
],
)
def test_ticket7045(self):
# Extra tables used to crash SQL construction on the second use.
qs = Ranking.objects.extra(tables=["django_site"])
qs.query.get_compiler(qs.db).as_sql()
        # The test passes if this doesn't raise an exception.
qs.query.get_compiler(qs.db).as_sql()
def test_ticket9848(self):
# Make sure that updates which only filter on sub-tables don't
# inadvertently update the wrong records (bug #9848).
author_start = Author.objects.get(name="a1")
ranking_start = Ranking.objects.get(author__name="a1")
# Make sure that the IDs from different tables don't happen to match.
self.assertSequenceEqual(
Ranking.objects.filter(author__name="a1"),
[self.rank3],
)
        self.assertEqual(
            Ranking.objects.filter(author__name="a1").update(rank=4636), 1
        )
r = Ranking.objects.get(author__name="a1")
self.assertEqual(r.id, ranking_start.id)
self.assertEqual(r.author.id, author_start.id)
self.assertEqual(r.rank, 4636)
r.rank = 3
r.save()
self.assertSequenceEqual(
Ranking.objects.all(),
[self.rank3, self.rank2, self.rank1],
)
def test_ticket5261(self):
# Test different empty excludes.
self.assertSequenceEqual(
Note.objects.exclude(Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.filter(~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.filter(~Q() | ~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.exclude(~Q() & ~Q()),
[self.n1, self.n2],
)
self.assertSequenceEqual(
Note.objects.exclude(~Q() ^ ~Q()),
[self.n1, self.n2],
)
def test_extra_select_literal_percent_s(self):
# Allow %%s to escape select clauses
self.assertEqual(Note.objects.extra(select={"foo": "'%%s'"})[0].foo, "%s")
self.assertEqual(
Note.objects.extra(select={"foo": "'%%s bar %%s'"})[0].foo, "%s bar %s"
)
self.assertEqual(
Note.objects.extra(select={"foo": "'bar %%s'"})[0].foo, "bar %s"
)
def test_queryset_reuse(self):
# Using querysets doesn't mutate aliases.
authors = Author.objects.filter(Q(name="a1") | Q(name="nonexistent"))
self.assertEqual(Ranking.objects.filter(author__in=authors).get(), self.rank3)
self.assertEqual(authors.count(), 1)
def test_filter_unsaved_object(self):
# These tests will catch ValueError in Django 5.0 when passing unsaved
# model instances to related filters becomes forbidden.
# msg = "Model instances passed to related filters must be saved."
msg = "Passing unsaved model instances to related filters is deprecated."
company = Company.objects.create(name="Django")
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.filter(employer=Company(name="unsaved"))
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.filter(employer__in=[company, Company(name="unsaved")])
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
StaffUser.objects.filter(staff=Staff(name="unsaved"))
class SelectRelatedTests(TestCase):
def test_tickets_3045_3288(self):
# Once upon a time, select_related() with circular relations would loop
# infinitely if you forgot to specify "depth". Now we set an arbitrary
# default upper bound.
self.assertQuerysetEqual(X.objects.all(), [])
self.assertQuerysetEqual(X.objects.select_related(), [])
class SubclassFKTests(TestCase):
def test_ticket7778(self):
# Model subclasses could not be deleted if a nullable foreign key
# relates to a model that relates back.
num_celebs = Celebrity.objects.count()
tvc = TvChef.objects.create(name="Huey")
self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
Fan.objects.create(fan_of=tvc)
Fan.objects.create(fan_of=tvc)
tvc.delete()
# The parent object should have been deleted as well.
self.assertEqual(Celebrity.objects.count(), num_celebs)
class CustomPkTests(TestCase):
def test_ticket7371(self):
self.assertQuerysetEqual(Related.objects.order_by("custom"), [])
class NullableRelOrderingTests(TestCase):
def test_ticket10028(self):
# Ordering by model related to nullable relations(!) should use outer
# joins, so that all results are included.
p1 = Plaything.objects.create(name="p1")
self.assertSequenceEqual(Plaything.objects.all(), [p1])
def test_join_already_in_query(self):
# Ordering by model related to nullable relations should not change
# the join type of already existing joins.
Plaything.objects.create(name="p1")
s = SingleObject.objects.create(name="s")
r = RelatedObject.objects.create(single=s, f=1)
p2 = Plaything.objects.create(name="p2", others=r)
qs = Plaything.objects.filter(others__isnull=False).order_by("pk")
self.assertNotIn("JOIN", str(qs.query))
qs = Plaything.objects.filter(others__f__isnull=False).order_by("pk")
self.assertIn("INNER", str(qs.query))
qs = qs.order_by("others__single__name")
        # The ordering by others__single__name will add one new join (to
        # single) and that join must be a LEFT join. The already existing
        # join to related objects must be kept INNER. So, we have both an
        # INNER and a LEFT join in the query.
self.assertEqual(str(qs.query).count("LEFT"), 1)
self.assertEqual(str(qs.query).count("INNER"), 1)
self.assertSequenceEqual(qs, [p2])
class DisjunctiveFilterTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
def test_ticket7872(self):
# Another variation on the disjunctive filtering theme.
# For the purposes of this regression test, it's important that there is no
# Join object related to the LeafA we create.
l1 = LeafA.objects.create(data="first")
self.assertSequenceEqual(LeafA.objects.all(), [l1])
self.assertSequenceEqual(
LeafA.objects.filter(Q(data="first") | Q(join__b__data="second")),
[l1],
)
def test_ticket8283(self):
# Checking that applying filters after a disjunction works correctly.
self.assertSequenceEqual(
(
ExtraInfo.objects.filter(note=self.n1)
| ExtraInfo.objects.filter(info="e2")
).filter(note=self.n1),
[self.e1],
)
self.assertSequenceEqual(
(
ExtraInfo.objects.filter(info="e2")
| ExtraInfo.objects.filter(note=self.n1)
).filter(note=self.n1),
[self.e1],
)
class Queries6Tests(TestCase):
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
cls.t1 = Tag.objects.create(name="t1", category=generic)
cls.t2 = Tag.objects.create(name="t2", parent=cls.t1, category=generic)
cls.t3 = Tag.objects.create(name="t3", parent=cls.t1)
cls.t4 = Tag.objects.create(name="t4", parent=cls.t3)
cls.t5 = Tag.objects.create(name="t5", parent=cls.t3)
n1 = Note.objects.create(note="n1", misc="foo", id=1)
cls.ann1 = Annotation.objects.create(name="a1", tag=cls.t1)
cls.ann1.notes.add(n1)
cls.ann2 = Annotation.objects.create(name="a2", tag=cls.t4)
def test_parallel_iterators(self):
# Parallel iterators work.
qs = Tag.objects.all()
i1, i2 = iter(qs), iter(qs)
self.assertEqual(repr(next(i1)), "<Tag: t1>")
self.assertEqual(repr(next(i1)), "<Tag: t2>")
self.assertEqual(repr(next(i2)), "<Tag: t1>")
self.assertEqual(repr(next(i2)), "<Tag: t2>")
self.assertEqual(repr(next(i2)), "<Tag: t3>")
self.assertEqual(repr(next(i1)), "<Tag: t3>")
qs = X.objects.all()
self.assertFalse(qs)
self.assertFalse(qs)
def test_nested_queries_sql(self):
        # Nested queries should not evaluate the inner query as part of
        # constructing the SQL (so we should see a nested query here,
        # indicated by two "SELECT" calls).
qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
self.assertEqual(qs.query.get_compiler(qs.db).as_sql()[0].count("SELECT"), 2)
def test_tickets_8921_9188(self):
# Incorrect SQL was being generated for certain types of exclude()
# queries that crossed multi-valued relations (#8921, #9188 and some
# preemptively discovered cases).
self.assertSequenceEqual(
PointerA.objects.filter(connection__pointerb__id=1), []
)
self.assertSequenceEqual(
PointerA.objects.exclude(connection__pointerb__id=1), []
)
self.assertSequenceEqual(
Tag.objects.exclude(children=None),
[self.t1, self.t3],
)
# This example is tricky because the parent could be NULL, so only checking
# parents with annotations omits some results (tag t1, in this case).
self.assertSequenceEqual(
Tag.objects.exclude(parent__annotation__name="a1"),
[self.t1, self.t4, self.t5],
)
        # The annotation->tag link is single-valued and the tag->children
        # link is multi-valued. So we have to split the exclude filter in
        # the middle and then optimize the inner query without losing
        # results.
self.assertSequenceEqual(
Annotation.objects.exclude(tag__children__name="t2"),
[self.ann2],
)
        # Nested queries are possible (although they should be used with
        # care, since they can have performance problems on backends like
        # MySQL).
self.assertSequenceEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
[self.ann1],
)
def test_ticket3739(self):
# The all() method on querysets returns a copy of the queryset.
q1 = Tag.objects.order_by("name")
self.assertIsNot(q1, q1.all())
def test_ticket_11320(self):
qs = Tag.objects.exclude(category=None).exclude(category__name="foo")
self.assertEqual(str(qs.query).count(" INNER JOIN "), 1)
def test_distinct_ordered_sliced_subquery_aggregation(self):
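        # count() over a distinct, ordered slice should push the slice into
        # a subquery rather than dropping it.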
self.assertEqual(
Tag.objects.distinct().order_by("category__name")[:3].count(), 3
)
def test_multiple_columns_with_the_same_name_slice(self):
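        # Sliced queries that select two columns sharing a name ("name" on
        # both Tag and its category/parent) should keep the values straight.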
self.assertEqual(
list(
Tag.objects.order_by("name").values_list("name", "category__name")[:2]
),
[("t1", "Generic"), ("t2", "Generic")],
)
self.assertSequenceEqual(
Tag.objects.order_by("name").select_related("category")[:2],
[self.t1, self.t2],
)
self.assertEqual(
list(Tag.objects.order_by("-name").values_list("name", "parent__name")[:2]),
[("t5", "t3"), ("t4", "t3")],
)
self.assertSequenceEqual(
Tag.objects.order_by("-name").select_related("parent")[:2],
[self.t5, self.t4],
)
def test_col_alias_quoted(self):
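        # Auto-generated column aliases such as "col1" should be quoted in
        # the emitted SQL.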
with CaptureQueriesContext(connection) as captured_queries:
self.assertEqual(
Tag.objects.values("parent")
.annotate(
tag_per_parent=Count("pk"),
)
.aggregate(Max("tag_per_parent")),
{"tag_per_parent__max": 2},
)
sql = captured_queries[0]["sql"]
self.assertIn("AS %s" % connection.ops.quote_name("col1"), sql)
def test_xor_subquery(self):
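        # XOR-combining two Exists() subqueries should select the rows
        # matched by exactly one of them.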
self.assertSequenceEqual(
Tag.objects.filter(
Exists(Tag.objects.filter(id=OuterRef("id"), name="t3"))
^ Exists(Tag.objects.filter(id=OuterRef("id"), parent=self.t1))
),
[self.t2],
)
class RawQueriesTests(TestCase):
@classmethod
def setUpTestData(cls):
Note.objects.create(note="n1", misc="foo", id=1)
def test_ticket14729(self):
        # Test the representation of a raw query with one or more parameters
        # passed as a list.
query = "SELECT * FROM queries_note WHERE note = %s"
params = ["n1"]
qs = Note.objects.raw(query, params=params)
self.assertEqual(
repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>"
)
query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
params = ["n1", "foo"]
qs = Note.objects.raw(query, params=params)
self.assertEqual(
repr(qs),
"<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>",
)
class GeneratorExpressionTests(SimpleTestCase):
def test_ticket10432(self):
# Using an empty iterator as the rvalue for an "__in"
# lookup is legal.
self.assertCountEqual(Note.objects.filter(pk__in=iter(())), [])
class ComparisonTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1)
def test_ticket8597(self):
# Regression tests for case-insensitive comparisons
item_ab = Item.objects.create(
name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1
)
item_xy = Item.objects.create(
name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1
)
self.assertSequenceEqual(
Item.objects.filter(name__iexact="A_b"),
[item_ab],
)
self.assertSequenceEqual(
Item.objects.filter(name__iexact="x%Y"),
[item_xy],
)
self.assertSequenceEqual(
Item.objects.filter(name__istartswith="A_b"),
[item_ab],
)
self.assertSequenceEqual(
Item.objects.filter(name__iendswith="A_b"),
[item_ab],
)
class ExistsSql(TestCase):
def test_exists(self):
with CaptureQueriesContext(connection) as captured_queries:
self.assertFalse(Tag.objects.exists())
        # OK - so the exists query worked - but did it include too many
        # columns?
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]["sql"]
id, name = connection.ops.quote_name("id"), connection.ops.quote_name("name")
self.assertNotIn(id, qstr)
self.assertNotIn(name, qstr)
def test_ticket_18414(self):
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
self.assertTrue(Article.objects.exists())
self.assertTrue(Article.objects.distinct().exists())
self.assertTrue(Article.objects.distinct()[1:3].exists())
self.assertFalse(Article.objects.distinct()[1:1].exists())
@skipUnlessDBFeature("can_distinct_on_fields")
def test_ticket_18414_distinct_on(self):
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
self.assertTrue(Article.objects.distinct("name").exists())
self.assertTrue(Article.objects.distinct("name")[1:2].exists())
self.assertFalse(Article.objects.distinct("name")[2:3].exists())
class QuerysetOrderedTests(unittest.TestCase):
"""
Tests for the Queryset.ordered attribute.
"""
def test_no_default_or_explicit_ordering(self):
self.assertIs(Annotation.objects.all().ordered, False)
def test_cleared_default_ordering(self):
self.assertIs(Tag.objects.all().ordered, True)
self.assertIs(Tag.objects.order_by().ordered, False)
def test_explicit_ordering(self):
self.assertIs(Annotation.objects.order_by("id").ordered, True)
def test_empty_queryset(self):
self.assertIs(Annotation.objects.none().ordered, True)
def test_order_by_extra(self):
self.assertIs(Annotation.objects.extra(order_by=["id"]).ordered, True)
def test_annotated_ordering(self):
qs = Annotation.objects.annotate(num_notes=Count("notes"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("num_notes").ordered, True)
def test_annotated_default_ordering(self):
qs = Tag.objects.annotate(num_notes=Count("pk"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("name").ordered, True)
def test_annotated_values_default_ordering(self):
qs = Tag.objects.values("name").annotate(num_notes=Count("pk"))
self.assertIs(qs.ordered, False)
self.assertIs(qs.order_by("name").ordered, True)
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
class SubqueryTests(TestCase):
@classmethod
def setUpTestData(cls):
NamedCategory.objects.create(id=1, name="first")
NamedCategory.objects.create(id=2, name="second")
NamedCategory.objects.create(id=3, name="third")
NamedCategory.objects.create(id=4, name="fourth")
def test_ordered_subselect(self):
"Subselects honor any manual ordering"
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:2]
)
self.assertEqual(set(query.values_list("id", flat=True)), {3})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[2:]
)
self.assertEqual(set(query.values_list("id", flat=True)), {1, 2})
def test_slice_subquery_and_query(self):
"""
Slice a query that has a sliced subquery
"""
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:2]
)[0:2]
self.assertEqual({x.id for x in query}, {3, 4})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:3]
)[1:3]
self.assertEqual({x.id for x in query}, {3})
query = DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[2:]
)[1:]
self.assertEqual({x.id for x in query}, {2})
def test_related_sliced_subquery(self):
"""
Related objects constraints can safely contain sliced subqueries.
refs #22434
"""
generic = NamedCategory.objects.create(id=5, name="Generic")
t1 = Tag.objects.create(name="t1", category=generic)
t2 = Tag.objects.create(name="t2", category=generic)
ManagedModel.objects.create(data="mm1", tag=t1, public=True)
mm2 = ManagedModel.objects.create(data="mm2", tag=t2, public=True)
query = ManagedModel.normal_manager.filter(
tag__in=Tag.objects.order_by("-id")[:1]
)
self.assertEqual({x.id for x in query}, {mm2.id})
def test_sliced_delete(self):
"Delete queries can safely contain sliced subqueries"
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[0:1]
).delete()
self.assertEqual(
set(DumbCategory.objects.values_list("id", flat=True)), {1, 2, 3}
)
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:2]
).delete()
self.assertEqual(set(DumbCategory.objects.values_list("id", flat=True)), {1, 3})
DumbCategory.objects.filter(
id__in=DumbCategory.objects.order_by("-id")[1:]
).delete()
self.assertEqual(set(DumbCategory.objects.values_list("id", flat=True)), {3})
def test_distinct_ordered_sliced_subquery(self):
# Implicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct().order_by("name")[0:2],
)
.order_by("name")
.values_list("name", flat=True),
["first", "fourth"],
)
# Explicit values('id').
self.assertSequenceEqual(
NamedCategory.objects.filter(
id__in=NamedCategory.objects.distinct()
.order_by("-name")
.values("id")[0:2],
)
.order_by("name")
.values_list("name", flat=True),
["second", "third"],
)
# Annotated value.
self.assertSequenceEqual(
DumbCategory.objects.filter(
id__in=DumbCategory.objects.annotate(double_id=F("id") * 2)
.order_by("id")
.distinct()
.values("double_id")[0:2],
)
.order_by("id")
.values_list("id", flat=True),
[2, 4],
)
class QuerySetBitwiseOperationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.school = School.objects.create()
cls.room_1 = Classroom.objects.create(
school=cls.school, has_blackboard=False, name="Room 1"
)
cls.room_2 = Classroom.objects.create(
school=cls.school, has_blackboard=True, name="Room 2"
)
cls.room_3 = Classroom.objects.create(
school=cls.school, has_blackboard=True, name="Room 3"
)
cls.room_4 = Classroom.objects.create(
school=cls.school, has_blackboard=False, name="Room 4"
)
tag = Tag.objects.create()
cls.annotation_1 = Annotation.objects.create(tag=tag)
annotation_2 = Annotation.objects.create(tag=tag)
note = cls.annotation_1.notes.create(tag=tag)
cls.base_user_1 = BaseUser.objects.create(annotation=cls.annotation_1)
cls.base_user_2 = BaseUser.objects.create(annotation=annotation_2)
cls.task = Task.objects.create(
owner=cls.base_user_2,
creator=cls.base_user_2,
note=note,
)
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_rhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)
qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_lhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
qs2 = Classroom.objects.filter(has_blackboard=False)
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_both_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_or_with_both_slice_and_ordering(self):
qs1 = Classroom.objects.filter(has_blackboard=False).order_by("-pk")[:1]
qs2 = Classroom.objects.filter(has_blackboard=True).order_by("-name")[:1]
self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_rhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)
qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2, self.room_3])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_lhs_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
qs2 = Classroom.objects.filter(has_blackboard=False)
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2, self.room_4])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_both_slice(self):
qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2])
@skipUnlessDBFeature("allow_sliced_subqueries_with_in")
def test_xor_with_both_slice_and_ordering(self):
qs1 = Classroom.objects.filter(has_blackboard=False).order_by("-pk")[:1]
qs2 = Classroom.objects.filter(has_blackboard=True).order_by("-name")[:1]
self.assertCountEqual(qs1 ^ qs2, [self.room_3, self.room_4])
def test_subquery_aliases(self):
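        # Combining a filter with an Exists() subquery via & should keep the
        # subquery aliases intact, even when the result is reused as a
        # nested __in filter.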
combined = School.objects.filter(pk__isnull=False) & School.objects.filter(
Exists(
Classroom.objects.filter(
has_blackboard=True,
school=OuterRef("pk"),
)
),
)
self.assertSequenceEqual(combined, [self.school])
nested_combined = School.objects.filter(pk__in=combined.values("pk"))
self.assertSequenceEqual(nested_combined, [self.school])
def test_conflicting_aliases_during_combine(self):
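        # OR-ing two querysets whose joins would use conflicting aliases
        # should yield the same rows regardless of operand order.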
qs1 = self.annotation_1.baseuser_set.all()
qs2 = BaseUser.objects.filter(
Q(owner__note__in=self.annotation_1.notes.all())
| Q(creator__note__in=self.annotation_1.notes.all())
)
self.assertSequenceEqual(qs1, [self.base_user_1])
self.assertSequenceEqual(qs2, [self.base_user_2])
self.assertCountEqual(qs2 | qs1, qs1 | qs2)
self.assertCountEqual(qs2 | qs1, [self.base_user_1, self.base_user_2])
class CloneTests(TestCase):
def test_evaluated_queryset_as_argument(self):
"""
If a queryset is already evaluated, it can still be used as a query arg.
"""
n = Note(note="Test1", misc="misc")
n.save()
e = ExtraInfo(info="good", note=n)
e.save()
n_list = Note.objects.all()
# Evaluate the Note queryset, populating the query cache
list(n_list)
        # Make one of the cached results unpicklable.
n_list._result_cache[0].lock = Lock()
with self.assertRaises(TypeError):
pickle.dumps(n_list)
# Use the note queryset in a query, and evaluate
# that query in a way that involves cloning.
self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, "good")
def test_no_model_options_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta)
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail(
"Model options shouldn't be cloned."
)
try:
Note.objects.filter(pk__lte=F("pk") + 1).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
def test_no_fields_cloning(self):
"""
Cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta.get_field("misc"))
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail(
"Model fields shouldn't be cloned"
)
try:
Note.objects.filter(note=F("misc")).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(SimpleTestCase):
def test_emptyqueryset_values(self):
# #14366 -- Calling .values() on an empty QuerySet and then cloning
# that should not cause an error
self.assertCountEqual(Number.objects.none().values("num").order_by("num"), [])
def test_values_subquery(self):
self.assertCountEqual(
Number.objects.filter(pk__in=Number.objects.none().values("pk")), []
)
self.assertCountEqual(
Number.objects.filter(pk__in=Number.objects.none().values_list("pk")), []
)
def test_ticket_19151(self):
# #19151 -- Calling .values() or .values_list() on an empty QuerySet
# should return an empty QuerySet and not cause an error.
q = Author.objects.none()
self.assertCountEqual(q.values(), [])
self.assertCountEqual(q.values_list(), [])
class ValuesQuerysetTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=72)
def test_flat_values_list(self):
qs = Number.objects.values_list("num")
qs = qs.values_list("num", flat=True)
self.assertSequenceEqual(qs, [72])
def test_extra_values(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={"value_plus_x": "num+%s", "value_minus_x": "num-%s"},
select_params=(1, 2),
)
qs = qs.order_by("value_minus_x")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_values_order_twice(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={"value_plus_one": "num+1", "value_minus_one": "num-1"}
)
qs = qs.order_by("value_minus_one").order_by("value_plus_one")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_values_order_multiple(self):
# Postgres doesn't allow constants in order by, so check for that.
qs = Number.objects.extra(
select={
"value_plus_one": "num+1",
"value_minus_one": "num-1",
"constant_value": "1",
}
)
qs = qs.order_by("value_plus_one", "value_minus_one", "constant_value")
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_values_order_in_extra(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={"value_plus_one": "num+1", "value_minus_one": "num-1"},
order_by=["value_minus_one"],
)
        qs = qs.values("num")
        self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_select_params_values_order_in_extra(self):
# testing for 23259 issue
qs = Number.objects.extra(
select={"value_plus_x": "num+%s"},
select_params=[1],
order_by=["value_plus_x"],
)
qs = qs.filter(num=72)
qs = qs.values("num")
self.assertSequenceEqual(qs, [{"num": 72}])
def test_extra_multiple_select_params_values_order_by(self):
# testing for 23259 issue
qs = Number.objects.extra(
select={"value_plus_x": "num+%s", "value_minus_x": "num-%s"},
select_params=(72, 72),
)
qs = qs.order_by("value_minus_x")
qs = qs.filter(num=1)
qs = qs.values("num")
self.assertSequenceEqual(qs, [])
def test_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={"value_plus_one": "num+1"})
qs = qs.order_by("value_plus_one")
qs = qs.values_list("num")
self.assertSequenceEqual(qs, [(72,)])
def test_flat_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={"value_plus_one": "num+1"})
qs = qs.order_by("value_plus_one")
qs = qs.values_list("num", flat=True)
self.assertSequenceEqual(qs, [72])
def test_field_error_values_list(self):
# see #23443
msg = (
"Cannot resolve keyword %r into field. Join on 'name' not permitted."
% "foo"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.values_list("name__foo")
def test_named_values_list_flat(self):
msg = "'flat' and 'named' can't be used together."
with self.assertRaisesMessage(TypeError, msg):
Number.objects.values_list("num", flat=True, named=True)
def test_named_values_list_bad_field_name(self):
msg = "Type names and field names must be valid identifiers: '1'"
with self.assertRaisesMessage(ValueError, msg):
Number.objects.extra(select={"1": "num+1"}).values_list(
"1", named=True
).first()
def test_named_values_list_with_fields(self):
qs = Number.objects.extra(select={"num2": "num+1"}).annotate(Count("id"))
values = qs.values_list("num", "num2", named=True).first()
self.assertEqual(type(values).__name__, "Row")
self.assertEqual(values._fields, ("num", "num2"))
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
def test_named_values_list_without_fields(self):
qs = Number.objects.extra(select={"num2": "num+1"}).annotate(Count("id"))
values = qs.values_list(named=True).first()
self.assertEqual(type(values).__name__, "Row")
self.assertEqual(
values._fields,
("num2", "id", "num", "other_num", "another_num", "id__count"),
)
self.assertEqual(values.num, 72)
self.assertEqual(values.num2, 73)
self.assertEqual(values.id__count, 1)
def test_named_values_list_expression_with_default_alias(self):
expr = Count("id")
values = (
Number.objects.annotate(id__count1=expr)
.values_list(expr, "id__count1", named=True)
.first()
)
self.assertEqual(values._fields, ("id__count2", "id__count1"))
def test_named_values_list_expression(self):
expr = F("num") + 1
qs = Number.objects.annotate(combinedexpression1=expr).values_list(
expr, "combinedexpression1", named=True
)
values = qs.first()
        self.assertEqual(
            values._fields, ("combinedexpression2", "combinedexpression1")
        )
def test_named_values_pickle(self):
value = Number.objects.values_list("num", "other_num", named=True).get()
self.assertEqual(value, (72, None))
self.assertEqual(pickle.loads(pickle.dumps(value)), value)
class QuerySetSupportsPythonIdioms(TestCase):
@classmethod
def setUpTestData(cls):
some_date = datetime.datetime(2014, 5, 16, 12, 1)
cls.articles = [
Article.objects.create(name=f"Article {i}", created=some_date)
for i in range(1, 8)
]
def get_ordered_articles(self):
return Article.objects.order_by("name")
def test_can_get_items_using_index_and_slice_notation(self):
self.assertEqual(self.get_ordered_articles()[0].name, "Article 1")
self.assertSequenceEqual(
self.get_ordered_articles()[1:3],
[self.articles[1], self.articles[2]],
)
def test_slicing_with_steps_can_be_used(self):
self.assertSequenceEqual(
self.get_ordered_articles()[::2],
[
self.articles[0],
self.articles[2],
self.articles[4],
self.articles[6],
],
)
def test_slicing_without_step_is_lazy(self):
with self.assertNumQueries(0):
self.get_ordered_articles()[0:5]
    def test_slicing_with_step_is_not_lazy(self):
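        # SQL has no notion of a slice step, so a stepped slice is evaluated
        # immediately (exactly one query) and the step is applied in Python.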
with self.assertNumQueries(1):
self.get_ordered_articles()[0:5:3]
def test_slicing_can_slice_again_after_slicing(self):
self.assertSequenceEqual(
self.get_ordered_articles()[0:5][0:2],
[self.articles[0], self.articles[1]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[0:5][4:], [self.articles[4]]
)
self.assertSequenceEqual(self.get_ordered_articles()[0:5][5:], [])
# Some more tests!
self.assertSequenceEqual(
self.get_ordered_articles()[2:][0:2],
[self.articles[2], self.articles[3]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[2:][:2],
[self.articles[2], self.articles[3]],
)
self.assertSequenceEqual(
self.get_ordered_articles()[2:][2:3], [self.articles[4]]
)
# Using an offset without a limit is also possible.
self.assertSequenceEqual(
self.get_ordered_articles()[5:],
[self.articles[5], self.articles[6]],
)
def test_slicing_cannot_filter_queryset_once_sliced(self):
msg = "Cannot filter a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].filter(id=1)
def test_slicing_cannot_reorder_queryset_once_sliced(self):
msg = "Cannot reorder a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].order_by("id")
def test_slicing_cannot_combine_queries_once_sliced(self):
msg = "Cannot combine queries once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:1] & Article.objects.all()[4:5]
def test_slicing_negative_indexing_not_supported_for_single_element(self):
"""hint: inverting your ordering might do what you need"""
msg = "Negative indexing is not supported."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[-1]
def test_slicing_negative_indexing_not_supported_for_range(self):
"""hint: inverting your ordering might do what you need"""
msg = "Negative indexing is not supported."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[0:-5]
with self.assertRaisesMessage(ValueError, msg):
Article.objects.all()[-1:]
def test_invalid_index(self):
msg = "QuerySet indices must be integers or slices, not str."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()["foo"]
def test_can_get_number_of_items_in_queryset_using_standard_len(self):
self.assertEqual(len(Article.objects.filter(name__exact="Article 1")), 1)
def test_can_combine_queries_using_and_and_or_operators(self):
s1 = Article.objects.filter(name__exact="Article 1")
s2 = Article.objects.filter(name__exact="Article 2")
self.assertSequenceEqual(
(s1 | s2).order_by("name"),
[self.articles[0], self.articles[1]],
)
self.assertSequenceEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.create(num=1)
Number.objects.create(num=2)
Article.objects.create(name="one", created=datetime.datetime.now())
Article.objects.create(name="two", created=datetime.datetime.now())
Article.objects.create(name="three", created=datetime.datetime.now())
Article.objects.create(name="four", created=datetime.datetime.now())
food = Food.objects.create(name="spam")
Eaten.objects.create(meal="spam with eggs", food=food)
def test_tickets_7698_10202(self):
# People like to slice with '0' as the high-water mark.
self.assertQuerysetEqual(Article.objects.all()[0:0], [])
self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
self.assertEqual(Article.objects.all()[:0].count(), 0)
msg = "Cannot change a query once a slice has been taken."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[:0].latest("created")
def test_empty_resultset_sql(self):
# ticket #12192
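        # A zero-width slice can never return rows, so the ORM
        # short-circuits and issues no query at all.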
self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
def test_empty_sliced_subquery(self):
self.assertEqual(
Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0
)
def test_empty_sliced_subquery_exclude(self):
self.assertEqual(
Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1
)
def test_zero_length_values_slicing(self):
n = 42
with self.assertNumQueries(0):
self.assertQuerysetEqual(Article.objects.values()[n:n], [])
self.assertQuerysetEqual(Article.objects.values_list()[n:n], [])
class EscapingTests(TestCase):
def test_ticket_7302(self):
# Reserved names are appropriately escaped
r_a = ReservedName.objects.create(name="a", order=42)
r_b = ReservedName.objects.create(name="b", order=37)
self.assertSequenceEqual(
ReservedName.objects.order_by("order"),
[r_b, r_a],
)
self.assertSequenceEqual(
ReservedName.objects.extra(
select={"stuff": "name"}, order_by=("order", "stuff")
),
[r_b, r_a],
)
class ToFieldTests(TestCase):
def test_in_query(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food__in=[apple, pear])),
{lunch, dinner},
)
def test_in_subquery(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Eaten.objects.filter(food__in=Food.objects.filter(name="apple"))),
{lunch},
)
self.assertEqual(
set(
Eaten.objects.filter(
food__in=Food.objects.filter(name="apple").values("eaten__meal")
)
),
set(),
)
self.assertEqual(
set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal="lunch"))),
{apple},
)
def test_nested_in_subquery(self):
extra = ExtraInfo.objects.create()
author = Author.objects.create(num=42, extra=extra)
report = Report.objects.create(creator=author)
comment = ReportComment.objects.create(report=report)
comments = ReportComment.objects.filter(
report__in=Report.objects.filter(
creator__in=extra.author_set.all(),
),
)
self.assertSequenceEqual(comments, [comment])
def test_reverse_in(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
        dinner_pear = Eaten.objects.create(food=pear, meal="dinner")
        self.assertEqual(
            set(Food.objects.filter(eaten__in=[lunch_apple, dinner_pear])), {apple, pear}
        )
def test_single_object(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=apple, meal="dinner")
self.assertEqual(set(Eaten.objects.filter(food=apple)), {lunch, dinner})
def test_single_object_reverse(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(set(Food.objects.filter(eaten=lunch)), {apple})
def test_recursive_fk(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(list(Node.objects.filter(parent=node1)), [node2])
def test_recursive_fk_reverse(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(list(Node.objects.filter(node=node2)), [node1])
class IsNullTests(TestCase):
def test_primary_key(self):
custom = CustomPk.objects.create(name="pk")
null = Related.objects.create()
notnull = Related.objects.create(custom=custom)
self.assertSequenceEqual(
Related.objects.filter(custom__isnull=False), [notnull]
)
self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])
def test_to_field(self):
apple = Food.objects.create(name="apple")
e1 = Eaten.objects.create(food=apple, meal="lunch")
e2 = Eaten.objects.create(meal="lunch")
self.assertSequenceEqual(
Eaten.objects.filter(food__isnull=False),
[e1],
)
self.assertSequenceEqual(
Eaten.objects.filter(food__isnull=True),
[e2],
)
class ConditionalTests(TestCase):
"""Tests whose execution depend on different environment conditions like
Python version or DB backend features"""
@classmethod
def setUpTestData(cls):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name="t1", category=generic)
Tag.objects.create(name="t2", parent=t1, category=generic)
t3 = Tag.objects.create(name="t3", parent=t1)
Tag.objects.create(name="t4", parent=t3)
Tag.objects.create(name="t5", parent=t3)
def test_infinite_loop(self):
# If you're not careful, it's possible to introduce infinite loops via
# default ordering on foreign keys in a cycle. We detect that.
with self.assertRaisesMessage(FieldError, "Infinite loop caused by ordering."):
list(LoopX.objects.all()) # Force queryset evaluation with list()
with self.assertRaisesMessage(FieldError, "Infinite loop caused by ordering."):
list(LoopZ.objects.all()) # Force queryset evaluation with list()
# Note that this doesn't cause an infinite loop, since the default
# ordering on the Tag model is empty (and thus defaults to using "id"
# for the related field).
self.assertEqual(len(Tag.objects.order_by("parent")), 5)
# ... but you can still order in a non-recursive fashion among linked
# fields (the previous test failed because the default ordering was
# recursive).
self.assertQuerysetEqual(LoopX.objects.order_by("y__x__y__x__id"), [])
# When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
# portion in MySQL to prevent unnecessary sorting.
@skipUnlessDBFeature("requires_explicit_null_ordering_when_grouping")
def test_null_ordering_added(self):
query = Tag.objects.values_list("parent_id", flat=True).order_by().query
query.group_by = ["parent_id"]
sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
fragment = "ORDER BY "
pos = sql.find(fragment)
self.assertEqual(sql.find(fragment, pos + 1), -1)
self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))
def test_in_list_limit(self):
# The "in" lookup works with lists of 1000 items or more.
# The numbers amount is picked to force three different IN batches
# for Oracle, yet to be less than 2100 parameter limit for MSSQL.
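        # (Oracle permits at most 1,000 expressions per IN clause, so the
        # ORM splits longer lists into ORed IN batches; 2,050 items need
        # three.)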
numbers = list(range(2050))
max_query_params = connection.features.max_query_params
if max_query_params is None or max_query_params >= len(numbers):
Number.objects.bulk_create(Number(num=num) for num in numbers)
for number in [1000, 1001, 2000, len(numbers)]:
with self.subTest(number=number):
self.assertEqual(
Number.objects.filter(num__in=numbers[:number]).count(), number
)
class UnionTests(TestCase):
"""
Tests for the union of two querysets. Bug #12252.
"""
@classmethod
def setUpTestData(cls):
objectas = []
objectbs = []
objectcs = []
a_info = ["one", "two", "three"]
for name in a_info:
o = ObjectA(name=name)
o.save()
objectas.append(o)
b_info = [
("un", 1, objectas[0]),
("deux", 2, objectas[0]),
("trois", 3, objectas[2]),
]
for name, number, objecta in b_info:
o = ObjectB(name=name, num=number, objecta=objecta)
o.save()
objectbs.append(o)
c_info = [("ein", objectas[2], objectbs[2]), ("zwei", objectas[1], objectbs[1])]
for name, objecta, objectb in c_info:
o = ObjectC(name=name, objecta=objecta, objectb=objectb)
o.save()
objectcs.append(o)
def check_union(self, model, Q1, Q2):
filter = model.objects.filter
self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2)))
self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2)))
def test_A_AB(self):
Q1 = Q(name="two")
Q2 = Q(objectb__name="deux")
self.check_union(ObjectA, Q1, Q2)
def test_A_AB2(self):
Q1 = Q(name="two")
Q2 = Q(objectb__name="deux", objectb__num=2)
self.check_union(ObjectA, Q1, Q2)
def test_AB_ACB(self):
Q1 = Q(objectb__name="deux")
Q2 = Q(objectc__objectb__name="deux")
self.check_union(ObjectA, Q1, Q2)
def test_BAB_BAC(self):
Q1 = Q(objecta__objectb__name="deux")
Q2 = Q(objecta__objectc__name="ein")
self.check_union(ObjectB, Q1, Q2)
def test_BAB_BACB(self):
Q1 = Q(objecta__objectb__name="deux")
Q2 = Q(objecta__objectc__objectb__name="trois")
self.check_union(ObjectB, Q1, Q2)
def test_BA_BCA__BAB_BAC_BCA(self):
Q1 = Q(objecta__name="one", objectc__objecta__name="two")
Q2 = Q(
objecta__objectc__name="ein",
objectc__objecta__name="three",
objecta__objectb__name="trois",
)
self.check_union(ObjectB, Q1, Q2)
class DefaultValuesInsertTest(TestCase):
def test_no_extra_params(self):
"""
        Can create an instance of a model with only the PK field (#17056).
"""
DumbCategory.objects.create()
class ExcludeTests(TestCase):
@classmethod
def setUpTestData(cls):
f1 = Food.objects.create(name="apples")
cls.f2 = Food.objects.create(name="oranges")
Eaten.objects.create(food=f1, meal="dinner")
cls.j1 = Job.objects.create(name="Manager")
cls.r1 = Responsibility.objects.create(description="Playing golf")
cls.j2 = Job.objects.create(name="Programmer")
cls.r2 = Responsibility.objects.create(description="Programming")
JobResponsibilities.objects.create(job=cls.j1, responsibility=cls.r1)
JobResponsibilities.objects.create(job=cls.j2, responsibility=cls.r2)
def test_to_field(self):
self.assertSequenceEqual(
Food.objects.exclude(eaten__meal="dinner"),
[self.f2],
)
self.assertSequenceEqual(
Job.objects.exclude(responsibilities__description="Playing golf"),
[self.j2],
)
self.assertSequenceEqual(
Responsibility.objects.exclude(jobs__name="Manager"),
[self.r2],
)
def test_ticket14511(self):
alex = Person.objects.get_or_create(name="Alex")[0]
jane = Person.objects.get_or_create(name="Jane")[0]
oracle = Company.objects.get_or_create(name="Oracle")[0]
google = Company.objects.get_or_create(name="Google")[0]
microsoft = Company.objects.get_or_create(name="Microsoft")[0]
intel = Company.objects.get_or_create(name="Intel")[0]
def employ(employer, employee, title):
Employment.objects.get_or_create(
employee=employee, employer=employer, title=title
)
employ(oracle, alex, "Engineer")
employ(oracle, alex, "Developer")
employ(google, alex, "Engineer")
employ(google, alex, "Manager")
employ(microsoft, alex, "Manager")
employ(intel, alex, "Manager")
employ(microsoft, jane, "Developer")
employ(intel, jane, "Manager")
alex_tech_employers = (
alex.employers.filter(employment__title__in=("Engineer", "Developer"))
.distinct()
.order_by("name")
)
self.assertSequenceEqual(alex_tech_employers, [google, oracle])
alex_nontech_employers = (
alex.employers.exclude(employment__title__in=("Engineer", "Developer"))
.distinct()
.order_by("name")
)
self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft])
def test_exclude_reverse_fk_field_ref(self):
tag = Tag.objects.create()
Note.objects.create(tag=tag, note="note")
annotation = Annotation.objects.create(name="annotation", tag=tag)
self.assertEqual(
Annotation.objects.exclude(tag__note__note=F("name")).get(), annotation
)
def test_exclude_with_circular_fk_relation(self):
self.assertEqual(
ObjectB.objects.exclude(objecta__objectb__name=F("name")).count(), 0
)
def test_subquery_exclude_outerref(self):
qs = JobResponsibilities.objects.filter(
Exists(Responsibility.objects.exclude(jobs=OuterRef("job"))),
)
self.assertTrue(qs.exists())
self.r1.delete()
self.assertFalse(qs.exists())
def test_exclude_nullable_fields(self):
number = Number.objects.create(num=1, other_num=1)
Number.objects.create(num=2, other_num=2, another_num=2)
self.assertSequenceEqual(
Number.objects.exclude(other_num=F("another_num")),
[number],
)
self.assertSequenceEqual(
Number.objects.exclude(num=F("another_num")),
[number],
)
def test_exclude_multivalued_exists(self):
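        # exclude() across a multi-valued relation is implemented with a
        # NOT EXISTS (...) subquery rather than a JOIN, which the captured
        # SQL below confirms.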
with CaptureQueriesContext(connection) as captured_queries:
self.assertSequenceEqual(
Job.objects.exclude(responsibilities__description="Programming"),
[self.j1],
)
self.assertIn("exists", captured_queries[0]["sql"].lower())
def test_exclude_subquery(self):
subquery = JobResponsibilities.objects.filter(
responsibility__description="bar",
) | JobResponsibilities.objects.exclude(
job__responsibilities__description="foo",
)
        self.assertCountEqual(
            Job.objects.annotate(
                responsibility=subquery.filter(job=OuterRef("name")).values("id")[:1]
            ),
            [self.j1, self.j2],
        )
@ignore_warnings(category=RemovedInDjango50Warning)
def test_exclude_unsaved_o2o_object(self):
jack = Staff.objects.create(name="jack")
jack_staff = StaffUser.objects.create(staff=jack)
unsaved_object = Staff(name="jane")
self.assertIsNone(unsaved_object.pk)
self.assertSequenceEqual(
StaffUser.objects.exclude(staff=unsaved_object), [jack_staff]
)
def test_exclude_unsaved_object(self):
        # In Django 5.0, when passing unsaved model instances to related
        # filters becomes forbidden, these assertions should check for a
        # ValueError with the message below instead.
        # msg = "Model instances passed to related filters must be saved."
company = Company.objects.create(name="Django")
msg = "Passing unsaved model instances to related filters is deprecated."
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.exclude(employer=Company(name="unsaved"))
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
Employment.objects.exclude(employer__in=[company, Company(name="unsaved")])
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
StaffUser.objects.exclude(staff=Staff(name="unsaved"))
class ExcludeTest17600(TestCase):
"""
    Some regression tests for ticket #17600. Some of these likely duplicate
    other existing tests.
"""
@classmethod
def setUpTestData(cls):
# Create a few Orders.
cls.o1 = Order.objects.create(pk=1)
cls.o2 = Order.objects.create(pk=2)
cls.o3 = Order.objects.create(pk=3)
# Create some OrderItems for the first order with homogeneous
# status_id values
cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)
# Create some OrderItems for the second order with heterogeneous
# status_id values
cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
        # status_id values
cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)
def test_exclude_plain(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(items__status=1),
[self.o3],
)
def test_exclude_plain_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(items__status=1).distinct(),
[self.o3],
)
def test_exclude_with_q_object_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(Q(items__status=1)).distinct(),
[self.o3],
)
def test_exclude_with_q_object_no_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertSequenceEqual(
Order.objects.exclude(Q(items__status=1)),
[self.o3],
)
def test_exclude_with_q_is_equal_to_plain_exclude(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1).distinct()),
list(Order.objects.exclude(Q(items__status=1)).distinct()),
)
def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1)),
list(Order.objects.exclude(Q(items__status=1)).distinct()),
)
@unittest.expectedFailure
def test_only_orders_with_all_items_having_status_1(self):
"""
This should only return orders having ALL items set to status 1, or
those items not having any orders at all. The correct way to write
this query in SQL seems to be using two nested subqueries.
"""
self.assertQuerysetEqual(
Order.objects.exclude(~Q(items__status=1)).distinct(),
[self.o1],
)
class Exclude15786(TestCase):
"""Regression test for #15786"""
def test_ticket15786(self):
c1 = SimpleCategory.objects.create(name="c1")
c2 = SimpleCategory.objects.create(name="c2")
OneToOneCategory.objects.create(category=c1)
OneToOneCategory.objects.create(category=c2)
rel = CategoryRelationship.objects.create(first=c1, second=c2)
self.assertEqual(
CategoryRelationship.objects.exclude(
first__onetoonecategory=F("second__onetoonecategory")
).get(),
rel,
)
class NullInExcludeTest(TestCase):
@classmethod
def setUpTestData(cls):
NullableName.objects.create(name="i1")
NullableName.objects.create()
def test_null_in_exclude_qs(self):
none_val = "" if connection.features.interprets_empty_strings_as_nulls else None
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[]),
["i1", none_val],
attrgetter("name"),
)
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=["i1"]),
[none_val],
attrgetter("name"),
)
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=["i3"]),
["i1", none_val],
attrgetter("name"),
)
inner_qs = NullableName.objects.filter(name="i1").values_list("name")
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=inner_qs),
[none_val],
attrgetter("name"),
)
        # The inner queryset wasn't executed - it should have been turned
        # into a subquery above.
self.assertIs(inner_qs._result_cache, None)
@unittest.expectedFailure
def test_col_not_in_list_containing_null(self):
"""
The following case is not handled properly because
SQL's COL NOT IN (list containing null) handling is too weird to
abstract away.
"""
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[None]), ["i1"], attrgetter("name")
)
def test_double_exclude(self):
self.assertEqual(
list(NullableName.objects.filter(~~Q(name="i1"))),
list(NullableName.objects.filter(Q(name="i1"))),
)
self.assertNotIn(
"IS NOT NULL", str(NullableName.objects.filter(~~Q(name="i1")).query)
)
class EmptyStringsAsNullTest(TestCase):
"""
Filtering on non-null character fields works as expected.
The reason for these tests is that Oracle treats '' as NULL, and this
can cause problems in query construction. Refs #17957.
"""
@classmethod
def setUpTestData(cls):
cls.nc = NamedCategory.objects.create(name="")
def test_direct_exclude(self):
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name__in=["nonexistent"]),
[self.nc.pk],
attrgetter("pk"),
)
def test_joined_exclude(self):
self.assertQuerysetEqual(
DumbCategory.objects.exclude(namedcategory__name__in=["nonexistent"]),
[self.nc.pk],
attrgetter("pk"),
)
def test_21001(self):
foo = NamedCategory.objects.create(name="foo")
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name=""), [foo.pk], attrgetter("pk")
)
class ProxyQueryCleanupTest(TestCase):
def test_evaluated_proxy_count(self):
"""
Generating the query string doesn't alter the query's state
in irreversible ways. Refs #18248.
"""
ProxyCategory.objects.create()
qs = ProxyCategory.objects.all()
self.assertEqual(qs.count(), 1)
str(qs.query)
self.assertEqual(qs.count(), 1)
class WhereNodeTest(SimpleTestCase):
class DummyNode:
def as_sql(self, compiler, connection):
return "dummy", []
class MockCompiler:
def compile(self, node):
return node.as_sql(self, connection)
def __call__(self, name):
return connection.ops.quote_name(name)
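    # In the assertions below, raising EmptyResultSet means "no row can
    # match" (the compiler can skip the query entirely), while the pair
    # ("", []) means "no constraint" (every row matches).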
def test_empty_full_handling_conjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
self.assertEqual(w.as_sql(compiler, connection), ("(dummy AND dummy)", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy AND dummy)", []))
w = WhereNode(children=[NothingNode(), self.DummyNode()])
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
def test_empty_full_handling_disjunction(self):
compiler = WhereNodeTest.MockCompiler()
w = WhereNode(children=[NothingNode()], connector="OR")
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector="OR")
self.assertEqual(w.as_sql(compiler, connection), ("(dummy OR dummy)", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy OR dummy)", []))
w = WhereNode(children=[NothingNode(), self.DummyNode()], connector="OR")
self.assertEqual(w.as_sql(compiler, connection), ("dummy", []))
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy)", []))
def test_empty_nodes(self):
compiler = WhereNodeTest.MockCompiler()
empty_w = WhereNode()
w = WhereNode(children=[empty_w, empty_w])
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w.negate()
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.connector = "OR"
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
w.negate()
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[empty_w, NothingNode()], connector="OR")
self.assertEqual(w.as_sql(compiler, connection), ("", []))
w = WhereNode(children=[empty_w, NothingNode()], connector="AND")
with self.assertRaises(EmptyResultSet):
w.as_sql(compiler, connection)
class QuerySetExceptionTests(SimpleTestCase):
def test_iter_exceptions(self):
qs = ExtraInfo.objects.only("author")
msg = "'ManyToOneRel' object has no attribute 'attname'"
with self.assertRaisesMessage(AttributeError, msg):
list(qs)
def test_invalid_order_by(self):
msg = "Cannot resolve keyword '*' into field. Choices are: created, id, name"
with self.assertRaisesMessage(FieldError, msg):
Article.objects.order_by("*")
def test_invalid_order_by_raw_column_alias(self):
msg = (
"Cannot resolve keyword 'queries_author.name' into field. Choices "
"are: cover, created, creator, creator_id, id, modified, name, "
"note, note_id, tags"
)
with self.assertRaisesMessage(FieldError, msg):
Item.objects.values("creator__name").order_by("queries_author.name")
def test_invalid_queryset_model(self):
msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".'
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.filter(extra=Article.objects.all()))
class NullJoinPromotionOrTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.d1 = ModelD.objects.create(name="foo")
d2 = ModelD.objects.create(name="bar")
cls.a1 = ModelA.objects.create(name="a1", d=cls.d1)
c = ModelC.objects.create(name="c")
b = ModelB.objects.create(name="b", c=c)
cls.a2 = ModelA.objects.create(name="a2", b=b, d=d2)
def test_ticket_17886(self):
        # The first Q-object generates the match; the rest of the filters
        # should not remove it even if they match nothing. The problem here
        # was that b__name generates a LOUTER JOIN, then b__c__name
        # generates a join to c, which the ORM tried to promote but failed,
        # as that join isn't nullable.
q_obj = Q(d__name="foo") | Q(b__name="foo") | Q(b__c__name="foo")
qset = ModelA.objects.filter(q_obj)
self.assertEqual(list(qset), [self.a1])
# We generate one INNER JOIN to D. The join is direct and not nullable
# so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
# for the b->c join, as a->b is nullable.
self.assertEqual(str(qset.query).count("INNER JOIN"), 1)
def test_isnull_filter_promotion(self):
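        # Filtering on b__name__isnull=True must keep rows that have no b
        # at all, which requires a LEFT OUTER JOIN; each negation below
        # flips the join type between LOUTER and INNER.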
qs = ModelA.objects.filter(Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("LEFT OUTER"), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(list(qs), [self.a2])
def test_null_join_demotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
self.assertIn(" INNER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
self.assertIn(" INNER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_ticket_21366(self):
n = Note.objects.create(note="n", misc="m")
e = ExtraInfo.objects.create(info="info", note=n)
a = Author.objects.create(name="Author1", num=1, extra=e)
Ranking.objects.create(rank=1, author=a)
r1 = Report.objects.create(name="Foo", creator=a)
r2 = Report.objects.create(name="Bar")
Report.objects.create(name="Bar", creator=a)
qs = Report.objects.filter(
Q(creator__ranking__isnull=True) | Q(creator__ranking__rank=1, name="Foo")
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count(" JOIN "), 2)
self.assertSequenceEqual(qs.order_by("name"), [r2, r1])
def test_ticket_21748(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
i3 = Identifier.objects.create(name="i3")
Program.objects.create(identifier=i1)
Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
self.assertSequenceEqual(
Identifier.objects.filter(program=None, channel=None), [i3]
)
self.assertSequenceEqual(
Identifier.objects.exclude(program=None, channel=None).order_by("name"),
[i1, i2],
)
def test_ticket_21748_double_negated_and(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
        # Check that ~~Q() (or equivalently .exclude(~Q())) works like Q()
        # for join promotion.
qs1_doubleneg = Identifier.objects.exclude(
~Q(program__id=p1.id, channel__id=c1.id)
).order_by("pk")
qs1_filter = Identifier.objects.filter(
program__id=p1.id, channel__id=c1.id
).order_by("pk")
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(
str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN")
)
self.assertEqual(2, str(qs1_doubleneg.query).count("INNER JOIN"))
self.assertEqual(
str(qs1_filter.query).count("INNER JOIN"),
str(qs1_doubleneg.query).count("INNER JOIN"),
)
def test_ticket_21748_double_negated_or(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Test OR + double negation. The expected result is that channel is
        # LOUTER joined and program INNER joined.
qs1_filter = Identifier.objects.filter(
Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
).order_by("pk")
qs1_doubleneg = Identifier.objects.exclude(
~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
).order_by("pk")
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(
str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN")
)
self.assertEqual(1, str(qs1_doubleneg.query).count("INNER JOIN"))
self.assertEqual(
str(qs1_filter.query).count("INNER JOIN"),
str(qs1_doubleneg.query).count("INNER JOIN"),
)
def test_ticket_21748_complex_filter(self):
i1 = Identifier.objects.create(name="i1")
i2 = Identifier.objects.create(name="i2")
Identifier.objects.create(name="i3")
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case: one query where each NOT is pushed
        # to the lowest level of the boolean tree, and another where it
        # isn't.
qs1 = Identifier.objects.filter(
~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
).order_by("pk")
qs2 = Identifier.objects.filter(
Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
).order_by("pk")
self.assertQuerysetEqual(qs1, qs2, lambda x: x)
self.assertEqual(str(qs1.query).count("JOIN"), str(qs2.query).count("JOIN"))
self.assertEqual(0, str(qs1.query).count("INNER JOIN"))
self.assertEqual(
str(qs1.query).count("INNER JOIN"), str(qs2.query).count("INNER JOIN")
)
class ReverseJoinTrimmingTest(TestCase):
def test_reverse_trimming(self):
# We don't accidentally trim reverse joins - we can't know if there is
# anything on the other side of the join, so trimming reverse joins
# can't be done, ever.
t = Tag.objects.create()
qs = Tag.objects.filter(annotation__tag=t.pk)
self.assertIn("INNER JOIN", str(qs.query))
self.assertEqual(list(qs), [])
class JoinReuseTest(TestCase):
"""
The queries reuse joins sensibly (for example, direct joins
are always reused).
"""
def test_fk_reuse(self):
qs = Annotation.objects.filter(tag__name="foo").filter(tag__name="bar")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_select_related(self):
qs = Annotation.objects.filter(tag__name="foo").select_related("tag")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_annotation(self):
qs = Annotation.objects.filter(tag__name="foo").annotate(cnt=Count("tag__name"))
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_disjunction(self):
qs = Annotation.objects.filter(Q(tag__name="foo") | Q(tag__name="bar"))
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_fk_reuse_order_by(self):
qs = Annotation.objects.filter(tag__name="foo").order_by("tag__name")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_revo2o_reuse(self):
qs = Detail.objects.filter(member__name="foo").filter(member__name="foo")
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_revfk_noreuse(self):
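        # Each filter() on a multi-valued (reverse FK) relation may match a
        # different related row, so a separate join is needed per call.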
qs = Author.objects.filter(report__name="r4").filter(report__name="r1")
self.assertEqual(str(qs.query).count("JOIN"), 2)
def test_inverted_q_across_relations(self):
"""
When a trimmable join is specified in the query (here school__), the
ORM detects it and removes unnecessary joins. The set of reusable joins
are updated after trimming the query so that other lookups don't
consider that the outer query's filters are in effect for the subquery
(#26551).
"""
        springfield_elementary = School.objects.create()
        hogwarts = School.objects.create()
        Student.objects.create(school=springfield_elementary)
        hp = Student.objects.create(school=hogwarts)
        Classroom.objects.create(school=hogwarts, name="Potion")
Classroom.objects.create(school=springfield_elementary, name="Main")
qs = Student.objects.filter(
~(
Q(school__classroom__name="Main")
& Q(school__classroom__has_blackboard=None)
)
)
self.assertSequenceEqual(qs, [hp])
class DisjunctionPromotionTests(TestCase):
def test_disjunction_promotion_select_related(self):
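        # select_related() across nullable FKs always uses LEFT OUTER JOINs
        # so that rows whose FK is NULL are still returned.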
fk1 = FK1.objects.create(f1="f1", f2="f2")
basea = BaseA.objects.create(a=fk1)
qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
self.assertEqual(str(qs.query).count(" JOIN "), 0)
qs = qs.select_related("a", "b")
self.assertEqual(str(qs.query).count(" INNER JOIN "), 0)
self.assertEqual(str(qs.query).count(" LEFT OUTER JOIN "), 2)
with self.assertNumQueries(1):
self.assertSequenceEqual(qs, [basea])
self.assertEqual(qs[0].a, fk1)
self.assertIs(qs[0].b, None)
def test_disjunction_promotion1(self):
# Pre-existing join, add two ORed filters to the same join,
# all joins can be INNER JOINS.
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(Q(b__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
# Reverse the order of AND and OR filters.
qs = BaseA.objects.filter(Q(b__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
def test_disjunction_promotion2(self):
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
# Now we have two different joins in an ORed condition, these
# must be OUTER joins. The pre-existing join should remain INNER.
qs = qs.filter(Q(b__f1="foo") | Q(c__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
# Reverse case.
qs = BaseA.objects.filter(Q(b__f1="foo") | Q(c__f2="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
def test_disjunction_promotion3(self):
qs = BaseA.objects.filter(a__f2="bar")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
        # The ANDed a__f2 filter allows us to keep using an INNER JOIN
        # even inside the ORed case. If the join to a__ returns nothing,
        # the ANDed filter for a__f2 can't be true.
qs = qs.filter(Q(a__f1="foo") | Q(b__f2="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion3_demote(self):
# This one needs demotion logic: the first filter causes a to be
# outer joined, the second filter makes it inner join again.
qs = BaseA.objects.filter(Q(a__f1="foo") | Q(b__f2="foo")).filter(a__f2="bar")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion4_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
# Demote needed for the "a" join. It is marked as outer join by
# above filter (even if it is trimmed away).
qs = qs.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion4(self):
qs = BaseA.objects.filter(a__f1="foo")
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion5_demote(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
# Note that the above filters on a force the join to an
# inner join even if it is trimmed.
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = qs.filter(Q(a__f1="foo") | Q(b__f1="foo"))
# So, now the a__f1 join doesn't need promotion.
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
# But b__f1 does.
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
qs = BaseA.objects.filter(Q(a__f1="foo") | Q(b__f1="foo"))
# Now the join to a is created as LOUTER
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
def test_disjunction_promotion6(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") & Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") & Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("INNER JOIN"), 2)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0)
def test_disjunction_promotion7(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count("JOIN"), 0)
qs = BaseA.objects.filter(Q(a__f1="foo") | (Q(b__f1="foo") & Q(a__f1="bar")))
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
qs = BaseA.objects.filter(
(Q(a__f1="foo") | Q(b__f1="foo")) & (Q(a__f1="bar") | Q(c__f1="foo"))
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
self.assertEqual(str(qs.query).count("INNER JOIN"), 0)
qs = BaseA.objects.filter(
Q(a__f1="foo") | Q(a__f1="bar") & (Q(b__f1="bar") | Q(c__f1="foo"))
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
def test_disjunction_promotion_fexpression(self):
qs = BaseA.objects.filter(Q(a__f1=F("b__f1")) | Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(qs.query).count("INNER JOIN"), 1)
qs = BaseA.objects.filter(Q(a__f1=F("c__f1")) | Q(b__f1="foo"))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
qs = BaseA.objects.filter(
Q(a__f1=F("b__f1")) | Q(a__f2=F("b__f2")) | Q(c__f1="foo")
)
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3)
qs = BaseA.objects.filter(Q(a__f1=F("c__f1")) | (Q(pk=1) & Q(pk=2)))
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2)
self.assertEqual(str(qs.query).count("INNER JOIN"), 0)
class ManyToManyExcludeTest(TestCase):
def test_exclude_many_to_many(self):
i_extra = Identifier.objects.create(name="extra")
i_program = Identifier.objects.create(name="program")
program = Program.objects.create(identifier=i_program)
i_channel = Identifier.objects.create(name="channel")
channel = Channel.objects.create(identifier=i_channel)
channel.programs.add(program)
        # channel contains 'program', so all Identifiers except that one
        # should be returned.
self.assertSequenceEqual(
Identifier.objects.exclude(program__channel=channel).order_by("name"),
[i_channel, i_extra],
)
self.assertSequenceEqual(
Identifier.objects.exclude(program__channel=None).order_by("name"),
[i_program],
)
def test_ticket_12823(self):
pg3 = Page.objects.create(text="pg3")
pg2 = Page.objects.create(text="pg2")
pg1 = Page.objects.create(text="pg1")
pa1 = Paragraph.objects.create(text="pa1")
pa1.page.set([pg1, pg2])
pa2 = Paragraph.objects.create(text="pa2")
pa2.page.set([pg2, pg3])
pa3 = Paragraph.objects.create(text="pa3")
ch1 = Chapter.objects.create(title="ch1", paragraph=pa1)
ch2 = Chapter.objects.create(title="ch2", paragraph=pa2)
ch3 = Chapter.objects.create(title="ch3", paragraph=pa3)
b1 = Book.objects.create(title="b1", chapter=ch1)
b2 = Book.objects.create(title="b2", chapter=ch2)
b3 = Book.objects.create(title="b3", chapter=ch3)
q = Book.objects.exclude(chapter__paragraph__page__text="pg1")
self.assertNotIn("IS NOT NULL", str(q.query))
self.assertEqual(len(q), 2)
self.assertNotIn(b1, q)
self.assertIn(b2, q)
self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
def test_ticket_19964(self):
my1 = MyObject.objects.create(data="foo")
my1.parent = my1
my1.save()
my2 = MyObject.objects.create(data="bar", parent=my1)
parents = MyObject.objects.filter(parent=F("id"))
children = MyObject.objects.filter(parent__in=parents).exclude(parent=F("id"))
self.assertEqual(list(parents), [my1])
# Evaluating the children query (which has parents as part of it) does
# not change results for the parents query.
self.assertEqual(list(children), [my2])
self.assertEqual(list(parents), [my1])
class Ticket20101Tests(TestCase):
def test_ticket_20101(self):
"""
Tests QuerySet ORed combining in exclude subquery case.
"""
t = Tag.objects.create(name="foo")
a1 = Annotation.objects.create(tag=t, name="a1")
a2 = Annotation.objects.create(tag=t, name="a2")
a3 = Annotation.objects.create(tag=t, name="a3")
n = Note.objects.create(note="foo", misc="bar")
qs1 = Note.objects.exclude(annotation__in=[a1, a2])
qs2 = Note.objects.filter(annotation__in=[a3])
self.assertIn(n, qs1)
self.assertNotIn(n, qs2)
self.assertIn(n, (qs1 | qs2))
class EmptyStringPromotionTests(SimpleTestCase):
def test_empty_string_promotion(self):
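        # Oracle stores '' as NULL, so matching single__name="" must use a
        # LEFT OUTER JOIN to also keep rows whose joined column is NULL.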
qs = RelatedObject.objects.filter(single__name="")
if connection.features.interprets_empty_strings_as_nulls:
self.assertIn("LEFT OUTER JOIN", str(qs.query))
else:
self.assertNotIn("LEFT OUTER JOIN", str(qs.query))
class ValuesSubqueryTests(TestCase):
def test_values_in_subquery(self):
# If a values() queryset is used, then the given values
# will be used instead of forcing use of the relation's field.
o1 = Order.objects.create(id=-2)
o2 = Order.objects.create(id=-1)
oi1 = OrderItem.objects.create(order=o1, status=0)
oi1.status = oi1.pk
oi1.save()
OrderItem.objects.create(order=o2, status=0)
# The query below should match o1 as it has related order_item
# with id == status.
self.assertSequenceEqual(
Order.objects.filter(items__in=OrderItem.objects.values_list("status")),
[o1],
)
class DoubleInSubqueryTests(TestCase):
def test_double_subquery_in(self):
lfa1 = LeafA.objects.create(data="foo")
lfa2 = LeafA.objects.create(data="bar")
lfb1 = LeafB.objects.create(data="lfb1")
lfb2 = LeafB.objects.create(data="lfb2")
Join.objects.create(a=lfa1, b=lfb1)
Join.objects.create(a=lfa2, b=lfb2)
leaf_as = LeafA.objects.filter(data="foo").values_list("pk", flat=True)
joins = Join.objects.filter(a__in=leaf_as).values_list("b__id", flat=True)
qs = LeafB.objects.filter(pk__in=joins)
self.assertSequenceEqual(qs, [lfb1])
class Ticket18785Tests(SimpleTestCase):
def test_ticket_18785(self):
# Test join trimming from ticket18785
qs = (
Item.objects.exclude(note__isnull=False)
.filter(name="something", creator__extra__isnull=True)
.order_by()
)
self.assertEqual(1, str(qs.query).count("INNER JOIN"))
self.assertEqual(0, str(qs.query).count("OUTER JOIN"))
class Ticket20788Tests(TestCase):
def test_ticket_20788(self):
Paragraph.objects.create()
paragraph = Paragraph.objects.create()
page = paragraph.page.create()
chapter = Chapter.objects.create(paragraph=paragraph)
Book.objects.create(chapter=chapter)
paragraph2 = Paragraph.objects.create()
Page.objects.create()
chapter2 = Chapter.objects.create(paragraph=paragraph2)
book2 = Book.objects.create(chapter=chapter2)
sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page)
self.assertSequenceEqual(sentences_not_in_pub, [book2])
class Ticket12807Tests(TestCase):
def test_ticket_12807(self):
p1 = Paragraph.objects.create()
p2 = Paragraph.objects.create()
# The ORed condition below should have no effect on the query - the
# ~Q(pk__in=[]) will always be True.
qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk))
self.assertSequenceEqual(qs, [p1])
class RelatedLookupTypeTests(TestCase):
error = 'Cannot query "%s": Must be "%s" instance.'
@classmethod
def setUpTestData(cls):
cls.oa = ObjectA.objects.create(name="oa")
cls.poa = ProxyObjectA.objects.get(name="oa")
cls.coa = ChildObjectA.objects.create(name="coa")
cls.wrong_type = Order.objects.create(id=cls.oa.pk)
cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
cls.pob1 = ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
cls.pob = ProxyObjectB.objects.all()
cls.c = ObjectC.objects.create(childobjecta=cls.coa)
def test_wrong_type_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup.
"""
# Passing incorrect object type
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.get(objecta=self.wrong_type)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta__in=[self.wrong_type])
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta=self.wrong_type)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)
):
ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])
# Passing an object of the class on which query is done.
with self.assertRaisesMessage(
ValueError, self.error % (self.ob, ObjectA._meta.object_name)
):
ObjectB.objects.filter(objecta__in=[self.poa, self.ob])
with self.assertRaisesMessage(
ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)
):
ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])
def test_wrong_backward_lookup(self):
"""
A ValueError is raised when the incorrect object type is passed to a
query lookup for backward relations.
"""
with self.assertRaisesMessage(
ValueError, self.error % (self.oa, ObjectB._meta.object_name)
):
ObjectA.objects.filter(objectb__in=[self.oa, self.ob])
with self.assertRaisesMessage(
ValueError, self.error % (self.oa, ObjectB._meta.object_name)
):
ObjectA.objects.exclude(objectb=self.oa)
with self.assertRaisesMessage(
ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)
):
ObjectA.objects.get(objectb=self.wrong_type)
def test_correct_lookup(self):
"""
When passing proxy model objects, child objects, or parent objects,
lookups work fine.
"""
out_a = [self.oa]
out_b = [self.ob, self.pob1]
out_c = [self.c]
# proxy model objects
self.assertSequenceEqual(
ObjectB.objects.filter(objecta=self.poa).order_by("name"), out_b
)
self.assertSequenceEqual(
ObjectA.objects.filter(objectb__in=self.pob).order_by("pk"), out_a * 2
)
# child objects
self.assertSequenceEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
self.assertSequenceEqual(
ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by("name"),
out_b,
)
self.assertSequenceEqual(
ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by(
"name"
),
out_b,
)
# parent objects
self.assertSequenceEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)
# QuerySet related object type checking shouldn't issue queries
# (the querysets aren't evaluated here, hence zero queries) (#23266).
with self.assertNumQueries(0):
ObjectB.objects.filter(objecta__in=ObjectA.objects.all())
def test_values_queryset_lookup(self):
"""
ValueQuerySets are not checked for compatibility with the lookup field.
"""
# Make sure the num and objecta field values match.
ob = ObjectB.objects.get(name="ob")
ob.num = ob.objecta.pk
ob.save()
pob = ObjectB.objects.get(name="pob")
pob.num = pob.objecta.pk
pob.save()
self.assertSequenceEqual(
ObjectB.objects.filter(
objecta__in=ObjectB.objects.values_list("num")
).order_by("pk"),
[ob, pob],
)
class Ticket14056Tests(TestCase):
def test_ticket_14056(self):
s1 = SharedConnection.objects.create(data="s1")
s2 = SharedConnection.objects.create(data="s2")
s3 = SharedConnection.objects.create(data="s3")
PointerA.objects.create(connection=s2)
expected_ordering = (
[s1, s3, s2] if connection.features.nulls_order_largest else [s2, s1, s3]
)
self.assertSequenceEqual(
SharedConnection.objects.order_by("-pointera__connection", "pk"),
expected_ordering,
)
class Ticket20955Tests(TestCase):
def test_ticket_20955(self):
jack = Staff.objects.create(name="jackstaff")
jackstaff = StaffUser.objects.create(staff=jack)
jill = Staff.objects.create(name="jillstaff")
jillstaff = StaffUser.objects.create(staff=jill)
task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
task_get = Task.objects.get(pk=task.pk)
# Load data so that assertNumQueries doesn't complain about the get
# version's queries.
task_get.creator.staffuser.staff
task_get.owner.staffuser.staff
qs = Task.objects.select_related(
"creator__staffuser__staff", "owner__staffuser__staff"
)
self.assertEqual(str(qs.query).count(" JOIN "), 6)
task_select_related = qs.get(pk=task.pk)
with self.assertNumQueries(0):
self.assertEqual(
task_select_related.creator.staffuser.staff,
task_get.creator.staffuser.staff,
)
self.assertEqual(
task_select_related.owner.staffuser.staff,
task_get.owner.staffuser.staff,
)
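# Illustrative sketch: each of the two select_related() paths above has three
# hops, and each hop contributes one JOIN, so the compiled SQL is expected to
# contain exactly six " JOIN " fragments.
def _select_related_join_count_sketch():
    qs = Task.objects.select_related(
        "creator__staffuser__staff", "owner__staffuser__staff"
    )
    return str(qs.query).count(" JOIN ")  # 6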
class Ticket21203Tests(TestCase):
def test_ticket_21203(self):
p = Ticket21203Parent.objects.create(parent_bool=True)
c = Ticket21203Child.objects.create(parent=p)
qs = Ticket21203Child.objects.select_related("parent").defer("parent__created")
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].parent.parent_bool, True)
class ValuesJoinPromotionTests(TestCase):
def test_values_no_promotion_for_existing(self):
qs = Node.objects.filter(parent__parent__isnull=False)
self.assertIn(" INNER JOIN ", str(qs.query))
qs = qs.values("parent__parent__id")
self.assertIn(" INNER JOIN ", str(qs.query))
# Make sure there is a left outer join without the filter.
qs = Node.objects.values("parent__parent__id")
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = ObjectB.objects.values("objecta__name")
self.assertIn(" INNER JOIN ", str(qs.query))
def test_ticket_21376(self):
a = ObjectA.objects.create()
ObjectC.objects.create(objecta=a)
qs = ObjectC.objects.filter(
Q(objecta=a) | Q(objectb__objecta=a),
)
qs = qs.filter(
Q(objectb=1) | Q(objecta=a),
)
self.assertEqual(qs.count(), 1)
tblname = connection.ops.quote_name(ObjectB._meta.db_table)
self.assertIn(" LEFT OUTER JOIN %s" % tblname, str(qs.query))
class ForeignKeyToBaseExcludeTests(TestCase):
def test_ticket_21787(self):
sc1 = SpecialCategory.objects.create(special_name="sc1", name="sc1")
sc2 = SpecialCategory.objects.create(special_name="sc2", name="sc2")
sc3 = SpecialCategory.objects.create(special_name="sc3", name="sc3")
c1 = CategoryItem.objects.create(category=sc1)
CategoryItem.objects.create(category=sc2)
self.assertSequenceEqual(
SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by("name"),
[sc2, sc3],
)
self.assertSequenceEqual(
SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1]
)
class ReverseM2MCustomPkTests(TestCase):
def test_ticket_21879(self):
cpt1 = CustomPkTag.objects.create(id="cpt1", tag="cpt1")
cp1 = CustomPk.objects.create(name="cp1", extra="extra")
cp1.custompktag_set.add(cpt1)
self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1])
self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1])
class Ticket22429Tests(TestCase):
def test_ticket_22429(self):
sc1 = School.objects.create()
st1 = Student.objects.create(school=sc1)
sc2 = School.objects.create()
st2 = Student.objects.create(school=sc2)
cr = Classroom.objects.create(school=sc1)
cr.students.add(st1)
queryset = Student.objects.filter(~Q(classroom__school=F("school")))
self.assertSequenceEqual(queryset, [st2])
class Ticket23605Tests(TestCase):
def test_ticket_23605(self):
        # Test filtering on a complicated Q object from the ticket's report.
# The query structure is such that we have multiple nested subqueries.
# The original problem was that the inner queries weren't relabeled
# correctly.
# See also #24090.
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=10000.0)
Ticket23605B.objects.create(
field_b0=10000.0, field_b1=True, modelc_fk=c1, modela_fk=a1
)
complex_q = Q(
pk__in=Ticket23605A.objects.filter(
Q(
                    # True for a1 as field_b0=10000 and field_c0=10000.
                    # False for a2 as it has no related ticket23605b.
ticket23605b__field_b0__gte=1000000
/ F("ticket23605b__modelc_fk__field_c0")
)
&
# True for a1 (field_b1=True)
Q(ticket23605b__field_b1=True)
& ~Q(
ticket23605b__pk__in=Ticket23605B.objects.filter(
~(
                            # Same filters as the commented filters above,
                            # but double-negated (once by the outer ~Q() and
                            # once by this ~()), so again a1 matches and a2
                            # doesn't.
Q(field_b1=True)
& Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
)
)
)
).filter(ticket23605b__field_b1=True)
)
qs1 = Ticket23605A.objects.filter(complex_q)
self.assertSequenceEqual(qs1, [a1])
qs2 = Ticket23605A.objects.exclude(complex_q)
self.assertSequenceEqual(qs2, [a2])
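# Hypothetical sketch: the property exercised above is that exclude(q) is the
# exact complement of filter(q), even when q nests subqueries whose aliases
# must be relabeled during the negation.
def _filter_exclude_complement_sketch(q):
    matched = set(Ticket23605A.objects.filter(q))
    excluded = set(Ticket23605A.objects.exclude(q))
    return matched.isdisjoint(excluded)  # True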
class TestTicket24279(TestCase):
    def test_ticket_24279(self):
School.objects.create()
qs = School.objects.filter(Q(pk__in=()) | Q())
self.assertQuerysetEqual(qs, [])
class TestInvalidValuesRelation(SimpleTestCase):
def test_invalid_values(self):
msg = "Field 'id' expected a number but got 'abc'."
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag="abc")
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag__in=[123, "abc"])
class TestTicket24605(TestCase):
def test_ticket_24605(self):
"""
Subquery table names should be quoted.
"""
i1 = Individual.objects.create(alive=True)
RelatedIndividual.objects.create(related=i1)
i2 = Individual.objects.create(alive=False)
RelatedIndividual.objects.create(related=i2)
i3 = Individual.objects.create(alive=True)
i4 = Individual.objects.create(alive=False)
self.assertSequenceEqual(
Individual.objects.filter(
Q(alive=False), Q(related_individual__isnull=True)
),
[i4],
)
self.assertSequenceEqual(
Individual.objects.exclude(
Q(alive=False), Q(related_individual__isnull=True)
).order_by("pk"),
[i1, i2, i3],
)
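# Hypothetical sketch: the exclude() above compiles to a negated subquery, and
# the docstring's point is that the subquery's table name must be quoted in
# the generated SQL; str(qs.query) makes that visible for inspection.
def _excluded_subquery_sql_sketch():
    qs = Individual.objects.exclude(
        Q(alive=False), Q(related_individual__isnull=True)
    )
    return str(qs.query)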
class Ticket23622Tests(TestCase):
@skipUnlessDBFeature("can_distinct_on_fields")
def test_ticket_23622(self):
"""
Make sure __pk__in and __in work the same for related fields when
        using a DISTINCT ON subquery.
"""
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=0.0)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=123,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=23,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=234,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=12,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=567,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=76,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=7,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=56,
field_b1=True,
modelc_fk=c1,
)
qx = Q(
ticket23605b__pk__in=Ticket23605B.objects.order_by(
"modela_fk", "-field_b1"
).distinct("modela_fk")
) & Q(ticket23605b__field_b0__gte=300)
qy = Q(
ticket23605b__in=Ticket23605B.objects.order_by(
"modela_fk", "-field_b1"
).distinct("modela_fk")
) & Q(ticket23605b__field_b0__gte=300)
self.assertEqual(
set(Ticket23605A.objects.filter(qx).values_list("pk", flat=True)),
set(Ticket23605A.objects.filter(qy).values_list("pk", flat=True)),
)
self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
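# Hypothetical sketch (assumes a backend with DISTINCT ON support, per the
# skip decorator above): both spellings in the test build the same
# one-row-per-modela_fk subquery, whether written as __in or __pk__in.
def _distinct_on_subquery_sketch():
    newest_per_a = Ticket23605B.objects.order_by(
        "modela_fk", "-field_b1"
    ).distinct("modela_fk")
    return Ticket23605A.objects.filter(ticket23605b__in=newest_per_a)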
054a73a4a2e81b7fc585f5591d89cc90a97edae86683f4db7575be86b38ca735
import operator
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import Exists, F, IntegerField, OuterRef, Subquery, Value
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import Author, Celebrity, ExtraInfo, Number, ReservedName
@skipUnlessDBFeature("supports_select_union")
class QuerySetSetOperationTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.bulk_create(Number(num=i, other_num=10 - i) for i in range(10))
def assertNumbersEqual(self, queryset, expected_numbers, ordered=True):
self.assertQuerysetEqual(
queryset, expected_numbers, operator.attrgetter("num"), ordered
)
def test_simple_union(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=8)
qs3 = Number.objects.filter(num=5)
self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False)
@skipUnlessDBFeature("supports_select_intersection")
def test_simple_intersection(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__gte=5)
qs3 = Number.objects.filter(num__gte=4, num__lte=6)
self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False)
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.intersection(qs1).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.intersection(qs1).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
@skipUnlessDBFeature("supports_select_difference")
def test_simple_difference(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__lte=4)
self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False)
def test_union_distinct(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2, all=True))), 20)
self.assertEqual(len(list(qs1.union(qs2))), 10)
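    def _union_all_sketch(self):
        # Hypothetical helper: union() emits SQL UNION, which de-duplicates
        # rows, while all=True emits UNION ALL and keeps duplicates, hence
        # the 10 vs. 20 counts asserted above.
        qs = Number.objects.all()
        return qs.union(qs).count(), qs.union(qs, all=True).count()  # (10, 20)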
def test_union_none(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=8)
qs3 = qs1.union(qs2)
self.assertSequenceEqual(qs3.none(), [])
self.assertNumbersEqual(qs3, [0, 1, 8, 9], ordered=False)
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.intersection(qs2)), 0)
self.assertEqual(len(qs1.intersection(qs3)), 0)
self.assertEqual(len(qs2.intersection(qs1)), 0)
self.assertEqual(len(qs3.intersection(qs1)), 0)
self.assertEqual(len(qs2.intersection(qs2)), 0)
self.assertEqual(len(qs3.intersection(qs3)), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_difference_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.difference(qs2)), 10)
self.assertEqual(len(qs1.difference(qs3)), 10)
self.assertEqual(len(qs2.difference(qs1)), 0)
self.assertEqual(len(qs3.difference(qs1)), 0)
self.assertEqual(len(qs2.difference(qs2)), 0)
self.assertEqual(len(qs3.difference(qs3)), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_difference_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
qs2 = ReservedName.objects.none()
reserved_name = qs1.difference(qs2).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.difference(qs2).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
def test_union_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.union(qs2)), 10)
self.assertEqual(len(qs2.union(qs1)), 10)
self.assertEqual(len(qs1.union(qs3)), 10)
self.assertEqual(len(qs3.union(qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20)
self.assertEqual(len(qs2.union(qs2)), 0)
self.assertEqual(len(qs3.union(qs3)), 0)
def test_empty_qs_union_with_ordered_qs(self):
qs1 = Number.objects.order_by("num")
qs2 = Number.objects.none().union(qs1).order_by("num")
self.assertEqual(list(qs1), list(qs2))
def test_limits(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2)[:2])), 2)
def test_ordering(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2, num__lte=3)
self.assertNumbersEqual(qs1.union(qs2).order_by("-num"), [3, 2, 1, 0])
def test_ordering_by_alias(self):
qs1 = Number.objects.filter(num__lte=1).values(alias=F("num"))
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F("num"))
self.assertQuerysetEqual(
qs1.union(qs2).order_by("-alias"),
[3, 2, 1, 0],
operator.itemgetter("alias"),
)
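    def _order_by_alias_sketch(self):
        # Hypothetical helper: a combined queryset can only be ordered by
        # names present in its result set, so both sides are given the same
        # alias via values(alias=F(...)) before the union.
        qs1 = Number.objects.filter(num__lte=1).values(alias=F("num"))
        qs2 = Number.objects.filter(num__gte=8).values(alias=F("num"))
        return qs1.union(qs2).order_by("-alias")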
def test_ordering_by_f_expression(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2, num__lte=3)
self.assertNumbersEqual(qs1.union(qs2).order_by(F("num").desc()), [3, 2, 1, 0])
def test_ordering_by_f_expression_and_alias(self):
qs1 = Number.objects.filter(num__lte=1).values(alias=F("other_num"))
        qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(
            alias=F("other_num")
        )
self.assertQuerysetEqual(
qs1.union(qs2).order_by(F("alias").desc()),
[10, 9, 8, 7],
operator.itemgetter("alias"),
)
Number.objects.create(num=-1)
self.assertQuerysetEqual(
qs1.union(qs2).order_by(F("alias").desc(nulls_last=True)),
[10, 9, 8, 7, None],
operator.itemgetter("alias"),
)
def test_union_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.union(qs1).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.union(qs1).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
# List of columns can be changed.
reserved_name = qs1.union(qs1).values_list("order").get()
self.assertEqual(reserved_name, (2,))
def test_union_with_two_annotated_values_list(self):
qs1 = (
Number.objects.filter(num=1)
.annotate(
count=Value(0, IntegerField()),
)
.values_list("num", "count")
)
qs2 = (
Number.objects.filter(num=2)
.values("pk")
.annotate(
count=F("num"),
)
.annotate(
num=Value(1, IntegerField()),
)
.values_list("num", "count")
)
self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
def test_union_with_extra_and_values_list(self):
qs1 = (
Number.objects.filter(num=1)
.extra(
select={"count": 0},
)
.values_list("num", "count")
)
qs2 = Number.objects.filter(num=2).extra(select={"count": 1})
self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
def test_union_with_values_list_on_annotated_and_unannotated(self):
ReservedName.objects.create(name="rn1", order=1)
qs1 = Number.objects.annotate(
has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef("num")))
).filter(has_reserved_name=True)
qs2 = Number.objects.filter(num=9)
self.assertCountEqual(qs1.union(qs2).values_list("num", flat=True), [1, 9])
def test_union_with_values_list_and_order(self):
ReservedName.objects.bulk_create(
[
ReservedName(name="rn1", order=7),
ReservedName(name="rn2", order=5),
ReservedName(name="rn0", order=6),
ReservedName(name="rn9", order=-1),
]
)
qs1 = ReservedName.objects.filter(order__gte=6)
qs2 = ReservedName.objects.filter(order__lte=5)
union_qs = qs1.union(qs2)
for qs, expected_result in (
# Order by a single column.
(union_qs.order_by("-pk").values_list("order", flat=True), [-1, 6, 5, 7]),
(union_qs.order_by("pk").values_list("order", flat=True), [7, 5, 6, -1]),
(union_qs.values_list("order", flat=True).order_by("-pk"), [-1, 6, 5, 7]),
(union_qs.values_list("order", flat=True).order_by("pk"), [7, 5, 6, -1]),
# Order by multiple columns.
(
union_qs.order_by("-name", "pk").values_list("order", flat=True),
[-1, 5, 7, 6],
),
(
union_qs.values_list("order", flat=True).order_by("-name", "pk"),
[-1, 5, 7, 6],
),
):
with self.subTest(qs=qs):
self.assertEqual(list(qs), expected_result)
def test_union_with_values_list_and_order_on_annotation(self):
qs1 = Number.objects.annotate(
annotation=Value(-1),
multiplier=F("annotation"),
).filter(num__gte=6)
qs2 = Number.objects.annotate(
annotation=Value(2),
multiplier=F("annotation"),
).filter(num__lte=5)
self.assertSequenceEqual(
qs1.union(qs2).order_by("annotation", "num").values_list("num", flat=True),
[6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
)
self.assertQuerysetEqual(
qs1.union(qs2)
.order_by(
F("annotation") * F("multiplier"),
"num",
)
.values("num"),
[6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
operator.itemgetter("num"),
)
def test_union_multiple_models_with_values_list_and_order(self):
reserved_name = ReservedName.objects.create(name="rn1", order=0)
qs1 = Celebrity.objects.all()
qs2 = ReservedName.objects.all()
self.assertSequenceEqual(
qs1.union(qs2).order_by("name").values_list("pk", flat=True),
[reserved_name.pk],
)
def test_union_multiple_models_with_values_list_and_order_by_extra_select(self):
reserved_name = ReservedName.objects.create(name="rn1", order=0)
qs1 = Celebrity.objects.extra(select={"extra_name": "name"})
qs2 = ReservedName.objects.extra(select={"extra_name": "name"})
self.assertSequenceEqual(
qs1.union(qs2).order_by("extra_name").values_list("pk", flat=True),
[reserved_name.pk],
)
def test_union_in_subquery(self):
ReservedName.objects.bulk_create(
[
ReservedName(name="rn1", order=8),
ReservedName(name="rn2", order=1),
ReservedName(name="rn3", order=5),
]
)
qs1 = Number.objects.filter(num__gt=7, num=OuterRef("order"))
qs2 = Number.objects.filter(num__lt=2, num=OuterRef("order"))
self.assertCountEqual(
ReservedName.objects.annotate(
number=Subquery(qs1.union(qs2).values("num")),
)
.filter(number__isnull=False)
.values_list("order", flat=True),
[8, 1],
)
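    def _union_in_subquery_sketch(self):
        # Hypothetical helper: each branch of the union may reference the
        # outer queryset through OuterRef, and the combined queryset is then
        # wrapped in Subquery() for use as an annotation.
        qs1 = Number.objects.filter(num__gt=7, num=OuterRef("order"))
        qs2 = Number.objects.filter(num__lt=2, num=OuterRef("order"))
        return ReservedName.objects.annotate(
            number=Subquery(qs1.union(qs2).values("num")),
        )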
def test_union_in_subquery_related_outerref(self):
e1 = ExtraInfo.objects.create(value=7, info="e3")
e2 = ExtraInfo.objects.create(value=5, info="e2")
e3 = ExtraInfo.objects.create(value=1, info="e1")
Author.objects.bulk_create(
[
Author(name="a1", num=1, extra=e1),
Author(name="a2", num=3, extra=e2),
Author(name="a3", num=2, extra=e3),
]
)
qs1 = ExtraInfo.objects.order_by().filter(value=OuterRef("num"))
qs2 = ExtraInfo.objects.order_by().filter(value__lt=OuterRef("extra__value"))
qs = (
Author.objects.annotate(
info=Subquery(qs1.union(qs2).values("info")[:1]),
)
.filter(info__isnull=False)
.values_list("name", flat=True)
)
self.assertCountEqual(qs, ["a1", "a2"])
# Combined queries don't mutate.
self.assertCountEqual(qs, ["a1", "a2"])
def test_count_union(self):
qs1 = Number.objects.filter(num__lte=1).values("num")
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values("num")
self.assertEqual(qs1.union(qs2).count(), 4)
def test_count_union_empty_result(self):
qs = Number.objects.filter(pk__in=[])
self.assertEqual(qs.union(qs).count(), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_count_difference(self):
qs1 = Number.objects.filter(num__lt=10)
qs2 = Number.objects.filter(num__lt=9)
self.assertEqual(qs1.difference(qs2).count(), 1)
@skipUnlessDBFeature("supports_select_intersection")
def test_count_intersection(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__lte=5)
self.assertEqual(qs1.intersection(qs2).count(), 1)
def test_exists_union(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__lte=5)
with CaptureQueriesContext(connection) as context:
self.assertIs(qs1.union(qs2).exists(), True)
captured_queries = context.captured_queries
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(
connection.ops.quote_name(Number._meta.pk.column),
captured_sql,
)
self.assertEqual(
captured_sql.count(connection.ops.limit_offset_sql(None, 1)),
3 if connection.features.supports_slicing_ordering_in_compound else 1,
)
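    def _exists_union_sketch(self):
        # Hypothetical helper: exists() on a combined queryset clears the
        # selected columns and applies a LIMIT of 1, either once around the
        # whole compound statement or additionally per branch on backends
        # that allow slicing in compound queries, which is what the limit
        # clause count above distinguishes.
        qs1 = Number.objects.filter(num__gte=5)
        qs2 = Number.objects.filter(num__lte=5)
        return qs1.union(qs2).exists()  # True, via a single query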
def test_exists_union_empty_result(self):
qs = Number.objects.filter(pk__in=[])
self.assertIs(qs.union(qs).exists(), False)
@skipUnlessDBFeature("supports_select_intersection")
def test_exists_intersection(self):
qs1 = Number.objects.filter(num__gt=5)
qs2 = Number.objects.filter(num__lt=5)
self.assertIs(qs1.intersection(qs1).exists(), True)
self.assertIs(qs1.intersection(qs2).exists(), False)
@skipUnlessDBFeature("supports_select_difference")
def test_exists_difference(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__gte=3)
self.assertIs(qs1.difference(qs2).exists(), False)
self.assertIs(qs2.difference(qs1).exists(), True)
def test_get_union(self):
qs = Number.objects.filter(num=2)
self.assertEqual(qs.union(qs).get().num, 2)
@skipUnlessDBFeature("supports_select_difference")
def test_get_difference(self):
qs1 = Number.objects.all()
qs2 = Number.objects.exclude(num=2)
self.assertEqual(qs1.difference(qs2).get().num, 2)
@skipUnlessDBFeature("supports_select_intersection")
def test_get_intersection(self):
qs1 = Number.objects.all()
qs2 = Number.objects.filter(num=2)
self.assertEqual(qs1.intersection(qs2).get().num, 2)
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_ordering_subqueries(self):
qs1 = Number.objects.order_by("num")[:2]
qs2 = Number.objects.order_by("-num")[:2]
self.assertNumbersEqual(qs1.union(qs2).order_by("-num")[:4], [9, 8, 1, 0])
@skipIfDBFeature("supports_slicing_ordering_in_compound")
def test_unsupported_ordering_slicing_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
qs3 = Number.objects.all()
msg = "LIMIT/OFFSET not allowed in subqueries of compound statements"
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2[:10]))
msg = "ORDER BY not allowed in subqueries of compound statements"
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.order_by("id").union(qs2))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("id").union(qs3))
@skipIfDBFeature("supports_select_intersection")
def test_unsupported_intersection_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
msg = "intersection is not supported on this database backend"
with self.assertRaisesMessage(NotSupportedError, msg):
list(qs1.intersection(qs2))
def test_combining_multiple_models(self):
ReservedName.objects.create(name="99 little bugs", order=99)
qs1 = Number.objects.filter(num=1).values_list("num", flat=True)
qs2 = ReservedName.objects.values_list("order")
self.assertEqual(list(qs1.union(qs2).order_by("num")), [1, 99])
def test_order_raises_on_non_selected_column(self):
qs1 = (
Number.objects.filter()
.annotate(
annotation=Value(1, IntegerField()),
)
.values("annotation", num2=F("num"))
)
qs2 = Number.objects.filter().values("id", "num")
# Should not raise
list(qs1.union(qs2).order_by("annotation"))
list(qs1.union(qs2).order_by("num2"))
msg = "ORDER BY term does not match any column in the result set"
# 'id' is not part of the select
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("id"))
        # 'num' was re-aliased to 'num2'.
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("num"))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by(F("num")))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by(F("num").desc()))
        # With the operand order switched, 'num' is a selected column again:
list(qs2.union(qs1).order_by("num"))
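    def _selected_column_ordering_sketch(self):
        # Hypothetical helper: compound statements may only sort on names
        # present in the combined result set, so renaming a column via
        # values(num2=F("num")) changes which order_by() arguments are legal.
        qs1 = Number.objects.values("num")
        qs2 = Number.objects.values("num")
        return list(qs1.union(qs2).order_by("num"))  # "num" is selected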
@skipUnlessDBFeature("supports_select_difference", "supports_select_intersection")
def test_qs_with_subcompound_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.intersection(Number.objects.filter(num__gt=1))
self.assertEqual(qs1.difference(qs2).count(), 2)
def test_order_by_same_type(self):
qs = Number.objects.all()
union = qs.union(qs)
numbers = list(range(10))
self.assertNumbersEqual(union.order_by("num"), numbers)
self.assertNumbersEqual(union.order_by("other_num"), reversed(numbers))
def test_unsupported_operations_on_combined_qs(self):
qs = Number.objects.all()
msg = "Calling QuerySet.%s() after %s() is not supported."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
for combinator in combinators:
for operation in (
"alias",
"annotate",
"defer",
"delete",
"distinct",
"exclude",
"extra",
"filter",
"only",
"prefetch_related",
"select_related",
"update",
):
with self.subTest(combinator=combinator, operation=operation):
with self.assertRaisesMessage(
NotSupportedError,
msg % (operation, combinator),
):
getattr(getattr(qs, combinator)(qs), operation)()
with self.assertRaisesMessage(
NotSupportedError,
msg % ("contains", combinator),
):
obj = Number.objects.first()
getattr(qs, combinator)(qs).contains(obj)
def test_get_with_filters_unsupported_on_combined_qs(self):
qs = Number.objects.all()
msg = "Calling QuerySet.get(...) with filters after %s() is not supported."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
for combinator in combinators:
with self.subTest(combinator=combinator):
with self.assertRaisesMessage(NotSupportedError, msg % combinator):
getattr(qs, combinator)(qs).get(num=2)
def test_operator_on_combined_qs_error(self):
qs = Number.objects.all()
msg = "Cannot use %s operator with combined queryset."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
operators = [
("|", operator.or_),
("&", operator.and_),
("^", operator.xor),
]
for combinator in combinators:
combined_qs = getattr(qs, combinator)(qs)
for operator_, operator_func in operators:
with self.subTest(combinator=combinator):
with self.assertRaisesMessage(TypeError, msg % operator_):
operator_func(qs, combined_qs)
with self.assertRaisesMessage(TypeError, msg % operator_):
operator_func(combined_qs, qs)
5325a62054e1bee524777e3389d32a0e934fb4f0a5610a14806d1a420bcd95b3
from django.contrib.admin import ModelAdmin, TabularInline
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse
from .admin import InnerInline
from .admin import site as admin_site
from .models import (
Author,
BinaryTree,
Book,
BothVerboseNameProfile,
Chapter,
Child,
ChildModel1,
ChildModel2,
Fashionista,
FootNote,
Holder,
Holder2,
Holder3,
Holder4,
Inner,
Inner2,
Inner3,
Inner4Stacked,
Inner4Tabular,
Novel,
OutfitItem,
Parent,
ParentModelWithCustomPk,
Person,
Poll,
Profile,
ProfileCollection,
Question,
ShowInlineParent,
Sighting,
SomeChildModel,
SomeParentModel,
Teacher,
VerboseNamePluralProfile,
VerboseNameProfile,
)
INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>'
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", email="[email protected]", password="secret"
)
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInline(TestDataMixin, TestCase):
factory = RequestFactory()
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.holder = Holder.objects.create(dummy=13)
Inner.objects.create(dummy=42, holder=cls.holder)
cls.parent = SomeParentModel.objects.create(name="a")
SomeChildModel.objects.create(name="b", position="0", parent=cls.parent)
SomeChildModel.objects.create(name="c", position="1", parent=cls.parent)
cls.view_only_user = User.objects.create_user(
username="user",
password="pwd",
is_staff=True,
)
parent_ct = ContentType.objects.get_for_model(SomeParentModel)
child_ct = ContentType.objects.get_for_model(SomeChildModel)
permission = Permission.objects.get(
codename="view_someparentmodel",
content_type=parent_ct,
)
cls.view_only_user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="view_somechildmodel",
content_type=child_ct,
)
cls.view_only_user.user_permissions.add(permission)
def setUp(self):
self.client.force_login(self.superuser)
def test_can_delete(self):
"""
        can_delete should be passed through to inlineformset_factory.
"""
response = self.client.get(
reverse("admin:admin_inlines_holder_change", args=(self.holder.id,))
)
inner_formset = response.context["inline_admin_formsets"][0].formset
expected = InnerInline.can_delete
actual = inner_formset.can_delete
self.assertEqual(expected, actual, "can_delete must be equal")
def test_readonly_stacked_inline_label(self):
"""Bug #13174."""
holder = Holder.objects.create(dummy=42)
Inner.objects.create(holder=holder, dummy=42, readonly="")
response = self.client.get(
reverse("admin:admin_inlines_holder_change", args=(holder.id,))
)
self.assertContains(response, "<label>Inner readonly label:</label>")
def test_many_to_many_inlines(self):
"Autogenerated many-to-many inlines are displayed correctly (#13407)"
response = self.client.get(reverse("admin:admin_inlines_author_add"))
# The heading for the m2m inline block uses the right text
self.assertContains(response, "<h2>Author-book relationships</h2>")
# The "add another" label is correct
self.assertContains(response, "Add another Author-book relationship")
# The '+' is dropped from the autogenerated form prefix (Author_books+)
self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_primary(self):
person = Person.objects.create(firstname="Imelda")
item = OutfitItem.objects.create(name="Shoes")
# Imelda likes shoes, but can't carry her own bags.
data = {
"shoppingweakness_set-TOTAL_FORMS": 1,
"shoppingweakness_set-INITIAL_FORMS": 0,
"shoppingweakness_set-MAX_NUM_FORMS": 0,
"_save": "Save",
"person": person.id,
"max_weight": 0,
"shoppingweakness_set-0-item": item.id,
}
response = self.client.post(
reverse("admin:admin_inlines_fashionista_add"), data
)
self.assertEqual(response.status_code, 302)
        self.assertEqual(
            len(Fashionista.objects.filter(person__firstname="Imelda")), 1
        )
def test_tabular_inline_column_css_class(self):
"""
Field names are included in the context to output a field-specific
CSS class name in the column headers.
"""
response = self.client.get(reverse("admin:admin_inlines_poll_add"))
text_field, call_me_field = list(
response.context["inline_admin_formset"].fields()
)
# Editable field.
self.assertEqual(text_field["name"], "text")
self.assertContains(response, '<th class="column-text required">')
# Read-only field.
self.assertEqual(call_me_field["name"], "call_me")
self.assertContains(response, '<th class="column-call_me">')
def test_custom_form_tabular_inline_label(self):
"""
A model form with a form field specified (TitleForm.title1) should have
its label rendered in the tabular inline.
"""
response = self.client.get(reverse("admin:admin_inlines_titlecollection_add"))
self.assertContains(
response, '<th class="column-title1 required">Title1</th>', html=True
)
def test_custom_form_tabular_inline_extra_field_label(self):
response = self.client.get(reverse("admin:admin_inlines_outfititem_add"))
_, extra_field = list(response.context["inline_admin_formset"].fields())
self.assertEqual(extra_field["label"], "Extra field")
def test_non_editable_custom_form_tabular_inline_extra_field_label(self):
response = self.client.get(reverse("admin:admin_inlines_chapter_add"))
_, extra_field = list(response.context["inline_admin_formset"].fields())
self.assertEqual(extra_field["label"], "Extra field")
def test_custom_form_tabular_inline_overridden_label(self):
"""
SomeChildModelForm.__init__() overrides the label of a form field.
That label is displayed in the TabularInline.
"""
response = self.client.get(reverse("admin:admin_inlines_someparentmodel_add"))
field = list(response.context["inline_admin_formset"].fields())[0]
self.assertEqual(field["label"], "new label")
self.assertContains(
response, '<th class="column-name required">New label</th>', html=True
)
def test_tabular_non_field_errors(self):
"""
non_field_errors are displayed correctly, including the correct value
for colspan.
"""
data = {
"title_set-TOTAL_FORMS": 1,
"title_set-INITIAL_FORMS": 0,
"title_set-MAX_NUM_FORMS": 0,
"_save": "Save",
"title_set-0-title1": "a title",
"title_set-0-title2": "a different title",
}
response = self.client.post(
reverse("admin:admin_inlines_titlecollection_add"), data
)
# Here colspan is "4": two fields (title1 and title2), one hidden field
# and the delete checkbox.
self.assertContains(
response,
'<tr class="row-form-errors"><td colspan="4">'
'<ul class="errorlist nonfield">'
"<li>The two titles must be the same</li></ul></td></tr>",
)
def test_no_parent_callable_lookup(self):
"""Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
# Identically named callable isn't present in the parent ModelAdmin,
# rendering of the add view shouldn't explode
response = self.client.get(reverse("admin:admin_inlines_novel_add"))
# View should have the child inlines section
self.assertContains(
response,
'<div class="js-inline-admin-formset inline-group" id="chapter_set-group"',
)
def test_callable_lookup(self):
"""
Admin inline should invoke local callable when its name is listed in
readonly_fields.
"""
response = self.client.get(reverse("admin:admin_inlines_poll_add"))
# Add parent object view should have the child inlines section
self.assertContains(
response,
'<div class="js-inline-admin-formset inline-group" id="question_set-group"',
)
# The right callable should be used for the inline readonly_fields
# column cells
self.assertContains(response, "<p>Callable in QuestionInline</p>")
def test_model_error_inline_with_readonly_field(self):
poll = Poll.objects.create(name="Test poll")
data = {
"question_set-TOTAL_FORMS": 1,
"question_set-INITIAL_FORMS": 0,
"question_set-MAX_NUM_FORMS": 0,
"_save": "Save",
"question_set-0-text": "Question",
"question_set-0-poll": poll.pk,
}
response = self.client.post(
reverse("admin:admin_inlines_poll_change", args=(poll.pk,)),
data,
)
self.assertContains(response, "Always invalid model.")
def test_help_text(self):
"""
The inlines' model field help texts are displayed when using both the
stacked and tabular layouts.
"""
response = self.client.get(reverse("admin:admin_inlines_holder4_add"))
self.assertContains(
response, '<div class="help">Awesome stacked help text is awesome.</div>', 4
)
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Awesome tabular help text is awesome.)" '
'title="Awesome tabular help text is awesome.">',
1,
)
# ReadOnly fields
response = self.client.get(reverse("admin:admin_inlines_capofamiglia_add"))
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Help text for ReadOnlyInline)" '
'title="Help text for ReadOnlyInline">',
1,
)
def test_tabular_model_form_meta_readonly_field(self):
"""
Tabular inlines use ModelForm.Meta.help_texts and labels for read-only
fields.
"""
response = self.client.get(reverse("admin:admin_inlines_someparentmodel_add"))
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Help text from ModelForm.Meta)" '
'title="Help text from ModelForm.Meta">',
)
self.assertContains(response, "Label from ModelForm.Meta")
def test_inline_hidden_field_no_column(self):
"""#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
parent = SomeParentModel.objects.create(name="a")
SomeChildModel.objects.create(name="b", position="0", parent=parent)
SomeChildModel.objects.create(name="c", position="1", parent=parent)
response = self.client.get(
reverse("admin:admin_inlines_someparentmodel_change", args=(parent.pk,))
)
self.assertNotContains(response, '<td class="field-position">')
self.assertInHTML(
'<input id="id_somechildmodel_set-1-position" '
'name="somechildmodel_set-1-position" type="hidden" value="1">',
response.rendered_content,
)
def test_tabular_inline_hidden_field_with_view_only_permissions(self):
"""
Content of hidden field is not visible in tabular inline when user has
view-only permission.
"""
self.client.force_login(self.view_only_user)
url = reverse(
"tabular_inline_hidden_field_admin:admin_inlines_someparentmodel_change",
args=(self.parent.pk,),
)
response = self.client.get(url)
self.assertInHTML(
'<th class="column-position hidden">Position</th>',
response.rendered_content,
)
self.assertInHTML(
'<td class="field-position hidden"><p>0</p></td>', response.rendered_content
)
self.assertInHTML(
'<td class="field-position hidden"><p>1</p></td>', response.rendered_content
)
def test_stacked_inline_hidden_field_with_view_only_permissions(self):
"""
Content of hidden field is not visible in stacked inline when user has
view-only permission.
"""
self.client.force_login(self.view_only_user)
url = reverse(
"stacked_inline_hidden_field_in_group_admin:"
"admin_inlines_someparentmodel_change",
args=(self.parent.pk,),
)
response = self.client.get(url)
# The whole line containing name + position fields is not hidden.
self.assertContains(
response, '<div class="form-row field-name field-position">'
)
# The div containing the position field is hidden.
self.assertInHTML(
'<div class="fieldBox field-position hidden">'
'<label class="inline">Position:</label>'
'<div class="readonly">0</div></div>',
response.rendered_content,
)
self.assertInHTML(
'<div class="fieldBox field-position hidden">'
'<label class="inline">Position:</label>'
'<div class="readonly">1</div></div>',
response.rendered_content,
)
def test_stacked_inline_single_hidden_field_in_line_with_view_only_permissions(
self,
):
"""
Content of hidden field is not visible in stacked inline when user has
view-only permission and the field is grouped on a separate line.
"""
self.client.force_login(self.view_only_user)
url = reverse(
"stacked_inline_hidden_field_on_single_line_admin:"
"admin_inlines_someparentmodel_change",
args=(self.parent.pk,),
)
response = self.client.get(url)
# The whole line containing position field is hidden.
self.assertInHTML(
'<div class="form-row hidden field-position">'
"<div><label>Position:</label>"
'<div class="readonly">0</div></div></div>',
response.rendered_content,
)
self.assertInHTML(
'<div class="form-row hidden field-position">'
"<div><label>Position:</label>"
'<div class="readonly">1</div></div></div>',
response.rendered_content,
)
def test_tabular_inline_with_hidden_field_non_field_errors_has_correct_colspan(
self,
):
"""
In tabular inlines, when a form has non-field errors, those errors
are rendered in a table line with a single cell spanning the whole
table width. Colspan must be equal to the number of visible columns.
"""
parent = SomeParentModel.objects.create(name="a")
child = SomeChildModel.objects.create(name="b", position="0", parent=parent)
url = reverse(
"tabular_inline_hidden_field_admin:admin_inlines_someparentmodel_change",
args=(parent.id,),
)
data = {
"name": parent.name,
"somechildmodel_set-TOTAL_FORMS": 1,
"somechildmodel_set-INITIAL_FORMS": 1,
"somechildmodel_set-MIN_NUM_FORMS": 0,
"somechildmodel_set-MAX_NUM_FORMS": 1000,
"_save": "Save",
"somechildmodel_set-0-id": child.id,
"somechildmodel_set-0-parent": parent.id,
"somechildmodel_set-0-name": child.name,
"somechildmodel_set-0-position": 1,
}
response = self.client.post(url, data)
# Form has 3 visible columns and 1 hidden column.
self.assertInHTML(
'<thead><tr><th class="original"></th>'
'<th class="column-name required">Name</th>'
'<th class="column-position required hidden">Position</th>'
"<th>Delete?</th></tr></thead>",
response.rendered_content,
)
# The non-field error must be spanned on 3 (visible) columns.
self.assertInHTML(
'<tr class="row-form-errors"><td colspan="3">'
'<ul class="errorlist nonfield"><li>A non-field error</li></ul></td></tr>',
response.rendered_content,
)
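    def _error_row_colspan_sketch(self, field_count, hidden_count):
        # Hypothetical helper restating the rule verified above: the
        # non-field error row spans every visible column, i.e. the "original"
        # cell, the visible fields, and the delete column, while hidden field
        # columns are excluded.
        original_col, delete_col = 1, 1
        return original_col + (field_count - hidden_count) + delete_col  # 3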
def test_non_related_name_inline(self):
"""
Multiple inlines with related_name='+' have correct form prefixes.
"""
response = self.client.get(reverse("admin:admin_inlines_capofamiglia_add"))
self.assertContains(
response, '<input type="hidden" name="-1-0-id" id="id_-1-0-id">', html=True
)
self.assertContains(
response,
'<input type="hidden" name="-1-0-capo_famiglia" '
'id="id_-1-0-capo_famiglia">',
html=True,
)
self.assertContains(
response,
'<input id="id_-1-0-name" type="text" class="vTextField" name="-1-0-name" '
'maxlength="100">',
html=True,
)
self.assertContains(
response, '<input type="hidden" name="-2-0-id" id="id_-2-0-id">', html=True
)
self.assertContains(
response,
'<input type="hidden" name="-2-0-capo_famiglia" '
'id="id_-2-0-capo_famiglia">',
html=True,
)
self.assertContains(
response,
'<input id="id_-2-0-name" type="text" class="vTextField" name="-2-0-name" '
'maxlength="100">',
html=True,
)
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_localize_pk_shortcut(self):
"""
The "View on Site" link is correct for locales that use thousand
separators.
"""
holder = Holder.objects.create(pk=123456789, dummy=42)
inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly="")
response = self.client.get(
reverse("admin:admin_inlines_holder_change", args=(holder.id,))
)
inner_shortcut = "r/%s/%s/" % (
ContentType.objects.get_for_model(inner).pk,
inner.pk,
)
self.assertContains(response, inner_shortcut)
def test_custom_pk_shortcut(self):
"""
The "View on Site" link is correct for models with a custom primary key
field.
"""
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get(
reverse("admin:admin_inlines_parentmodelwithcustompk_change", args=("foo",))
)
child1_shortcut = "r/%s/%s/" % (
ContentType.objects.get_for_model(child1).pk,
child1.pk,
)
child2_shortcut = "r/%s/%s/" % (
ContentType.objects.get_for_model(child2).pk,
child2.pk,
)
self.assertContains(response, child1_shortcut)
self.assertContains(response, child2_shortcut)
def test_create_inlines_on_inherited_model(self):
"""
An object can be created with inlines when it inherits another class.
"""
data = {
"name": "Martian",
"sighting_set-TOTAL_FORMS": 1,
"sighting_set-INITIAL_FORMS": 0,
"sighting_set-MAX_NUM_FORMS": 0,
"sighting_set-0-place": "Zone 51",
"_save": "Save",
}
response = self.client.post(
reverse("admin:admin_inlines_extraterrestrial_add"), data
)
self.assertEqual(response.status_code, 302)
self.assertEqual(Sighting.objects.filter(et__name="Martian").count(), 1)
def test_custom_get_extra_form(self):
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
# The maximum number of forms should respect 'get_max_num' on the
# ModelAdmin
max_forms_input = (
'<input id="id_binarytree_set-MAX_NUM_FORMS" '
'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d">'
)
# The total number of forms will remain the same in either case
total_forms_hidden = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2">'
)
response = self.client.get(reverse("admin:admin_inlines_binarytree_add"))
self.assertInHTML(max_forms_input % 3, response.rendered_content)
self.assertInHTML(total_forms_hidden, response.rendered_content)
response = self.client.get(
reverse("admin:admin_inlines_binarytree_change", args=(bt_head.id,))
)
self.assertInHTML(max_forms_input % 2, response.rendered_content)
self.assertInHTML(total_forms_hidden, response.rendered_content)
def test_min_num(self):
"""
min_num and extra determine number of forms.
"""
class MinNumInline(TabularInline):
model = BinaryTree
min_num = 2
extra = 3
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = (
'<input id="id_binarytree_set-MIN_NUM_FORMS" '
'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2">'
)
total_forms = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5">'
)
request = self.factory.get(reverse("admin:admin_inlines_binarytree_add"))
request.user = User(username="super", is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertInHTML(min_forms, response.rendered_content)
self.assertInHTML(total_forms, response.rendered_content)
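    def _min_num_sizing_sketch(self):
        # Hypothetical helper: on an unbound add form the inline formset
        # renders max(initial, min_num) + extra forms, so with no existing
        # objects, min_num=2 and extra=3 give the TOTAL_FORMS value of 5
        # asserted above.
        initial, min_num, extra = 0, 2, 3
        return max(initial, min_num) + extra  # 5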
def test_custom_min_num(self):
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
class MinNumInline(TabularInline):
model = BinaryTree
extra = 3
def get_min_num(self, request, obj=None, **kwargs):
if obj:
return 5
return 2
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = (
'<input id="id_binarytree_set-MIN_NUM_FORMS" '
'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d">'
)
total_forms = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d">'
)
request = self.factory.get(reverse("admin:admin_inlines_binarytree_add"))
request.user = User(username="super", is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertInHTML(min_forms % 2, response.rendered_content)
self.assertInHTML(total_forms % 5, response.rendered_content)
request = self.factory.get(
reverse("admin:admin_inlines_binarytree_change", args=(bt_head.id,))
)
request.user = User(username="super", is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
self.assertInHTML(min_forms % 5, response.rendered_content)
self.assertInHTML(total_forms % 8, response.rendered_content)
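    def _total_forms_sketch(self, initial, min_num, extra):
        # Hypothetical helper (assumes max_num is not the limiting factor):
        # both changeform assertions above follow the same sizing rule,
        # max(initial, min_num) + extra, giving max(0, 2) + 3 = 5 on the add
        # view and max(2, 5) + 3 = 8 on the change view.
        return max(initial, min_num) + extra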
def test_inline_nonauto_noneditable_pk(self):
response = self.client.get(reverse("admin:admin_inlines_author_add"))
self.assertContains(
response,
'<input id="id_nonautopkbook_set-0-rand_pk" '
'name="nonautopkbook_set-0-rand_pk" type="hidden">',
html=True,
)
self.assertContains(
response,
'<input id="id_nonautopkbook_set-2-0-rand_pk" '
'name="nonautopkbook_set-2-0-rand_pk" type="hidden">',
html=True,
)
def test_inline_nonauto_noneditable_inherited_pk(self):
response = self.client.get(reverse("admin:admin_inlines_author_add"))
self.assertContains(
response,
'<input id="id_nonautopkbookchild_set-0-nonautopkbook_ptr" '
'name="nonautopkbookchild_set-0-nonautopkbook_ptr" type="hidden">',
html=True,
)
self.assertContains(
response,
'<input id="id_nonautopkbookchild_set-2-nonautopkbook_ptr" '
'name="nonautopkbookchild_set-2-nonautopkbook_ptr" type="hidden">',
html=True,
)
def test_inline_editable_pk(self):
response = self.client.get(reverse("admin:admin_inlines_author_add"))
self.assertContains(
response,
'<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" '
'name="editablepkbook_set-0-manual_pk" type="number">',
html=True,
count=1,
)
self.assertContains(
response,
'<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" '
'name="editablepkbook_set-2-0-manual_pk" type="number">',
html=True,
count=1,
)
def test_stacked_inline_edit_form_contains_has_original_class(self):
holder = Holder.objects.create(dummy=1)
holder.inner_set.create(dummy=1)
response = self.client.get(
reverse("admin:admin_inlines_holder_change", args=(holder.pk,))
)
self.assertContains(
response,
'<div class="inline-related has_original" id="inner_set-0">',
count=1,
)
self.assertContains(
response, '<div class="inline-related" id="inner_set-1">', count=1
)
def test_inlines_show_change_link_registered(self):
"Inlines `show_change_link` for registered models when enabled."
holder = Holder4.objects.create(dummy=1)
item1 = Inner4Stacked.objects.create(dummy=1, holder=holder)
item2 = Inner4Tabular.objects.create(dummy=1, holder=holder)
items = (
("inner4stacked", item1.pk),
("inner4tabular", item2.pk),
)
response = self.client.get(
reverse("admin:admin_inlines_holder4_change", args=(holder.pk,))
)
self.assertTrue(
response.context["inline_admin_formset"].opts.has_registered_model
)
for model, pk in items:
url = reverse("admin:admin_inlines_%s_change" % model, args=(pk,))
self.assertContains(
response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML)
)
def test_inlines_show_change_link_unregistered(self):
"Inlines `show_change_link` disabled for unregistered models."
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get(
reverse("admin:admin_inlines_parentmodelwithcustompk_change", args=("foo",))
)
self.assertFalse(
response.context["inline_admin_formset"].opts.has_registered_model
)
self.assertNotContains(response, INLINE_CHANGELINK_HTML)
def test_tabular_inline_show_change_link_false_registered(self):
"Inlines `show_change_link` disabled by default."
poll = Poll.objects.create(name="New poll")
Question.objects.create(poll=poll)
response = self.client.get(
reverse("admin:admin_inlines_poll_change", args=(poll.pk,))
)
self.assertTrue(
response.context["inline_admin_formset"].opts.has_registered_model
)
self.assertNotContains(response, INLINE_CHANGELINK_HTML)
def test_noneditable_inline_has_field_inputs(self):
"""Inlines without change permission shows field inputs on add form."""
response = self.client.get(
reverse("admin:admin_inlines_novelreadonlychapter_add")
)
self.assertContains(
response,
'<input type="text" name="chapter_set-0-name" '
'class="vTextField" maxlength="40" id="id_chapter_set-0-name">',
html=True,
)
def test_inlines_plural_heading_foreign_key(self):
response = self.client.get(reverse("admin:admin_inlines_holder4_add"))
self.assertContains(response, "<h2>Inner4 stackeds</h2>", html=True)
self.assertContains(response, "<h2>Inner4 tabulars</h2>", html=True)
def test_inlines_singular_heading_one_to_one(self):
response = self.client.get(reverse("admin:admin_inlines_person_add"))
self.assertContains(response, "<h2>Author</h2>", html=True) # Tabular.
self.assertContains(response, "<h2>Fashionista</h2>", html=True) # Stacked.
def test_inlines_based_on_model_state(self):
parent = ShowInlineParent.objects.create(show_inlines=False)
data = {
"show_inlines": "on",
"_save": "Save",
}
change_url = reverse(
"admin:admin_inlines_showinlineparent_change",
args=(parent.id,),
)
response = self.client.post(change_url, data)
self.assertEqual(response.status_code, 302)
parent.refresh_from_db()
self.assertIs(parent.show_inlines, True)
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlineMedia(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_inline_media_only_base(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
change_url = reverse("admin:admin_inlines_holder_change", args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, "my_awesome_admin_scripts.js")
def test_inline_media_only_inline(self):
holder = Holder3(dummy=13)
holder.save()
Inner3(dummy=42, holder=holder).save()
change_url = reverse("admin:admin_inlines_holder3_change", args=(holder.id,))
response = self.client.get(change_url)
self.assertEqual(
response.context["inline_admin_formsets"][0].media._js,
[
"admin/js/vendor/jquery/jquery.min.js",
"my_awesome_inline_scripts.js",
"custom_number.js",
"admin/js/jquery.init.js",
"admin/js/inlines.js",
],
)
self.assertContains(response, "my_awesome_inline_scripts.js")
def test_all_inline_media(self):
holder = Holder2(dummy=13)
holder.save()
Inner2(dummy=42, holder=holder).save()
change_url = reverse("admin:admin_inlines_holder2_change", args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, "my_awesome_admin_scripts.js")
self.assertContains(response, "my_awesome_inline_scripts.js")
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlineAdminForm(TestCase):
def test_immutable_content_type(self):
"""Regression for #9362
The problem depends only on InlineAdminForm and its "original"
argument, so we can safely set the other arguments to None/{}. We just
need to check that the content_type argument of Child isn't altered by
the internals of the inline form."""
sally = Teacher.objects.create(name="Sally")
john = Parent.objects.create(name="John")
joe = Child.objects.create(name="Joe", teacher=sally, parent=john)
iaf = InlineAdminForm(None, None, {}, {}, joe)
parent_ct = ContentType.objects.get_for_model(Parent)
self.assertEqual(iaf.original.content_type, parent_ct)
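# Hypothetical sketch: the regression above reduces to InlineAdminForm storing
# its "original" argument without mutating it, so the wrapped object and its
# fields come back unchanged.
def _inline_admin_form_original_sketch(obj):
    iaf = InlineAdminForm(None, None, {}, {}, obj)
    return iaf.original is obj  # True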
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlineProtectedOnDelete(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_deleting_inline_with_protected_delete_does_not_validate(self):
lotr = Novel.objects.create(name="Lord of the rings")
chapter = Chapter.objects.create(novel=lotr, name="Many Meetings")
foot_note = FootNote.objects.create(chapter=chapter, note="yadda yadda")
change_url = reverse("admin:admin_inlines_novel_change", args=(lotr.id,))
response = self.client.get(change_url)
data = {
"name": lotr.name,
"chapter_set-TOTAL_FORMS": 1,
"chapter_set-INITIAL_FORMS": 1,
"chapter_set-MAX_NUM_FORMS": 1000,
"_save": "Save",
"chapter_set-0-id": chapter.id,
"chapter_set-0-name": chapter.name,
"chapter_set-0-novel": lotr.id,
"chapter_set-0-DELETE": "on",
}
response = self.client.post(change_url, data)
self.assertContains(
response,
"Deleting chapter %s would require deleting "
"the following protected related objects: foot note %s"
% (chapter, foot_note),
)
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlinePermissions(TestCase):
"""
Make sure the admin respects permissions for objects that are edited
inline. Refs #8060.
"""
@classmethod
def setUpTestData(cls):
cls.user = User(username="admin", is_staff=True, is_active=True)
cls.user.set_password("secret")
cls.user.save()
cls.author_ct = ContentType.objects.get_for_model(Author)
cls.holder_ct = ContentType.objects.get_for_model(Holder2)
cls.book_ct = ContentType.objects.get_for_model(Book)
cls.inner_ct = ContentType.objects.get_for_model(Inner2)
        # The user always has permissions to add and change Authors and
        # Holders, the main (parent) models of the inlines. Permissions on
        # the inline models vary per test.
permission = Permission.objects.get(
codename="add_author", content_type=cls.author_ct
)
cls.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="change_author", content_type=cls.author_ct
)
cls.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="add_holder2", content_type=cls.holder_ct
)
cls.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="change_holder2", content_type=cls.holder_ct
)
cls.user.user_permissions.add(permission)
author = Author.objects.create(pk=1, name="The Author")
cls.book = author.books.create(name="The inline Book")
cls.author_change_url = reverse(
"admin:admin_inlines_author_change", args=(author.id,)
)
# Get the ID of the automatically created intermediate model for the
# Author-Book m2m.
author_book_auto_m2m_intermediate = Author.books.through.objects.get(
author=author, book=cls.book
)
cls.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
cls.holder = Holder2.objects.create(dummy=13)
cls.inner2 = Inner2.objects.create(dummy=42, holder=cls.holder)
def setUp(self):
self.holder_change_url = reverse(
"admin:admin_inlines_holder2_change", args=(self.holder.id,)
)
self.client.force_login(self.user)
def test_inline_add_m2m_noperm(self):
response = self.client.get(reverse("admin:admin_inlines_author_add"))
# No change permission on books, so no inline
self.assertNotContains(response, "<h2>Author-book relationships</h2>")
self.assertNotContains(response, "Add another Author-Book Relationship")
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_noperm(self):
response = self.client.get(reverse("admin:admin_inlines_holder2_add"))
# No permissions on Inner2s, so no inline
self.assertNotContains(response, "<h2>Inner2s</h2>")
self.assertNotContains(response, "Add another Inner2")
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_noperm(self):
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, "<h2>Author-book relationships</h2>")
self.assertNotContains(response, "Add another Author-Book Relationship")
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_change_fk_noperm(self):
response = self.client.get(self.holder_change_url)
# No permissions on Inner2s, so no inline
self.assertNotContains(response, "<h2>Inner2s</h2>")
self.assertNotContains(response, "Add another Inner2")
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_add_m2m_view_only_perm(self):
permission = Permission.objects.get(
codename="view_book", content_type=self.book_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(reverse("admin:admin_inlines_author_add"))
# View-only inlines. (It could be nicer to hide the empty, non-editable
# inlines on the add page.)
self.assertIs(
response.context["inline_admin_formset"].has_view_permission, True
)
self.assertIs(
response.context["inline_admin_formset"].has_add_permission, False
)
self.assertIs(
response.context["inline_admin_formset"].has_change_permission, False
)
self.assertIs(
response.context["inline_admin_formset"].has_delete_permission, False
)
self.assertContains(response, "<h2>Author-book relationships</h2>")
self.assertContains(
response,
'<input type="hidden" name="Author_books-TOTAL_FORMS" value="0" '
'id="id_Author_books-TOTAL_FORMS">',
html=True,
)
self.assertNotContains(response, "Add another Author-Book Relationship")
def test_inline_add_m2m_add_perm(self):
permission = Permission.objects.get(
codename="add_book", content_type=self.book_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(reverse("admin:admin_inlines_author_add"))
# No change permission on Books, so no inline
self.assertNotContains(response, "<h2>Author-book relationships</h2>")
self.assertNotContains(response, "Add another Author-Book Relationship")
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_add_perm(self):
permission = Permission.objects.get(
codename="add_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(reverse("admin:admin_inlines_holder2_add"))
# Add permission on inner2s, so we get the inline
self.assertContains(response, "<h2>Inner2s</h2>")
self.assertContains(response, "Add another Inner2")
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS">',
html=True,
)
def test_inline_change_m2m_add_perm(self):
permission = Permission.objects.get(
codename="add_book", content_type=self.book_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, "<h2>Author-book relationships</h2>")
self.assertNotContains(response, "Add another Author-Book Relationship")
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_m2m_view_only_perm(self):
permission = Permission.objects.get(
codename="view_book", content_type=self.book_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# View-only inlines.
self.assertIs(
response.context["inline_admin_formset"].has_view_permission, True
)
self.assertIs(
response.context["inline_admin_formset"].has_add_permission, False
)
self.assertIs(
response.context["inline_admin_formset"].has_change_permission, False
)
self.assertIs(
response.context["inline_admin_formset"].has_delete_permission, False
)
self.assertContains(response, "<h2>Author-book relationships</h2>")
self.assertContains(
response,
'<input type="hidden" name="Author_books-TOTAL_FORMS" value="1" '
'id="id_Author_books-TOTAL_FORMS">',
html=True,
)
# The field in the inline is read-only.
self.assertContains(response, "<p>%s</p>" % self.book)
self.assertNotContains(
response,
'<input type="checkbox" name="Author_books-0-DELETE" '
'id="id_Author_books-0-DELETE">',
html=True,
)
def test_inline_change_m2m_change_perm(self):
permission = Permission.objects.get(
codename="change_book", content_type=self.book_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# We have change perm on books, so we can add/change/delete inlines
self.assertIs(
response.context["inline_admin_formset"].has_view_permission, True
)
self.assertIs(response.context["inline_admin_formset"].has_add_permission, True)
self.assertIs(
response.context["inline_admin_formset"].has_change_permission, True
)
self.assertIs(
response.context["inline_admin_formset"].has_delete_permission, True
)
self.assertContains(response, "<h2>Author-book relationships</h2>")
self.assertContains(response, "Add another Author-book relationship")
self.assertContains(
response,
'<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
'value="4" name="Author_books-TOTAL_FORMS">',
html=True,
)
self.assertContains(
response,
'<input type="hidden" id="id_Author_books-0-id" value="%i" '
'name="Author_books-0-id">' % self.author_book_auto_m2m_intermediate_id,
html=True,
)
self.assertContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_fk_add_perm(self):
permission = Permission.objects.get(
codename="add_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add permission on inner2s, so we can add but not modify existing
self.assertContains(response, "<h2>Inner2s</h2>")
self.assertContains(response, "Add another Inner2")
# 3 extra forms only, not the existing instance form
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" '
'name="inner2_set-TOTAL_FORMS">',
html=True,
)
self.assertNotContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" '
'name="inner2_set-0-id">' % self.inner2.id,
html=True,
)
def test_inline_change_fk_change_perm(self):
permission = Permission.objects.get(
codename="change_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change permission on inner2s, so we can change existing but not add new
self.assertContains(response, "<h2>Inner2s</h2>", count=2)
        # Just the one form for the existing instance
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" '
'name="inner2_set-TOTAL_FORMS">',
html=True,
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" '
'name="inner2_set-0-id">' % self.inner2.id,
html=True,
)
# max-num 0 means we can't add new ones
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" value="0" '
'name="inner2_set-MAX_NUM_FORMS">',
html=True,
)
# TabularInline
self.assertContains(
response, '<th class="column-dummy required">Dummy</th>', html=True
)
self.assertContains(
response,
'<input type="number" name="inner2_set-2-0-dummy" value="%s" '
'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy,
html=True,
)
def test_inline_change_fk_add_change_perm(self):
permission = Permission.objects.get(
codename="add_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="change_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add/change perm, so we can add new and change existing
self.assertContains(response, "<h2>Inner2s</h2>")
# One form for existing instance and three extra for new
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" '
'name="inner2_set-TOTAL_FORMS">',
html=True,
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" '
'name="inner2_set-0-id">' % self.inner2.id,
html=True,
)
def test_inline_change_fk_change_del_perm(self):
permission = Permission.objects.get(
codename="change_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="delete_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change/delete perm on inner2s, so we can change/delete existing
self.assertContains(response, "<h2>Inner2s</h2>")
# One form for existing instance only, no new
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" '
'name="inner2_set-TOTAL_FORMS">',
html=True,
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" '
'name="inner2_set-0-id">' % self.inner2.id,
html=True,
)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
def test_inline_change_fk_all_perms(self):
permission = Permission.objects.get(
codename="add_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="change_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(
codename="delete_inner2", content_type=self.inner_ct
)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# All perms on inner2s, so we can add/change/delete
self.assertContains(response, "<h2>Inner2s</h2>", count=2)
        # One form for the existing instance, plus three extra for new ones
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" '
'name="inner2_set-TOTAL_FORMS">',
html=True,
)
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-0-id" value="%i" '
'name="inner2_set-0-id">' % self.inner2.id,
html=True,
)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
# TabularInline
self.assertContains(
response, '<th class="column-dummy required">Dummy</th>', html=True
)
self.assertContains(
response,
'<input type="number" name="inner2_set-2-0-dummy" value="%s" '
'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy,
html=True,
)
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestReadOnlyChangeViewInlinePermissions(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(
"testing", password="password", is_staff=True
)
cls.user.user_permissions.add(
Permission.objects.get(
codename="view_poll",
content_type=ContentType.objects.get_for_model(Poll),
)
)
cls.user.user_permissions.add(
*Permission.objects.filter(
codename__endswith="question",
content_type=ContentType.objects.get_for_model(Question),
).values_list("pk", flat=True)
)
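        # View-only access to Poll combined with full access to Question
        # renders the Poll change view, including its inlines, read-only.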
cls.poll = Poll.objects.create(name="Survey")
cls.add_url = reverse("admin:admin_inlines_poll_add")
cls.change_url = reverse("admin:admin_inlines_poll_change", args=(cls.poll.id,))
def setUp(self):
self.client.force_login(self.user)
def test_add_url_not_allowed(self):
response = self.client.get(self.add_url)
self.assertEqual(response.status_code, 403)
response = self.client.post(self.add_url, {})
self.assertEqual(response.status_code, 403)
def test_post_to_change_url_not_allowed(self):
response = self.client.post(self.change_url, {})
self.assertEqual(response.status_code, 403)
def test_get_to_change_url_is_allowed(self):
response = self.client.get(self.change_url)
self.assertEqual(response.status_code, 200)
def test_main_model_is_rendered_as_read_only(self):
response = self.client.get(self.change_url)
self.assertContains(
response, '<div class="readonly">%s</div>' % self.poll.name, html=True
)
        name_input = (
            '<input type="text" name="name" value="%s" class="vTextField" '
            'maxlength="40" required id="id_name">'
        )
        self.assertNotContains(response, name_input % self.poll.name, html=True)
def test_inlines_are_rendered_as_read_only(self):
question = Question.objects.create(
text="How will this be rendered?", poll=self.poll
)
response = self.client.get(self.change_url)
self.assertContains(
response, '<td class="field-text"><p>%s</p></td>' % question.text, html=True
)
self.assertNotContains(response, 'id="id_question_set-0-text"')
self.assertNotContains(response, 'id="id_related_objs-0-DELETE"')
def test_submit_line_shows_only_close_button(self):
response = self.client.get(self.change_url)
self.assertContains(
response,
'<a href="/admin/admin_inlines/poll/" class="closelink">Close</a>',
html=True,
)
delete_link = '<p class="deletelink-box"><a href="/admin/admin_inlines/poll/%s/delete/" class="deletelink">Delete</a></p>' # noqa
self.assertNotContains(response, delete_link % self.poll.id, html=True)
self.assertNotContains(
response,
'<input type="submit" value="Save and add another" name="_addanother">',
)
self.assertNotContains(
response,
'<input type="submit" value="Save and continue editing" name="_continue">',
)
def test_inline_delete_buttons_are_not_shown(self):
Question.objects.create(text="How will this be rendered?", poll=self.poll)
response = self.client.get(self.change_url)
self.assertNotContains(
response,
'<input type="checkbox" name="question_set-0-DELETE" '
'id="id_question_set-0-DELETE">',
html=True,
)
def test_extra_inlines_are_not_shown(self):
response = self.client.get(self.change_url)
self.assertNotContains(response, 'id="id_question_set-0-text"')
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestVerboseNameInlineForms(TestDataMixin, TestCase):
factory = RequestFactory()
def test_verbose_name_inline(self):
class NonVerboseProfileInline(TabularInline):
model = Profile
verbose_name = "Non-verbose childs"
class VerboseNameProfileInline(TabularInline):
model = VerboseNameProfile
verbose_name = "Childs with verbose name"
class VerboseNamePluralProfileInline(TabularInline):
model = VerboseNamePluralProfile
verbose_name = "Childs with verbose name plural"
class BothVerboseNameProfileInline(TabularInline):
model = BothVerboseNameProfile
verbose_name = "Childs with both verbose names"
modeladmin = ModelAdmin(ProfileCollection, admin_site)
modeladmin.inlines = [
NonVerboseProfileInline,
VerboseNameProfileInline,
VerboseNamePluralProfileInline,
BothVerboseNameProfileInline,
]
obj = ProfileCollection.objects.create()
url = reverse("admin:admin_inlines_profilecollection_change", args=(obj.pk,))
request = self.factory.get(url)
request.user = self.superuser
response = modeladmin.changeform_view(request)
self.assertNotContains(response, "Add another Profile")
# Non-verbose model.
self.assertContains(response, "<h2>Non-verbose childss</h2>")
self.assertContains(response, "Add another Non-verbose child")
self.assertNotContains(response, "<h2>Profiles</h2>")
# Model with verbose name.
self.assertContains(response, "<h2>Childs with verbose names</h2>")
self.assertContains(response, "Add another Childs with verbose name")
self.assertNotContains(response, "<h2>Model with verbose name onlys</h2>")
self.assertNotContains(response, "Add another Model with verbose name only")
# Model with verbose name plural.
self.assertContains(response, "<h2>Childs with verbose name plurals</h2>")
self.assertContains(response, "Add another Childs with verbose name plural")
self.assertNotContains(response, "<h2>Model with verbose name plural only</h2>")
# Model with both verbose names.
self.assertContains(response, "<h2>Childs with both verbose namess</h2>")
self.assertContains(response, "Add another Childs with both verbose names")
self.assertNotContains(response, "<h2>Model with both - plural name</h2>")
self.assertNotContains(response, "Add another Model with both - name")
def test_verbose_name_plural_inline(self):
class NonVerboseProfileInline(TabularInline):
model = Profile
verbose_name_plural = "Non-verbose childs"
class VerboseNameProfileInline(TabularInline):
model = VerboseNameProfile
verbose_name_plural = "Childs with verbose name"
class VerboseNamePluralProfileInline(TabularInline):
model = VerboseNamePluralProfile
verbose_name_plural = "Childs with verbose name plural"
class BothVerboseNameProfileInline(TabularInline):
model = BothVerboseNameProfile
verbose_name_plural = "Childs with both verbose names"
modeladmin = ModelAdmin(ProfileCollection, admin_site)
modeladmin.inlines = [
NonVerboseProfileInline,
VerboseNameProfileInline,
VerboseNamePluralProfileInline,
BothVerboseNameProfileInline,
]
obj = ProfileCollection.objects.create()
url = reverse("admin:admin_inlines_profilecollection_change", args=(obj.pk,))
request = self.factory.get(url)
request.user = self.superuser
response = modeladmin.changeform_view(request)
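        # Only verbose_name_plural is overridden here, so the "Add another"
        # links still use each model's own verbose_name.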
# Non-verbose model.
self.assertContains(response, "<h2>Non-verbose childs</h2>")
self.assertContains(response, "Add another Profile")
self.assertNotContains(response, "<h2>Profiles</h2>")
# Model with verbose name.
self.assertContains(response, "<h2>Childs with verbose name</h2>")
self.assertContains(response, "Add another Model with verbose name only")
self.assertNotContains(response, "<h2>Model with verbose name onlys</h2>")
# Model with verbose name plural.
self.assertContains(response, "<h2>Childs with verbose name plural</h2>")
self.assertContains(response, "Add another Profile")
self.assertNotContains(response, "<h2>Model with verbose name plural only</h2>")
# Model with both verbose names.
self.assertContains(response, "<h2>Childs with both verbose names</h2>")
self.assertContains(response, "Add another Model with both - name")
self.assertNotContains(response, "<h2>Model with both - plural name</h2>")
def test_both_verbose_names_inline(self):
class NonVerboseProfileInline(TabularInline):
model = Profile
verbose_name = "Non-verbose childs - name"
verbose_name_plural = "Non-verbose childs - plural name"
class VerboseNameProfileInline(TabularInline):
model = VerboseNameProfile
verbose_name = "Childs with verbose name - name"
verbose_name_plural = "Childs with verbose name - plural name"
class VerboseNamePluralProfileInline(TabularInline):
model = VerboseNamePluralProfile
verbose_name = "Childs with verbose name plural - name"
verbose_name_plural = "Childs with verbose name plural - plural name"
class BothVerboseNameProfileInline(TabularInline):
model = BothVerboseNameProfile
verbose_name = "Childs with both - name"
verbose_name_plural = "Childs with both - plural name"
modeladmin = ModelAdmin(ProfileCollection, admin_site)
modeladmin.inlines = [
NonVerboseProfileInline,
VerboseNameProfileInline,
VerboseNamePluralProfileInline,
BothVerboseNameProfileInline,
]
obj = ProfileCollection.objects.create()
url = reverse("admin:admin_inlines_profilecollection_change", args=(obj.pk,))
request = self.factory.get(url)
request.user = self.superuser
response = modeladmin.changeform_view(request)
self.assertNotContains(response, "Add another Profile")
# Non-verbose model.
self.assertContains(response, "<h2>Non-verbose childs - plural name</h2>")
self.assertContains(response, "Add another Non-verbose childs - name")
self.assertNotContains(response, "<h2>Profiles</h2>")
# Model with verbose name.
self.assertContains(response, "<h2>Childs with verbose name - plural name</h2>")
self.assertContains(response, "Add another Childs with verbose name - name")
self.assertNotContains(response, "<h2>Model with verbose name onlys</h2>")
# Model with verbose name plural.
self.assertContains(
response,
"<h2>Childs with verbose name plural - plural name</h2>",
)
self.assertContains(
response,
"Add another Childs with verbose name plural - name",
)
self.assertNotContains(response, "<h2>Model with verbose name plural only</h2>")
# Model with both verbose names.
self.assertContains(response, "<h2>Childs with both - plural name</h2>")
self.assertContains(response, "Add another Childs with both - name")
self.assertNotContains(response, "<h2>Model with both - plural name</h2>")
self.assertNotContains(response, "Add another Model with both - name")
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ["admin_inlines"] + AdminSeleniumTestCase.available_apps
def setUp(self):
User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
def test_add_stackeds(self):
"""
The "Add another XXX" link correctly adds items to the stacked formset.
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_inlines_holder4_add")
)
inline_id = "#inner4stacked_set-group"
rows_selector = "%s .dynamic-inner4stacked_set" % inline_id
self.assertCountSeleniumElements(rows_selector, 3)
add_button = self.selenium.find_element(
By.LINK_TEXT, "Add another Inner4 stacked"
)
add_button.click()
self.assertCountSeleniumElements(rows_selector, 4)
def test_delete_stackeds(self):
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_inlines_holder4_add")
)
inline_id = "#inner4stacked_set-group"
rows_selector = "%s .dynamic-inner4stacked_set" % inline_id
self.assertCountSeleniumElements(rows_selector, 3)
add_button = self.selenium.find_element(
By.LINK_TEXT, "Add another Inner4 stacked"
)
add_button.click()
add_button.click()
self.assertCountSeleniumElements(rows_selector, 5)
for delete_link in self.selenium.find_elements(
By.CSS_SELECTOR, "%s .inline-deletelink" % inline_id
):
delete_link.click()
with self.disable_implicit_wait():
self.assertCountSeleniumElements(rows_selector, 0)
def test_delete_invalid_stacked_inlines(self):
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_inlines_holder4_add")
)
inline_id = "#inner4stacked_set-group"
rows_selector = "%s .dynamic-inner4stacked_set" % inline_id
self.assertCountSeleniumElements(rows_selector, 3)
add_button = self.selenium.find_element(
By.LINK_TEXT,
"Add another Inner4 stacked",
)
add_button.click()
add_button.click()
self.assertCountSeleniumElements("#id_inner4stacked_set-4-dummy", 1)
# Enter some data and click 'Save'.
self.selenium.find_element(By.NAME, "dummy").send_keys("1")
self.selenium.find_element(By.NAME, "inner4stacked_set-0-dummy").send_keys(
"100"
)
self.selenium.find_element(By.NAME, "inner4stacked_set-1-dummy").send_keys(
"101"
)
self.selenium.find_element(By.NAME, "inner4stacked_set-2-dummy").send_keys(
"222"
)
self.selenium.find_element(By.NAME, "inner4stacked_set-3-dummy").send_keys(
"103"
)
self.selenium.find_element(By.NAME, "inner4stacked_set-4-dummy").send_keys(
"222"
)
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
# Sanity check.
self.assertCountSeleniumElements(rows_selector, 5)
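        # The duplicate rows get a non-field error rendered in their
        # errorlist.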
errorlist = self.selenium.find_element(
By.CSS_SELECTOR,
"%s .dynamic-inner4stacked_set .errorlist li" % inline_id,
)
self.assertEqual("Please correct the duplicate values below.", errorlist.text)
delete_link = self.selenium.find_element(
By.CSS_SELECTOR, "#inner4stacked_set-4 .inline-deletelink"
)
delete_link.click()
self.assertCountSeleniumElements(rows_selector, 4)
with self.disable_implicit_wait(), self.assertRaises(NoSuchElementException):
self.selenium.find_element(
By.CSS_SELECTOR,
"%s .dynamic-inner4stacked_set .errorlist li" % inline_id,
)
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
# The objects have been created in the database.
self.assertEqual(Inner4Stacked.objects.count(), 4)
def test_delete_invalid_tabular_inlines(self):
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_inlines_holder4_add")
)
inline_id = "#inner4tabular_set-group"
rows_selector = "%s .dynamic-inner4tabular_set" % inline_id
self.assertCountSeleniumElements(rows_selector, 3)
add_button = self.selenium.find_element(
By.LINK_TEXT, "Add another Inner4 tabular"
)
add_button.click()
add_button.click()
self.assertCountSeleniumElements("#id_inner4tabular_set-4-dummy", 1)
# Enter some data and click 'Save'.
self.selenium.find_element(By.NAME, "dummy").send_keys("1")
self.selenium.find_element(By.NAME, "inner4tabular_set-0-dummy").send_keys(
"100"
)
self.selenium.find_element(By.NAME, "inner4tabular_set-1-dummy").send_keys(
"101"
)
self.selenium.find_element(By.NAME, "inner4tabular_set-2-dummy").send_keys(
"222"
)
self.selenium.find_element(By.NAME, "inner4tabular_set-3-dummy").send_keys(
"103"
)
self.selenium.find_element(By.NAME, "inner4tabular_set-4-dummy").send_keys(
"222"
)
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
        # Sanity check.
self.assertCountSeleniumElements(rows_selector, 5)
# Non-field errorlist is in its own <tr> just before
# tr#inner4tabular_set-3:
errorlist = self.selenium.find_element(
By.CSS_SELECTOR,
"%s #inner4tabular_set-3 + .row-form-errors .errorlist li" % inline_id,
)
self.assertEqual("Please correct the duplicate values below.", errorlist.text)
delete_link = self.selenium.find_element(
By.CSS_SELECTOR, "#inner4tabular_set-4 .inline-deletelink"
)
delete_link.click()
self.assertCountSeleniumElements(rows_selector, 4)
with self.disable_implicit_wait(), self.assertRaises(NoSuchElementException):
self.selenium.find_element(
By.CSS_SELECTOR,
"%s .dynamic-inner4tabular_set .errorlist li" % inline_id,
)
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
# The objects have been created in the database.
self.assertEqual(Inner4Tabular.objects.count(), 4)
def test_add_inlines(self):
"""
The "Add another XXX" link correctly adds items to the inline form.
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_inlines_profilecollection_add")
)
# There's only one inline to start with and it has the correct ID.
self.assertCountSeleniumElements(".dynamic-profile_set", 1)
self.assertEqual(
self.selenium.find_elements(By.CSS_SELECTOR, ".dynamic-profile_set")[
0
].get_attribute("id"),
"profile_set-0",
)
self.assertCountSeleniumElements(
".dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]", 1
)
self.assertCountSeleniumElements(
".dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]", 1
)
# Add an inline
self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click()
# The inline has been added, it has the right id, and it contains the
# correct fields.
self.assertCountSeleniumElements(".dynamic-profile_set", 2)
self.assertEqual(
self.selenium.find_elements(By.CSS_SELECTOR, ".dynamic-profile_set")[
1
].get_attribute("id"),
"profile_set-1",
)
self.assertCountSeleniumElements(
".dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]", 1
)
self.assertCountSeleniumElements(
".dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]", 1
)
# Let's add another one to be sure
self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click()
self.assertCountSeleniumElements(".dynamic-profile_set", 3)
self.assertEqual(
self.selenium.find_elements(By.CSS_SELECTOR, ".dynamic-profile_set")[
2
].get_attribute("id"),
"profile_set-2",
)
self.assertCountSeleniumElements(
".dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]", 1
)
self.assertCountSeleniumElements(
".dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]", 1
)
# Enter some data and click 'Save'
self.selenium.find_element(By.NAME, "profile_set-0-first_name").send_keys(
"0 first name 1"
)
self.selenium.find_element(By.NAME, "profile_set-0-last_name").send_keys(
"0 last name 2"
)
self.selenium.find_element(By.NAME, "profile_set-1-first_name").send_keys(
"1 first name 1"
)
self.selenium.find_element(By.NAME, "profile_set-1-last_name").send_keys(
"1 last name 2"
)
self.selenium.find_element(By.NAME, "profile_set-2-first_name").send_keys(
"2 first name 1"
)
self.selenium.find_element(By.NAME, "profile_set-2-last_name").send_keys(
"2 last name 2"
)
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
# The objects have been created in the database
self.assertEqual(ProfileCollection.objects.count(), 1)
self.assertEqual(Profile.objects.count(), 3)
def test_add_inline_link_absent_for_view_only_parent_model(self):
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
user = User.objects.create_user("testing", password="password", is_staff=True)
user.user_permissions.add(
Permission.objects.get(
codename="view_poll",
content_type=ContentType.objects.get_for_model(Poll),
)
)
user.user_permissions.add(
*Permission.objects.filter(
codename__endswith="question",
content_type=ContentType.objects.get_for_model(Question),
).values_list("pk", flat=True)
)
self.admin_login(username="testing", password="password")
poll = Poll.objects.create(name="Survey")
change_url = reverse("admin:admin_inlines_poll_change", args=(poll.id,))
self.selenium.get(self.live_server_url + change_url)
with self.disable_implicit_wait():
with self.assertRaises(NoSuchElementException):
self.selenium.find_element(By.LINK_TEXT, "Add another Question")
def test_delete_inlines(self):
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_inlines_profilecollection_add")
)
# Add a few inlines
self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click()
self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click()
self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click()
self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click()
self.assertCountSeleniumElements(
"#profile_set-group table tr.dynamic-profile_set", 5
)
self.assertCountSeleniumElements(
"form#profilecollection_form tr.dynamic-profile_set#profile_set-0", 1
)
self.assertCountSeleniumElements(
"form#profilecollection_form tr.dynamic-profile_set#profile_set-1", 1
)
self.assertCountSeleniumElements(
"form#profilecollection_form tr.dynamic-profile_set#profile_set-2", 1
)
self.assertCountSeleniumElements(
"form#profilecollection_form tr.dynamic-profile_set#profile_set-3", 1
)
self.assertCountSeleniumElements(
"form#profilecollection_form tr.dynamic-profile_set#profile_set-4", 1
)
# Click on a few delete buttons
self.selenium.find_element(
By.CSS_SELECTOR,
"form#profilecollection_form tr.dynamic-profile_set#profile_set-1 "
"td.delete a",
).click()
self.selenium.find_element(
By.CSS_SELECTOR,
"form#profilecollection_form tr.dynamic-profile_set#profile_set-2 "
"td.delete a",
).click()
# The rows are gone and the IDs have been re-sequenced
self.assertCountSeleniumElements(
"#profile_set-group table tr.dynamic-profile_set", 3
)
self.assertCountSeleniumElements(
"form#profilecollection_form tr.dynamic-profile_set#profile_set-0", 1
)
self.assertCountSeleniumElements(
"form#profilecollection_form tr.dynamic-profile_set#profile_set-1", 1
)
self.assertCountSeleniumElements(
"form#profilecollection_form tr.dynamic-profile_set#profile_set-2", 1
)
def test_collapsed_inlines(self):
from selenium.webdriver.common.by import By
# Collapsed inlines have SHOW/HIDE links.
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_inlines_author_add")
)
# One field is in a stacked inline, other in a tabular one.
test_fields = [
"#id_nonautopkbook_set-0-title",
"#id_nonautopkbook_set-2-0-title",
]
show_links = self.selenium.find_elements(By.LINK_TEXT, "SHOW")
self.assertEqual(len(show_links), 3)
        for show_index, field_name in enumerate(test_fields):
self.wait_until_invisible(field_name)
show_links[show_index].click()
self.wait_until_visible(field_name)
hide_links = self.selenium.find_elements(By.LINK_TEXT, "HIDE")
self.assertEqual(len(hide_links), 2)
        for hide_index, field_name in enumerate(test_fields):
self.wait_until_visible(field_name)
hide_links[hide_index].click()
self.wait_until_invisible(field_name)
def test_added_stacked_inline_with_collapsed_fields(self):
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_inlines_teacher_add")
)
self.selenium.find_element(By.LINK_TEXT, "Add another Child").click()
test_fields = ["#id_child_set-0-name", "#id_child_set-1-name"]
show_links = self.selenium.find_elements(By.LINK_TEXT, "SHOW")
self.assertEqual(len(show_links), 2)
        for show_index, field_name in enumerate(test_fields):
self.wait_until_invisible(field_name)
show_links[show_index].click()
self.wait_until_visible(field_name)
hide_links = self.selenium.find_elements(By.LINK_TEXT, "HIDE")
self.assertEqual(len(hide_links), 2)
        for hide_index, field_name in enumerate(test_fields):
self.wait_until_visible(field_name)
hide_links[hide_index].click()
self.wait_until_invisible(field_name)
def assertBorder(self, element, border):
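        """
        Assert that all four sides of `element` have the given border,
        specified as a "<width> <style> <#rrggbb>" shorthand string.
        """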
width, style, color = border.split(" ")
border_properties = [
"border-bottom-%s",
"border-left-%s",
"border-right-%s",
"border-top-%s",
]
for prop in border_properties:
prop = prop % "width"
self.assertEqual(element.value_of_css_property(prop), width)
for prop in border_properties:
prop = prop % "style"
self.assertEqual(element.value_of_css_property(prop), style)
# Convert hex color to rgb.
self.assertRegex(color, "#[0-9a-f]{6}")
r, g, b = int(color[1:3], 16), int(color[3:5], 16), int(color[5:], 16)
# The value may be expressed as either rgb() or rgba() depending on the
# browser.
colors = [
"rgb(%d, %d, %d)" % (r, g, b),
"rgba(%d, %d, %d, 1)" % (r, g, b),
]
for prop in border_properties:
prop = prop % "color"
self.assertIn(element.value_of_css_property(prop), colors)
def test_inline_formset_error_input_border(self):
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_inlines_holder5_add")
)
self.wait_until_visible("#id_dummy")
self.selenium.find_element(By.ID, "id_dummy").send_keys(1)
fields = ["id_inner5stacked_set-0-dummy", "id_inner5tabular_set-0-dummy"]
show_links = self.selenium.find_elements(By.LINK_TEXT, "SHOW")
for show_index, field_name in enumerate(fields):
show_links[show_index].click()
self.wait_until_visible("#" + field_name)
self.selenium.find_element(By.ID, field_name).send_keys(1)
        # Before saving, all inputs have the default border
for inline in ("stacked", "tabular"):
for field_name in ("name", "select", "text"):
element_id = "id_inner5%s_set-0-%s" % (inline, field_name)
self.assertBorder(
self.selenium.find_element(By.ID, element_id),
"1px solid #cccccc",
)
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
        # Test the red border around inputs by CSS selectors
stacked_selectors = [".errors input", ".errors select", ".errors textarea"]
for selector in stacked_selectors:
self.assertBorder(
self.selenium.find_element(By.CSS_SELECTOR, selector),
"1px solid #ba2121",
)
tabular_selectors = [
"td ul.errorlist + input",
"td ul.errorlist + select",
"td ul.errorlist + textarea",
]
for selector in tabular_selectors:
self.assertBorder(
self.selenium.find_element(By.CSS_SELECTOR, selector),
"1px solid #ba2121",
)
def test_inline_formset_error(self):
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
self.selenium.get(
self.live_server_url + reverse("admin:admin_inlines_holder5_add")
)
stacked_inline_formset_selector = (
"div#inner5stacked_set-group fieldset.module.collapse"
)
tabular_inline_formset_selector = (
"div#inner5tabular_set-group fieldset.module.collapse"
)
# Inlines without errors, both inlines collapsed
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.assertCountSeleniumElements(
stacked_inline_formset_selector + ".collapsed", 1
)
self.assertCountSeleniumElements(
tabular_inline_formset_selector + ".collapsed", 1
)
show_links = self.selenium.find_elements(By.LINK_TEXT, "SHOW")
self.assertEqual(len(show_links), 2)
# Inlines with errors, both inlines expanded
test_fields = ["#id_inner5stacked_set-0-dummy", "#id_inner5tabular_set-0-dummy"]
for show_index, field_name in enumerate(test_fields):
show_links[show_index].click()
self.wait_until_visible(field_name)
self.selenium.find_element(By.ID, field_name[1:]).send_keys(1)
hide_links = self.selenium.find_elements(By.LINK_TEXT, "HIDE")
self.assertEqual(len(hide_links), 2)
for hide_index, field_name in enumerate(test_fields):
hide_link = hide_links[hide_index]
self.selenium.execute_script(
"window.scrollTo(0, %s);" % hide_link.location["y"]
)
hide_link.click()
self.wait_until_invisible(field_name)
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
with self.disable_implicit_wait():
self.assertCountSeleniumElements(
stacked_inline_formset_selector + ".collapsed", 0
)
self.assertCountSeleniumElements(
tabular_inline_formset_selector + ".collapsed", 0
)
self.assertCountSeleniumElements(stacked_inline_formset_selector, 1)
self.assertCountSeleniumElements(tabular_inline_formset_selector, 1)
def test_inlines_verbose_name(self):
"""
The item added by the "Add another XXX" link must use the correct
verbose_name in the inline form.
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret")
# Hide sidebar.
self.selenium.get(
self.live_server_url + reverse("admin:admin_inlines_course_add")
)
toggle_button = self.selenium.find_element(
By.CSS_SELECTOR, "#toggle-nav-sidebar"
)
toggle_button.click()
# Each combination of horizontal/vertical filter with stacked/tabular
# inlines.
tests = [
"admin:admin_inlines_course_add",
"admin:admin_inlines_courseproxy_add",
"admin:admin_inlines_courseproxy1_add",
"admin:admin_inlines_courseproxy2_add",
]
css_selector = ".dynamic-class_set#class_set-%s h2"
for url_name in tests:
with self.subTest(url=url_name):
self.selenium.get(self.live_server_url + reverse(url_name))
# First inline shows the verbose_name.
available, chosen = self.selenium.find_elements(
By.CSS_SELECTOR, css_selector % 0
)
self.assertEqual(available.text, "AVAILABLE ATTENDANT")
self.assertEqual(chosen.text, "CHOSEN ATTENDANT")
# Added inline should also have the correct verbose_name.
self.selenium.find_element(By.LINK_TEXT, "Add another Class").click()
available, chosen = self.selenium.find_elements(
By.CSS_SELECTOR, css_selector % 1
)
self.assertEqual(available.text, "AVAILABLE ATTENDANT")
self.assertEqual(chosen.text, "CHOSEN ATTENDANT")
# Third inline should also have the correct verbose_name.
self.selenium.find_element(By.LINK_TEXT, "Add another Class").click()
available, chosen = self.selenium.find_elements(
By.CSS_SELECTOR, css_selector % 2
)
self.assertEqual(available.text, "AVAILABLE ATTENDANT")
self.assertEqual(chosen.text, "CHOSEN ATTENDANT")
|
09a9633ed5612d506e1d01797beb58e4cfec8737f5897429241b37b29cff12a4 | """
Testing of admin inline formsets.
"""
import random
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import models
class Parent(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Teacher(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Child(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return "I am %s, a child of %s" % (self.name, self.parent)
class Book(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Author(models.Model):
name = models.CharField(max_length=50)
books = models.ManyToManyField(Book)
person = models.OneToOneField("Person", models.CASCADE, null=True)
class NonAutoPKBook(models.Model):
rand_pk = models.IntegerField(primary_key=True, editable=False)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
def save(self, *args, **kwargs):
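        # Emulate a non-editable primary key by drawing random candidates
        # until an unused value is found.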
while not self.rand_pk:
test_pk = random.randint(1, 99999)
if not NonAutoPKBook.objects.filter(rand_pk=test_pk).exists():
self.rand_pk = test_pk
super().save(*args, **kwargs)
class NonAutoPKBookChild(NonAutoPKBook):
pass
class EditablePKBook(models.Model):
manual_pk = models.IntegerField(primary_key=True)
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=50)
class Holder(models.Model):
dummy = models.IntegerField()
class Inner(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder, models.CASCADE)
readonly = models.CharField("Inner readonly label", max_length=1)
def get_absolute_url(self):
return "/inner/"
class Holder2(models.Model):
dummy = models.IntegerField()
class Inner2(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder2, models.CASCADE)
class Holder3(models.Model):
dummy = models.IntegerField()
class Inner3(models.Model):
dummy = models.IntegerField()
holder = models.ForeignKey(Holder3, models.CASCADE)
# Models for ticket #8190
class Holder4(models.Model):
dummy = models.IntegerField()
class Inner4Stacked(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["dummy", "holder"], name="unique_stacked_dummy_per_holder"
)
]
class Inner4Tabular(models.Model):
dummy = models.IntegerField(help_text="Awesome tabular help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["dummy", "holder"], name="unique_tabular_dummy_per_holder"
)
]
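# The unique constraints above trigger the "Please correct the duplicate
# values below." errors exercised by the admin inline Selenium tests.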
# Models for ticket #31441
class Holder5(models.Model):
dummy = models.IntegerField()
class Inner5Stacked(models.Model):
name = models.CharField(max_length=10)
select = models.CharField(choices=(("1", "One"), ("2", "Two")), max_length=10)
text = models.TextField()
dummy = models.IntegerField()
holder = models.ForeignKey(Holder5, models.CASCADE)
class Inner5Tabular(models.Model):
name = models.CharField(max_length=10)
select = models.CharField(choices=(("1", "One"), ("2", "Two")), max_length=10)
text = models.TextField()
dummy = models.IntegerField()
holder = models.ForeignKey(Holder5, models.CASCADE)
# Models for #12749
class Person(models.Model):
firstname = models.CharField(max_length=15)
class OutfitItem(models.Model):
name = models.CharField(max_length=15)
class Fashionista(models.Model):
person = models.OneToOneField(Person, models.CASCADE, primary_key=True)
weaknesses = models.ManyToManyField(
OutfitItem, through="ShoppingWeakness", blank=True
)
class ShoppingWeakness(models.Model):
fashionista = models.ForeignKey(Fashionista, models.CASCADE)
item = models.ForeignKey(OutfitItem, models.CASCADE)
# Models for #13510
class TitleCollection(models.Model):
pass
class Title(models.Model):
collection = models.ForeignKey(
TitleCollection, models.SET_NULL, blank=True, null=True
)
title1 = models.CharField(max_length=100)
title2 = models.CharField(max_length=100)
# Models for #15424
class Poll(models.Model):
name = models.CharField(max_length=40)
class Question(models.Model):
text = models.CharField(max_length=40)
poll = models.ForeignKey(Poll, models.CASCADE)
def clean(self):
raise ValidationError("Always invalid model.")
class Novel(models.Model):
name = models.CharField(max_length=40)
class NovelReadonlyChapter(Novel):
class Meta:
proxy = True
class Chapter(models.Model):
name = models.CharField(max_length=40)
novel = models.ForeignKey(Novel, models.CASCADE)
class FootNote(models.Model):
"""
Model added for ticket 19838
"""
chapter = models.ForeignKey(Chapter, models.PROTECT)
note = models.CharField(max_length=40)
# Models for #16838
class CapoFamiglia(models.Model):
name = models.CharField(max_length=100)
class Consigliere(models.Model):
name = models.CharField(max_length=100, help_text="Help text for Consigliere")
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name="+")
class SottoCapo(models.Model):
name = models.CharField(max_length=100)
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE, related_name="+")
class ReadOnlyInline(models.Model):
name = models.CharField(max_length=100, help_text="Help text for ReadOnlyInline")
capo_famiglia = models.ForeignKey(CapoFamiglia, models.CASCADE)
# Models for #18433
class ParentModelWithCustomPk(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
class ChildModel1(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return "/child_model1/"
class ChildModel2(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return "/child_model2/"
# Models for #19425
class BinaryTree(models.Model):
name = models.CharField(max_length=100)
parent = models.ForeignKey("self", models.SET_NULL, null=True, blank=True)
# Models for #19524
class LifeForm(models.Model):
pass
class ExtraTerrestrial(LifeForm):
name = models.CharField(max_length=100)
class Sighting(models.Model):
et = models.ForeignKey(ExtraTerrestrial, models.CASCADE)
place = models.CharField(max_length=100)
# Models for #18263
class SomeParentModel(models.Model):
name = models.CharField(max_length=1)
class SomeChildModel(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField()
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
readonly_field = models.CharField(max_length=1)
# Models for #30231
class Course(models.Model):
name = models.CharField(max_length=128)
def __str__(self):
return self.name
class Class(models.Model):
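    # verbose_name="attendant" feeds the filter widget headings checked in
    # SeleniumTests.test_inlines_verbose_name ("AVAILABLE ATTENDANT").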
person = models.ManyToManyField(Person, verbose_name="attendant")
course = models.ForeignKey(Course, on_delete=models.CASCADE)
class CourseProxy(Course):
class Meta:
proxy = True
class CourseProxy1(Course):
class Meta:
proxy = True
class CourseProxy2(Course):
class Meta:
proxy = True
# Other models
class ShowInlineParent(models.Model):
show_inlines = models.BooleanField(default=False)
class ShowInlineChild(models.Model):
parent = models.ForeignKey(ShowInlineParent, on_delete=models.CASCADE)
class ProfileCollection(models.Model):
pass
class Profile(models.Model):
collection = models.ForeignKey(
ProfileCollection, models.SET_NULL, blank=True, null=True
)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
class VerboseNameProfile(Profile):
class Meta:
verbose_name = "Model with verbose name only"
class VerboseNamePluralProfile(Profile):
class Meta:
verbose_name_plural = "Model with verbose name plural only"
class BothVerboseNameProfile(Profile):
class Meta:
verbose_name = "Model with both - name"
verbose_name_plural = "Model with both - plural name"
|
ada041bb93bcea6055cc31104eab6872c257e3f8418672b16fe1622176b5daf1 | import datetime
import pickle
import unittest
import uuid
from collections import namedtuple
from copy import deepcopy
from decimal import Decimal
from unittest import mock
from django.core.exceptions import FieldError
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import (
AutoField,
Avg,
BinaryField,
BooleanField,
Case,
CharField,
Count,
DateField,
DateTimeField,
DecimalField,
DurationField,
Exists,
Expression,
ExpressionList,
ExpressionWrapper,
F,
FloatField,
Func,
IntegerField,
Max,
Min,
Model,
OrderBy,
OuterRef,
Q,
StdDev,
Subquery,
Sum,
TimeField,
UUIDField,
Value,
Variance,
When,
)
from django.db.models.expressions import (
Col,
Combinable,
CombinedExpression,
RawSQL,
Ref,
)
from django.db.models.functions import (
Coalesce,
Concat,
Left,
Length,
Lower,
Substr,
Upper,
)
from django.db.models.sql import constants
from django.db.models.sql.datastructures import Join
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import (
Approximate,
CaptureQueriesContext,
isolate_apps,
register_lookup,
)
from django.utils.functional import SimpleLazyObject
from .models import (
UUID,
UUIDPK,
Company,
Employee,
Experiment,
Manager,
Number,
RemoteEmployee,
Result,
SimulationRun,
Time,
)
class BasicExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.example_inc = Company.objects.create(
name="Example Inc.",
num_employees=2300,
num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10),
)
cls.foobar_ltd = Company.objects.create(
name="Foobar Ltd.",
num_employees=3,
num_chairs=4,
based_in_eu=True,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20),
)
cls.max = Employee.objects.create(
firstname="Max", lastname="Mustermann", salary=30
)
cls.gmbh = Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1, ceo=cls.max
)
def setUp(self):
self.company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by("name", "num_employees", "num_chairs")
def test_annotate_values_aggregate(self):
companies = (
Company.objects.annotate(
salaries=F("ceo__salary"),
)
.values("num_employees", "salaries")
.aggregate(
result=Sum(
F("salaries") + F("num_employees"), output_field=IntegerField()
),
)
)
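        # (2300 + 10) + (3 + 20) + (32 + 30) == 2395, per setUpTestData.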
self.assertEqual(companies["result"], 2395)
def test_annotate_values_filter(self):
companies = (
Company.objects.annotate(
foo=RawSQL("%s", ["value"]),
)
.filter(foo="value")
.order_by("name")
)
self.assertSequenceEqual(
companies,
[self.example_inc, self.foobar_ltd, self.gmbh],
)
def test_annotate_values_count(self):
companies = Company.objects.annotate(foo=RawSQL("%s", ["value"]))
self.assertEqual(companies.count(), 3)
@skipUnlessDBFeature("supports_boolean_expr_in_select_clause")
def test_filtering_on_annotate_that_uses_q(self):
self.assertEqual(
Company.objects.annotate(
num_employees_check=ExpressionWrapper(
Q(num_employees__gt=3), output_field=BooleanField()
)
)
.filter(num_employees_check=True)
.count(),
2,
)
def test_filtering_on_q_that_is_boolean(self):
self.assertEqual(
Company.objects.filter(
ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField())
).count(),
2,
)
def test_filtering_on_rawsql_that_is_boolean(self):
self.assertEqual(
Company.objects.filter(
RawSQL("num_employees > %s", (3,), output_field=BooleanField()),
).count(),
2,
)
def test_filter_inter_attribute(self):
# We can filter on attribute relationships on same model obj, e.g.
# find companies where the number of employees is greater
# than the number of chairs.
self.assertSequenceEqual(
self.company_query.filter(num_employees__gt=F("num_chairs")),
[
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{"num_chairs": 1, "name": "Test GmbH", "num_employees": 32},
],
)
def test_update(self):
# We can set one field to have the value of another field
# Make sure we have enough chairs
self.company_query.update(num_chairs=F("num_employees"))
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 32, "name": "Test GmbH", "num_employees": 32},
],
)
def test_arithmetic(self):
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
self.company_query.update(num_chairs=F("num_employees") + 2)
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 2302, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 5, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 34, "name": "Test GmbH", "num_employees": 32},
],
)
def test_order_of_operations(self):
        # The standard order of operations is followed
self.company_query.update(
num_chairs=F("num_employees") + 2 * F("num_employees")
)
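        # num_employees + 2 * num_employees evaluates as
        # num_employees + (2 * num_employees), i.e. 3 * num_employees.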
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 6900, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 9, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 96, "name": "Test GmbH", "num_employees": 32},
],
)
def test_parenthesis_priority(self):
        # Operator precedence can be overridden by parentheses
self.company_query.update(
num_chairs=(F("num_employees") + 2) * F("num_employees")
)
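        # (num_employees + 2) * num_employees: 2302 * 2300, 5 * 3, and
        # 34 * 32.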
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 5294600, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 15, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 1088, "name": "Test GmbH", "num_employees": 32},
],
)
def test_update_with_fk(self):
# ForeignKey can become updated with the value of another ForeignKey.
self.assertEqual(Company.objects.update(point_of_contact=F("ceo")), 3)
self.assertQuerysetEqual(
Company.objects.all(),
["Joe Smith", "Frank Meyer", "Max Mustermann"],
lambda c: str(c.point_of_contact),
ordered=False,
)
def test_update_with_none(self):
Number.objects.create(integer=1, float=1.0)
Number.objects.create(integer=2)
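        # Value(None) is rendered as NULL in SQL.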
Number.objects.filter(float__isnull=False).update(float=Value(None))
self.assertQuerysetEqual(
Number.objects.all(), [None, None], lambda n: n.float, ordered=False
)
def test_filter_with_join(self):
# F Expressions can also span joins
Company.objects.update(point_of_contact=F("ceo"))
c = Company.objects.first()
c.point_of_contact = Employee.objects.create(
firstname="Guido", lastname="van Rossum"
)
c.save()
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")),
["Foobar Ltd.", "Test GmbH"],
lambda c: c.name,
ordered=False,
)
Company.objects.exclude(ceo__firstname=F("point_of_contact__firstname")).update(
name="foo"
)
self.assertEqual(
Company.objects.exclude(ceo__firstname=F("point_of_contact__firstname"))
.get()
.name,
"foo",
)
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name=F("point_of_contact__lastname"))
def test_object_update(self):
# F expressions can be used to update attributes on single objects
self.gmbh.num_employees = F("num_employees") + 4
self.gmbh.save()
self.gmbh.refresh_from_db()
self.assertEqual(self.gmbh.num_employees, 36)
def test_new_object_save(self):
# We should be able to use Funcs when inserting new data
test_co = Company(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1, ceo=self.max
)
test_co.save()
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_new_object_create(self):
test_co = Company.objects.create(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1, ceo=self.max
)
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_object_create_with_aggregate(self):
# Aggregates are not allowed when inserting new data
msg = (
"Aggregate functions are not allowed in this query "
"(num_employees=Max(Value(1)))."
)
with self.assertRaisesMessage(FieldError, msg):
Company.objects.create(
name="Company",
num_employees=Max(Value(1)),
num_chairs=1,
ceo=Employee.objects.create(
firstname="Just", lastname="Doit", salary=30
),
)
def test_object_update_fk(self):
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh = Company.objects.get(pk=self.gmbh.pk)
msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.'
with self.assertRaisesMessage(ValueError, msg):
test_gmbh.point_of_contact = F("ceo")
test_gmbh.point_of_contact = self.gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__lastname")
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
test_gmbh.save()
def test_update_inherited_field_value(self):
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
RemoteEmployee.objects.update(adjusted_salary=F("salary") * 5)
def test_object_update_unsaved_objects(self):
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5, ceo=self.max
)
acme.num_employees = F("num_employees") + 16
msg = (
'Failed to insert expression "Col(expressions_company, '
'expressions.Company.num_employees) + Value(16)" on '
"expressions.Company.num_employees. F() expressions can only be "
"used to update, not to insert."
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
acme.num_employees = 12
acme.name = Lower(F("name"))
msg = (
'Failed to insert expression "Lower(Col(expressions_company, '
'expressions.Company.name))" on expressions.Company.name. F() '
"expressions can only be used to update, not to insert."
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
test = Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F("lastname"))
self.assertSequenceEqual(queryset, [test])
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertSequenceEqual(
Employee.objects.filter(lastname__startswith=F("firstname")),
[e2, e3] if connection.features.has_case_insensitive_like else [e2],
)
qs = Employee.objects.filter(lastname__istartswith=F("firstname")).order_by(
"pk"
)
self.assertSequenceEqual(qs, [e2, e3])
def test_ticket_18375_join_reuse(self):
# Reverse multijoin F() references and the lookup target the same join.
# Pre #18375 the F() join was generated first and the lookup couldn't
# reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F("company_ceo_set__num_employees")
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_kwarg_ordering(self):
# The next query was dict-randomization dependent - if the "gte=1"
# was seen first, then the F() will reuse the join generated by the
# gte lookup, if F() was seen first, then it generated a join the
# other lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F("company_ceo_set__num_employees"),
company_ceo_set__num_chairs__gte=1,
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_kwarg_ordering_2(self):
        # Another case similar to the one above. Now the same join appears
        # in two filter kwargs: one in the lhs lookup, one in F(). Pre
        # #18375 the number of joins generated was random when dict
        # randomization was enabled, i.e. the generated query depended on
        # which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F("pk"),
pk=F("company_ceo_set__num_employees"),
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_chained_filters(self):
# F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(company_ceo_set__num_employees=F("pk")).filter(
company_ceo_set__num_employees=F("company_ceo_set__num_employees")
)
self.assertEqual(str(qs.query).count("JOIN"), 2)
def test_order_by_exists(self):
mary = Employee.objects.create(
firstname="Mary", lastname="Mustermann", salary=20
)
mustermanns_by_seniority = Employee.objects.filter(
lastname="Mustermann"
).order_by(
# Order by whether the employee is the CEO of a company
Exists(Company.objects.filter(ceo=OuterRef("pk"))).desc()
)
self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary])
def test_order_by_multiline_sql(self):
raw_order_by = (
RawSQL(
"""
CASE WHEN num_employees > 1000
THEN num_chairs
ELSE 0 END
""",
[],
).desc(),
RawSQL(
"""
CASE WHEN num_chairs > 1
THEN 1
ELSE 0 END
""",
[],
).asc(),
)
for qs in (
Company.objects.all(),
Company.objects.distinct(),
):
with self.subTest(qs=qs):
self.assertSequenceEqual(
qs.order_by(*raw_order_by),
[self.example_inc, self.gmbh, self.foobar_ltd],
)
def test_outerref(self):
inner = Company.objects.filter(point_of_contact=OuterRef("pk"))
msg = (
"This queryset contains a reference to an outer query and may only "
"be used in a subquery."
)
with self.assertRaisesMessage(ValueError, msg):
inner.exists()
outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
self.assertIs(outer.exists(), True)
def test_exist_single_field_output_field(self):
queryset = Company.objects.values("pk")
self.assertIsInstance(Exists(queryset).output_field, BooleanField)
def test_subquery(self):
Company.objects.filter(name="Example Inc.").update(
point_of_contact=Employee.objects.get(firstname="Joe", lastname="Smith"),
ceo=self.max,
)
Employee.objects.create(firstname="Bob", lastname="Brown", salary=40)
qs = (
Employee.objects.annotate(
is_point_of_contact=Exists(
Company.objects.filter(point_of_contact=OuterRef("pk"))
),
is_not_point_of_contact=~Exists(
Company.objects.filter(point_of_contact=OuterRef("pk"))
),
is_ceo_of_small_company=Exists(
Company.objects.filter(num_employees__lt=200, ceo=OuterRef("pk"))
),
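                # Double negation (~~) should be a no-op, matching
                # is_ceo_of_small_company in every row below.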
is_ceo_small_2=~~Exists(
Company.objects.filter(num_employees__lt=200, ceo=OuterRef("pk"))
),
largest_company=Subquery(
Company.objects.order_by("-num_employees")
.filter(Q(ceo=OuterRef("pk")) | Q(point_of_contact=OuterRef("pk")))
.values("name")[:1],
output_field=CharField(),
),
)
.values(
"firstname",
"is_point_of_contact",
"is_not_point_of_contact",
"is_ceo_of_small_company",
"is_ceo_small_2",
"largest_company",
)
.order_by("firstname")
)
results = list(qs)
# Could use Coalesce(subq, Value('')) instead except for the bug in
# cx_Oracle mentioned in #23843.
bob = results[0]
if (
bob["largest_company"] == ""
and connection.features.interprets_empty_strings_as_nulls
):
bob["largest_company"] = None
self.assertEqual(
results,
[
{
"firstname": "Bob",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": False,
"is_ceo_small_2": False,
"largest_company": None,
},
{
"firstname": "Frank",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": True,
"is_ceo_small_2": True,
"largest_company": "Foobar Ltd.",
},
{
"firstname": "Joe",
"is_point_of_contact": True,
"is_not_point_of_contact": False,
"is_ceo_of_small_company": False,
"is_ceo_small_2": False,
"largest_company": "Example Inc.",
},
{
"firstname": "Max",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": True,
"is_ceo_small_2": True,
"largest_company": "Example Inc.",
},
],
)
# A less elegant way to write the same query: this uses a LEFT OUTER
# JOIN and an IS NULL, inside a WHERE NOT IN which is probably less
# efficient than EXISTS.
self.assertCountEqual(
qs.filter(is_point_of_contact=True).values("pk"),
Employee.objects.exclude(company_point_of_contact_set=None).values("pk"),
)
def test_subquery_eq(self):
qs = Employee.objects.annotate(
is_ceo=Exists(Company.objects.filter(ceo=OuterRef("pk"))),
is_point_of_contact=Exists(
Company.objects.filter(point_of_contact=OuterRef("pk")),
),
small_company=Exists(
queryset=Company.objects.filter(num_employees__lt=200),
),
).filter(is_ceo=True, is_point_of_contact=False, small_company=True)
self.assertNotEqual(
qs.query.annotations["is_ceo"],
qs.query.annotations["is_point_of_contact"],
)
self.assertNotEqual(
qs.query.annotations["is_ceo"],
qs.query.annotations["small_company"],
)
def test_subquery_sql(self):
employees = Employee.objects.all()
employees_subquery = Subquery(employees)
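        # Subquery() clones the inner query and flags the clone as a
        # subquery; the original queryset is left unmarked.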
self.assertIs(employees_subquery.query.subquery, True)
self.assertIs(employees.query.subquery, False)
compiler = employees_subquery.query.get_compiler(connection=connection)
sql, _ = employees_subquery.as_sql(compiler, connection)
self.assertIn("(SELECT ", sql)
def test_in_subquery(self):
# This is a contrived test (and you really wouldn't write this query),
# but it is a succinct way to test the __in=Subquery() construct.
small_companies = Company.objects.filter(num_employees__lt=200).values("pk")
subquery_test = Company.objects.filter(pk__in=Subquery(small_companies))
self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh])
subquery_test2 = Company.objects.filter(
pk=Subquery(small_companies.filter(num_employees=3))
)
self.assertCountEqual(subquery_test2, [self.foobar_ltd])
def test_uuid_pk_subquery(self):
u = UUIDPK.objects.create()
UUID.objects.create(uuid_fk=u)
qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values("uuid_fk__id")))
self.assertCountEqual(qs, [u])
def test_nested_subquery(self):
inner = Company.objects.filter(point_of_contact=OuterRef("pk"))
outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
contrived = Employee.objects.annotate(
is_point_of_contact=Subquery(
outer.filter(pk=OuterRef("pk")).values("is_point_of_contact"),
output_field=BooleanField(),
),
)
self.assertCountEqual(contrived.values_list(), outer.values_list())
def test_nested_subquery_join_outer_ref(self):
inner = Employee.objects.filter(pk=OuterRef("ceo__pk")).values("pk")
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
ceo__in=inner,
ceo__pk=OuterRef("pk"),
).values("pk"),
),
)
self.assertSequenceEqual(
qs.values_list("ceo_company", flat=True),
[self.example_inc.pk, self.foobar_ltd.pk, self.gmbh.pk],
)
def test_nested_subquery_outer_ref_2(self):
first = Time.objects.create(time="09:00")
second = Time.objects.create(time="17:00")
third = Time.objects.create(time="21:00")
SimulationRun.objects.bulk_create(
[
SimulationRun(start=first, end=second, midpoint="12:00"),
SimulationRun(start=first, end=third, midpoint="15:00"),
SimulationRun(start=second, end=first, midpoint="00:00"),
]
)
inner = Time.objects.filter(
time=OuterRef(OuterRef("time")), pk=OuterRef("start")
).values("time")
middle = SimulationRun.objects.annotate(other=Subquery(inner)).values("other")[
:1
]
outer = Time.objects.annotate(other=Subquery(middle, output_field=TimeField()))
# This is a contrived example. It exercises the double OuterRef form.
self.assertCountEqual(outer, [first, second, third])
def test_nested_subquery_outer_ref_with_autofield(self):
first = Time.objects.create(time="09:00")
second = Time.objects.create(time="17:00")
SimulationRun.objects.create(start=first, end=second, midpoint="12:00")
inner = SimulationRun.objects.filter(start=OuterRef(OuterRef("pk"))).values(
"start"
)
middle = Time.objects.annotate(other=Subquery(inner)).values("other")[:1]
outer = Time.objects.annotate(
other=Subquery(middle, output_field=IntegerField())
)
# This exercises the double OuterRef form with AutoField as pk.
self.assertCountEqual(outer, [first, second])
def test_annotations_within_subquery(self):
Company.objects.filter(num_employees__lt=50).update(
ceo=Employee.objects.get(firstname="Frank")
)
inner = (
Company.objects.filter(ceo=OuterRef("pk"))
.values("ceo")
.annotate(total_employees=Sum("num_employees"))
.values("total_employees")
)
outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(
salary__lte=Subquery(inner)
)
self.assertSequenceEqual(
outer.order_by("-total_employees").values("salary", "total_employees"),
[
{"salary": 10, "total_employees": 2300},
{"salary": 20, "total_employees": 35},
],
)
def test_subquery_references_joined_table_twice(self):
inner = Company.objects.filter(
num_chairs__gte=OuterRef("ceo__salary"),
num_employees__gte=OuterRef("point_of_contact__salary"),
)
# Another contrived example (there is no need to have a subquery here)
outer = Company.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertFalse(outer.exists())
def test_subquery_filter_by_aggregate(self):
Number.objects.create(integer=1000, float=1.2)
Employee.objects.create(salary=1000)
qs = Number.objects.annotate(
min_valuable_count=Subquery(
Employee.objects.filter(
salary=OuterRef("integer"),
)
.annotate(cnt=Count("salary"))
.filter(cnt__gt=0)
.values("cnt")[:1]
),
)
self.assertEqual(qs.get().float, 1.2)
def test_subquery_filter_by_lazy(self):
self.max.manager = Manager.objects.create(name="Manager")
self.max.save()
max_manager = SimpleLazyObject(
lambda: Manager.objects.get(pk=self.max.manager.pk)
)
qs = Company.objects.annotate(
ceo_manager=Subquery(
Employee.objects.filter(
lastname=OuterRef("ceo__lastname"),
).values("manager"),
),
).filter(ceo_manager=max_manager)
self.assertEqual(qs.get(), self.gmbh)
def test_aggregate_subquery_annotation(self):
with self.assertNumQueries(1) as ctx:
aggregate = Company.objects.annotate(
ceo_salary=Subquery(
Employee.objects.filter(
id=OuterRef("ceo_id"),
).values("salary")
),
).aggregate(
ceo_salary_gt_20=Count("pk", filter=Q(ceo_salary__gt=20)),
)
self.assertEqual(aggregate, {"ceo_salary_gt_20": 1})
# Aggregation over a subquery annotation doesn't annotate the subquery
# twice in the inner query.
sql = ctx.captured_queries[0]["sql"]
self.assertLessEqual(sql.count("SELECT"), 3)
# GROUP BY isn't required to aggregate over a query that doesn't
# contain nested aggregates.
self.assertNotIn("GROUP BY", sql)
@skipUnlessDBFeature("supports_over_clause")
def test_aggregate_rawsql_annotation(self):
with self.assertNumQueries(1) as ctx:
aggregate = Company.objects.annotate(
salary=RawSQL("SUM(num_chairs) OVER (ORDER BY num_employees)", []),
).aggregate(
count=Count("pk"),
)
self.assertEqual(aggregate, {"count": 3})
sql = ctx.captured_queries[0]["sql"]
self.assertNotIn("GROUP BY", sql)
def test_explicit_output_field(self):
class FuncA(Func):
output_field = CharField()
class FuncB(Func):
pass
expr = FuncB(FuncA())
self.assertEqual(expr.output_field, FuncA.output_field)
def test_outerref_mixed_case_table_name(self):
inner = Result.objects.filter(result_time__gte=OuterRef("experiment__assigned"))
outer = Result.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertFalse(outer.exists())
def test_outerref_with_operator(self):
inner = Company.objects.filter(num_employees=OuterRef("ceo__salary") + 2)
outer = Company.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertEqual(outer.get().name, "Test GmbH")
def test_nested_outerref_with_function(self):
self.gmbh.point_of_contact = Employee.objects.get(lastname="Meyer")
self.gmbh.save()
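        # The doubly-wrapped OuterRef resolves two levels up, to the
        # outermost Employee queryset.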
inner = Employee.objects.filter(
lastname__startswith=Left(OuterRef(OuterRef("lastname")), 1),
)
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
point_of_contact__in=inner,
ceo__pk=OuterRef("pk"),
).values("name"),
),
).filter(ceo_company__isnull=False)
self.assertEqual(qs.get().ceo_company, "Test GmbH")
def test_annotation_with_outerref(self):
gmbh_salary = Company.objects.annotate(
max_ceo_salary_raise=Subquery(
Company.objects.annotate(
salary_raise=OuterRef("num_employees") + F("num_employees"),
)
.order_by("-salary_raise")
.values("salary_raise")[:1],
output_field=IntegerField(),
),
).get(pk=self.gmbh.pk)
self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332)
def test_annotation_with_nested_outerref(self):
self.gmbh.point_of_contact = Employee.objects.get(lastname="Meyer")
self.gmbh.save()
inner = Employee.objects.annotate(
outer_lastname=OuterRef(OuterRef("lastname")),
).filter(lastname__startswith=Left("outer_lastname", 1))
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
point_of_contact__in=inner,
ceo__pk=OuterRef("pk"),
).values("name"),
),
).filter(ceo_company__isnull=False)
self.assertEqual(qs.get().ceo_company, "Test GmbH")
def test_pickle_expression(self):
expr = Value(1)
expr.convert_value # populate cached property
self.assertEqual(pickle.loads(pickle.dumps(expr)), expr)
def test_incorrect_field_in_F_expression(self):
with self.assertRaisesMessage(
FieldError, "Cannot resolve keyword 'nope' into field."
):
list(Employee.objects.filter(firstname=F("nope")))
def test_incorrect_joined_field_in_F_expression(self):
with self.assertRaisesMessage(
FieldError, "Cannot resolve keyword 'nope' into field."
):
list(Company.objects.filter(ceo__pk=F("point_of_contact__nope")))
def test_exists_in_filter(self):
inner = Company.objects.filter(ceo=OuterRef("pk")).values("pk")
qs1 = Employee.objects.filter(Exists(inner))
qs2 = Employee.objects.annotate(found=Exists(inner)).filter(found=True)
self.assertCountEqual(qs1, qs2)
self.assertFalse(Employee.objects.exclude(Exists(inner)).exists())
self.assertCountEqual(qs2, Employee.objects.exclude(~Exists(inner)))
def test_subquery_in_filter(self):
inner = Company.objects.filter(ceo=OuterRef("pk")).values("based_in_eu")
self.assertSequenceEqual(
Employee.objects.filter(Subquery(inner)),
[self.foobar_ltd.ceo],
)
def test_subquery_group_by_outerref_in_filter(self):
inner = (
Company.objects.annotate(
employee=OuterRef("pk"),
)
.values("employee")
.annotate(
min_num_chairs=Min("num_chairs"),
)
.values("ceo")
)
self.assertIs(Employee.objects.filter(pk__in=Subquery(inner)).exists(), True)
def test_case_in_filter_if_boolean_output_field(self):
is_ceo = Company.objects.filter(ceo=OuterRef("pk"))
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
qs = Employee.objects.filter(
Case(
When(Exists(is_ceo), then=True),
When(Exists(is_poc), then=True),
default=False,
output_field=BooleanField(),
),
)
self.assertCountEqual(qs, [self.example_inc.ceo, self.foobar_ltd.ceo, self.max])
def test_boolean_expression_combined(self):
is_ceo = Company.objects.filter(ceo=OuterRef("pk"))
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) | Exists(is_poc)),
[self.example_inc.ceo, self.foobar_ltd.ceo, self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) & Exists(is_poc)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) & Q(salary__gte=30)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)),
[self.example_inc.ceo, self.max],
)
self.assertCountEqual(
Employee.objects.filter(Q(salary__gte=30) & Exists(is_ceo)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Q(salary__lt=15) | Exists(is_poc)),
[self.example_inc.ceo, self.max],
)
def test_boolean_expression_combined_with_empty_Q(self):
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
tests = [
Exists(is_poc) & Q(),
Q() & Exists(is_poc),
Exists(is_poc) | Q(),
Q() | Exists(is_poc),
Q(Exists(is_poc)) & Q(),
Q() & Q(Exists(is_poc)),
Q(Exists(is_poc)) | Q(),
Q() | Q(Exists(is_poc)),
]
for conditions in tests:
with self.subTest(conditions):
self.assertCountEqual(Employee.objects.filter(conditions), [self.max])
def test_boolean_expression_in_Q(self):
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
self.assertCountEqual(Employee.objects.filter(Q(Exists(is_poc))), [self.max])
class IterableLookupInnerExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
ceo = Employee.objects.create(firstname="Just", lastname="Doit", salary=30)
        # MySQL requires that values calculated for expressions don't fall
        # outside the field's range, so it's inconvenient to use the values
        # in the more general tests.
cls.c5020 = Company.objects.create(
name="5020 Ltd", num_employees=50, num_chairs=20, ceo=ceo
)
cls.c5040 = Company.objects.create(
name="5040 Ltd", num_employees=50, num_chairs=40, ceo=ceo
)
cls.c5050 = Company.objects.create(
name="5050 Ltd", num_employees=50, num_chairs=50, ceo=ceo
)
cls.c5060 = Company.objects.create(
name="5060 Ltd", num_employees=50, num_chairs=60, ceo=ceo
)
cls.c99300 = Company.objects.create(
name="99300 Ltd", num_employees=99, num_chairs=300, ceo=ceo
)
def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self):
# __in lookups can use F() expressions for integers.
queryset = Company.objects.filter(num_employees__in=([F("num_chairs") - 10]))
self.assertSequenceEqual(queryset, [self.c5060])
self.assertCountEqual(
Company.objects.filter(
num_employees__in=([F("num_chairs") - 10, F("num_chairs") + 10])
),
[self.c5040, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(
num_employees__in=(
[F("num_chairs") - 10, F("num_chairs"), F("num_chairs") + 10]
)
),
[self.c5040, self.c5050, self.c5060],
)
def test_expressions_in_lookups_join_choice(self):
midpoint = datetime.time(13, 0)
t1 = Time.objects.create(time=datetime.time(12, 0))
t2 = Time.objects.create(time=datetime.time(14, 0))
s1 = SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=None, midpoint=midpoint)
queryset = SimulationRun.objects.filter(
midpoint__range=[F("start__time"), F("end__time")]
)
self.assertSequenceEqual(queryset, [s1])
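        # Joins introduced by the range lookup in filter() must be INNER
        # joins.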
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.INNER)
queryset = SimulationRun.objects.exclude(
midpoint__range=[F("start__time"), F("end__time")]
)
self.assertQuerysetEqual(queryset, [], ordered=False)
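        # exclude() promotes the same joins to LEFT OUTER so rows with NULL
        # related values are considered.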
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.LOUTER)
def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self):
# Range lookups can use F() expressions for integers.
Company.objects.filter(num_employees__exact=F("num_chairs"))
self.assertCountEqual(
Company.objects.filter(num_employees__range=(F("num_chairs"), 100)),
[self.c5020, self.c5040, self.c5050],
)
self.assertCountEqual(
Company.objects.filter(
num_employees__range=(F("num_chairs") - 10, F("num_chairs") + 10)
),
[self.c5040, self.c5050, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(num_employees__range=(F("num_chairs") - 10, 100)),
[self.c5020, self.c5040, self.c5050, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(num_employees__range=(1, 100)),
[self.c5020, self.c5040, self.c5050, self.c5060, self.c99300],
)
def test_range_lookup_namedtuple(self):
EmployeeRange = namedtuple("EmployeeRange", ["minimum", "maximum"])
qs = Company.objects.filter(
num_employees__range=EmployeeRange(minimum=51, maximum=100),
)
self.assertSequenceEqual(qs, [self.c99300])
@unittest.skipUnless(
connection.vendor == "sqlite",
"This defensive test only works on databases that don't validate parameter "
"types",
)
def test_expressions_not_introduce_sql_injection_via_untrusted_string_inclusion(
self,
):
"""
This tests that SQL injection isn't possible using compilation of
expressions in iterable filters, as their compilation happens before
the main query compilation. It's limited to SQLite, as PostgreSQL,
Oracle and other vendors have defense in depth against this by type
checking. Testing against SQLite (the most permissive of the built-in
databases) demonstrates that the problem doesn't exist while keeping
the test simple.
"""
queryset = Company.objects.filter(name__in=[F("num_chairs") + "1)) OR ((1==1"])
self.assertQuerysetEqual(queryset, [], ordered=False)
def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self):
start = datetime.datetime(2016, 2, 3, 15, 0, 0)
end = datetime.datetime(2016, 2, 5, 15, 0, 0)
experiment_1 = Experiment.objects.create(
name="Integrity testing",
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
experiment_2 = Experiment.objects.create(
name="Taste testing",
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
r1 = Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 2, 4, 15, 0, 0),
)
Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 3, 10, 2, 0, 0),
)
Result.objects.create(
experiment=experiment_2,
result_time=datetime.datetime(2016, 1, 8, 5, 0, 0),
)
within_experiment_time = [F("experiment__start"), F("experiment__end")]
queryset = Result.objects.filter(result_time__range=within_experiment_time)
self.assertSequenceEqual(queryset, [r1])
class FTests(SimpleTestCase):
def test_deepcopy(self):
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_deconstruct(self):
f = F("name")
path, args, kwargs = f.deconstruct()
self.assertEqual(path, "django.db.models.F")
self.assertEqual(args, (f.name,))
self.assertEqual(kwargs, {})
def test_equal(self):
f = F("name")
same_f = F("name")
other_f = F("username")
self.assertEqual(f, same_f)
self.assertNotEqual(f, other_f)
def test_hash(self):
d = {F("name"): "Bob"}
self.assertIn(F("name"), d)
self.assertEqual(d[F("name")], "Bob")
def test_not_equal_Value(self):
f = F("name")
value = Value("name")
self.assertNotEqual(f, value)
self.assertNotEqual(value, f)
class ExpressionsTests(TestCase):
def test_F_reuse(self):
f = F("id")
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.",
num_employees=2300,
num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith"),
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
r"""
        Special characters (e.g. %, _ and \) stored in the database are
        properly escaped when using a pattern lookup with an expression --
        refs #16731
"""
Employee.objects.bulk_create(
[
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="_ohn"),
]
)
claude = Employee.objects.create(firstname="Jean-Claude", lastname="Claude")
john = Employee.objects.create(firstname="Johnny", lastname="John")
john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%Joh\\n")
self.assertCountEqual(
Employee.objects.filter(firstname__contains=F("lastname")),
[john_sign, john, claude],
)
self.assertCountEqual(
Employee.objects.filter(firstname__startswith=F("lastname")),
[john_sign, john],
)
self.assertSequenceEqual(
Employee.objects.filter(firstname__endswith=F("lastname")),
[claude],
)
def test_insensitive_patterns_escape(self):
r"""
        Special characters (e.g. %, _ and \) stored in the database are
        properly escaped when using a case-insensitive pattern lookup with an
expression -- refs #16731
"""
Employee.objects.bulk_create(
[
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="_ohn"),
]
)
claude = Employee.objects.create(firstname="Jean-Claude", lastname="claude")
john = Employee.objects.create(firstname="Johnny", lastname="john")
john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%joh\\n")
self.assertCountEqual(
Employee.objects.filter(firstname__icontains=F("lastname")),
[john_sign, john, claude],
)
self.assertCountEqual(
Employee.objects.filter(firstname__istartswith=F("lastname")),
[john_sign, john],
)
self.assertSequenceEqual(
Employee.objects.filter(firstname__iendswith=F("lastname")),
[claude],
)
@isolate_apps("expressions")
class SimpleExpressionTests(SimpleTestCase):
def test_equal(self):
self.assertEqual(Expression(), Expression())
self.assertEqual(
Expression(IntegerField()), Expression(output_field=IntegerField())
)
self.assertEqual(Expression(IntegerField()), mock.ANY)
self.assertNotEqual(Expression(IntegerField()), Expression(CharField()))
class TestModel(Model):
field = IntegerField()
other_field = IntegerField()
self.assertNotEqual(
Expression(TestModel._meta.get_field("field")),
Expression(TestModel._meta.get_field("other_field")),
)
def test_hash(self):
self.assertEqual(hash(Expression()), hash(Expression()))
self.assertEqual(
hash(Expression(IntegerField())),
hash(Expression(output_field=IntegerField())),
)
self.assertNotEqual(
hash(Expression(IntegerField())),
hash(Expression(CharField())),
)
class TestModel(Model):
field = IntegerField()
other_field = IntegerField()
self.assertNotEqual(
hash(Expression(TestModel._meta.get_field("field"))),
hash(Expression(TestModel._meta.get_field("other_field"))),
)
class ExpressionsNumericTests(TestCase):
@classmethod
def setUpTestData(cls):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
Number.objects.update(float=F("integer"))
def test_fill_with_value_from_same_object(self):
"""
        We can fill a value in all objects with another value of the
        same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[(-1, -1), (42, 42), (1337, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_increment_value(self):
"""
We can increment a value of all objects in a query set.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0).update(integer=F("integer") + 1), 2
)
self.assertQuerysetEqual(
Number.objects.all(),
[(-1, -1), (43, 42), (1338, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_filter_not_equals_other_field(self):
"""
        We can filter for objects where a value does not equal the value
        of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0).update(integer=F("integer") + 1), 2
)
self.assertQuerysetEqual(
Number.objects.exclude(float=F("integer")),
[(43, 42), (1338, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_filter_decimal_expression(self):
obj = Number.objects.create(integer=0, float=1, decimal_value=Decimal("1"))
qs = Number.objects.annotate(
x=ExpressionWrapper(Value(1), output_field=DecimalField()),
).filter(Q(x=1, integer=0) & Q(x=Decimal("1")))
self.assertSequenceEqual(qs, [obj])
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(
Number.objects.filter(pk=n.pk).update(float=F("integer") + F("float") * 2),
1,
)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(
Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3)
)
def test_decimal_expression(self):
n = Number.objects.create(integer=1, decimal_value=Decimal("0.5"))
n.decimal_value = F("decimal_value") - Decimal("0.4")
n.save()
n.refresh_from_db()
self.assertEqual(n.decimal_value, Decimal("0.1"))
class ExpressionOperatorTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n = Number.objects.create(integer=42, float=15.5)
cls.n1 = Number.objects.create(integer=-42, float=-15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") + 15, float=F("float") + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)
)
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") - 15, float=F("float") - 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3)
)
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") * 15, float=F("float") * 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)
)
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") / 2, float=F("float") / 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3)
)
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F("integer") % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
def test_lefthand_modulo_null(self):
# LH Modulo arithmetic on integers.
Employee.objects.create(firstname="John", lastname="Doe", salary=None)
qs = Employee.objects.annotate(modsalary=F("salary") % 20)
        self.assertIsNone(qs.get().modsalary)
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F("integer").bitand(56))
Number.objects.filter(pk=self.n1.pk).update(integer=F("integer").bitand(-56))
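        # 42 & 56 == 40; -42 & -56 == -64 (two's-complement semantics).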
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64)
def test_lefthand_bitwise_left_shift_operator(self):
Number.objects.update(integer=F("integer").bitleftshift(2))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168)
def test_lefthand_bitwise_right_shift_operator(self):
Number.objects.update(integer=F("integer").bitrightshift(2))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11)
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.update(integer=F("integer").bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10)
def test_lefthand_transformed_field_bitwise_or(self):
Employee.objects.create(firstname="Max", lastname="Mustermann")
with register_lookup(CharField, Length):
qs = Employee.objects.annotate(bitor=F("lastname__length").bitor(48))
self.assertEqual(qs.get().bitor, 58)
def test_lefthand_power(self):
# LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") ** 2, float=F("float") ** 1.5
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2)
)
def test_lefthand_bitwise_xor(self):
Number.objects.update(integer=F("integer").bitxor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 26)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -26)
def test_lefthand_bitwise_xor_null(self):
employee = Employee.objects.create(firstname="John", lastname="Doe")
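        # A NULL operand makes the whole bitwise expression NULL.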
Employee.objects.update(salary=F("salary").bitxor(48))
employee.refresh_from_db()
self.assertIsNone(employee.salary)
def test_lefthand_bitwise_xor_right_null(self):
employee = Employee.objects.create(firstname="John", lastname="Doe", salary=48)
Employee.objects.update(salary=F("salary").bitxor(None))
employee.refresh_from_db()
self.assertIsNone(employee.salary)
@unittest.skipUnless(
connection.vendor == "oracle", "Oracle doesn't support bitwise XOR."
)
def test_lefthand_bitwise_xor_not_supported(self):
msg = "Bitwise XOR is not supported in Oracle."
with self.assertRaisesMessage(NotSupportedError, msg):
Number.objects.update(integer=F("integer").bitxor(48))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(
integer=15 + F("integer"), float=42.7 + F("float")
)
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)
)
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(
integer=15 - F("integer"), float=42.7 - F("float")
)
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3)
)
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=15 * F("integer"), float=42.7 * F("float")
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)
)
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=640 / F("integer"), float=42.7 / F("float")
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3)
)
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F("integer"))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
def test_righthand_power(self):
# RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=2 ** F("integer"), float=1.5 ** F("float")
)
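        # 2 ** 42 == 4398046511104.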
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3)
)
class FTimeDeltaTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.sday = sday = datetime.date(2010, 6, 25)
cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
delta5 = datetime.timedelta(days=90)
# Test data is set so that deltas and delays will be
# strictly increasing.
cls.deltas = []
cls.delays = []
cls.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
cls.e0 = Experiment.objects.create(
name="e0",
assigned=sday,
start=stime,
end=end,
completed=end.date(),
estimated_time=delta0,
)
cls.deltas.append(delta0)
cls.delays.append(
cls.e0.start - datetime.datetime.combine(cls.e0.assigned, midnight)
)
cls.days_long.append(cls.e0.completed - cls.e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite.
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(
name="e1",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta1,
)
cls.deltas.append(delta1)
cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight))
cls.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(
name="e2",
assigned=sday - datetime.timedelta(3),
start=stime,
end=end,
completed=end.date(),
estimated_time=datetime.timedelta(hours=1),
)
cls.deltas.append(delta2)
cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight))
cls.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(
name="e3",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta3,
)
cls.deltas.append(delta3)
cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight))
cls.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(
name="e4",
assigned=sday - datetime.timedelta(10),
start=stime,
end=end,
completed=end.date(),
estimated_time=delta4 - datetime.timedelta(1),
)
cls.deltas.append(delta4)
cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight))
cls.days_long.append(e4.completed - e4.assigned)
# e5: started a month after assignment, very long duration
delay = datetime.timedelta(30)
end = stime + delay + delta5
e5 = Experiment.objects.create(
name="e5",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta5,
)
cls.deltas.append(delta5)
cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight))
cls.days_long.append(e5.completed - e5.assigned)
cls.expnames = [e.name for e in Experiment.objects.all()]
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(
end__lt=F("start") + datetime.timedelta(hours=1)
)
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643 - Crash when compiling query more than once
qs = Experiment.objects.filter(end__lt=F("start") + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
# Intentionally no assert
def test_delta_add(self):
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.filter(end__lt=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(end__lt=delta + F("start"))
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(end__lte=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_delta_subtract(self):
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.filter(start__gt=F("end") - delta)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(start__gte=F("end") - delta)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_exclude(self):
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.exclude(end__lt=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[i:])
test_set = [
e.name for e in Experiment.objects.exclude(end__lte=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[i + 1 :])
def test_date_comparison(self):
for i, days in enumerate(self.days_long):
test_set = [
e.name
for e in Experiment.objects.filter(completed__lt=F("assigned") + days)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(completed__lte=F("assigned") + days)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_mixed_comparisons1(self):
for i, delay in enumerate(self.delays):
test_set = [
e.name
for e in Experiment.objects.filter(assigned__gt=F("start") - delay)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(assigned__gte=F("start") - delay)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_mixed_comparisons2(self):
for i, delay in enumerate(self.delays):
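            # Truncate each delay to whole days, since "assigned" is a
            # DateField.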
delay = datetime.timedelta(delay.days)
test_set = [
e.name
for e in Experiment.objects.filter(start__lt=F("assigned") + delay)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(
start__lte=F("assigned") + delay + datetime.timedelta(1)
)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_delta_update(self):
for delta in self.deltas:
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F("start") + delta, end=F("end") + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_invalid_operator(self):
with self.assertRaises(DatabaseError):
list(Experiment.objects.filter(start=F("start") * datetime.timedelta(0)))
def test_durationfield_add(self):
zeros = [
e.name
for e in Experiment.objects.filter(start=F("start") + F("estimated_time"))
]
self.assertEqual(zeros, ["e0"])
end_less = [
e.name
for e in Experiment.objects.filter(end__lt=F("start") + F("estimated_time"))
]
self.assertEqual(end_less, ["e2"])
delta_math = [
e.name
for e in Experiment.objects.filter(
end__gte=F("start") + F("estimated_time") + datetime.timedelta(hours=1)
)
]
self.assertEqual(delta_math, ["e4"])
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("start") + Value(None, output_field=DurationField()),
output_field=DateTimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
def test_durationfield_multiply_divide(self):
Experiment.objects.update(scalar=2)
tests = [
(Decimal("2"), 2),
(F("scalar"), 2),
(2, 2),
(3.2, 3.2),
]
for expr, scalar in tests:
with self.subTest(expr=expr):
qs = Experiment.objects.annotate(
multiplied=ExpressionWrapper(
expr * F("estimated_time"),
output_field=DurationField(),
),
divided=ExpressionWrapper(
F("estimated_time") / expr,
output_field=DurationField(),
),
)
for experiment in qs:
self.assertEqual(
experiment.multiplied,
experiment.estimated_time * scalar,
)
self.assertEqual(
experiment.divided,
experiment.estimated_time / scalar,
)
def test_duration_expressions(self):
for delta in self.deltas:
qs = Experiment.objects.annotate(duration=F("estimated_time") + delta)
for obj in qs:
self.assertEqual(obj.duration, obj.estimated_time + delta)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_subtraction(self):
queryset = Experiment.objects.annotate(
completion_duration=F("completed") - F("assigned"),
)
at_least_5_days = {
e.name
for e in queryset.filter(
completion_duration__gte=datetime.timedelta(days=5)
)
}
self.assertEqual(at_least_5_days, {"e3", "e4", "e5"})
at_least_120_days = {
e.name
for e in queryset.filter(
completion_duration__gte=datetime.timedelta(days=120)
)
}
self.assertEqual(at_least_120_days, {"e5"})
less_than_5_days = {
e.name
for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))
}
self.assertEqual(less_than_5_days, {"e0", "e1", "e2"})
queryset = Experiment.objects.annotate(
difference=F("completed") - Value(None, output_field=DateField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("completed") - Value(None, output_field=DurationField()),
output_field=DateField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_subquery_subtraction(self):
subquery = Experiment.objects.filter(pk=OuterRef("pk")).values("completed")
queryset = Experiment.objects.annotate(
difference=subquery - F("completed"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_case_subtraction(self):
queryset = Experiment.objects.annotate(
date_case=Case(
When(Q(name="e0"), then=F("completed")),
output_field=DateField(),
),
completed_value=Value(
self.e0.completed,
output_field=DateField(),
),
difference=F("date_case") - F("completed_value"),
).filter(difference=datetime.timedelta())
self.assertEqual(queryset.get(), self.e0)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_time_subtraction(self):
Time.objects.create(time=datetime.time(12, 30, 15, 2345))
queryset = Time.objects.annotate(
difference=F("time") - Value(datetime.time(11, 15, 0)),
)
self.assertEqual(
queryset.get().difference,
datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345),
)
queryset = Time.objects.annotate(
difference=F("time") - Value(None, output_field=TimeField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Time.objects.annotate(
shifted=ExpressionWrapper(
F("time") - Value(None, output_field=DurationField()),
output_field=TimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_time_subquery_subtraction(self):
Time.objects.create(time=datetime.time(12, 30, 15, 2345))
subquery = Time.objects.filter(pk=OuterRef("pk")).values("time")
queryset = Time.objects.annotate(
difference=subquery - F("time"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subtraction(self):
under_estimate = [
e.name
for e in Experiment.objects.filter(estimated_time__gt=F("end") - F("start"))
]
self.assertEqual(under_estimate, ["e2"])
over_estimate = [
e.name
for e in Experiment.objects.filter(estimated_time__lt=F("end") - F("start"))
]
self.assertEqual(over_estimate, ["e4"])
queryset = Experiment.objects.annotate(
difference=F("start") - Value(None, output_field=DateTimeField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("start") - Value(None, output_field=DurationField()),
output_field=DateTimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subquery_subtraction(self):
subquery = Experiment.objects.filter(pk=OuterRef("pk")).values("start")
queryset = Experiment.objects.annotate(
difference=subquery - F("start"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subtraction_microseconds(self):
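        # A large timedelta with non-zero microseconds; checks that
        # microsecond precision survives the arithmetic.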
delta = datetime.timedelta(microseconds=8999999999999999)
Experiment.objects.update(end=F("start") + delta)
qs = Experiment.objects.annotate(delta=F("end") - F("start"))
for e in qs:
self.assertEqual(e.delta, delta)
def test_duration_with_datetime(self):
        # Exclude e1, which has very high precision, so this can be tested
        # on all backends regardless of whether they support microsecond
        # precision.
over_estimate = (
Experiment.objects.exclude(name="e1")
.filter(
completed__gt=self.stime + F("estimated_time"),
)
.order_by("name")
)
self.assertQuerysetEqual(over_estimate, ["e3", "e4", "e5"], lambda e: e.name)
def test_duration_with_datetime_microseconds(self):
delta = datetime.timedelta(microseconds=8999999999999999)
qs = Experiment.objects.annotate(
dt=ExpressionWrapper(
F("start") + delta,
output_field=DateTimeField(),
)
)
for e in qs:
self.assertEqual(e.dt, e.start + delta)
def test_date_minus_duration(self):
more_than_4_days = Experiment.objects.filter(
assigned__lt=F("completed") - Value(datetime.timedelta(days=4))
)
self.assertQuerysetEqual(more_than_4_days, ["e3", "e4", "e5"], lambda e: e.name)
def test_negative_timedelta_update(self):
# subtract 30 seconds, 30 minutes, 2 hours and 2 days
experiments = (
Experiment.objects.filter(name="e0")
.annotate(
start_sub_seconds=F("start") + datetime.timedelta(seconds=-30),
)
.annotate(
start_sub_minutes=F("start_sub_seconds")
+ datetime.timedelta(minutes=-30),
)
.annotate(
start_sub_hours=F("start_sub_minutes") + datetime.timedelta(hours=-2),
)
.annotate(
new_start=F("start_sub_hours") + datetime.timedelta(days=-2),
)
)
expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0)
# subtract 30 microseconds
experiments = experiments.annotate(
new_start=F("new_start") + datetime.timedelta(microseconds=-30)
)
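        # stime had 747000 microseconds; expected_start was built without
        # them, so add back 747000 - 30 == 746970.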
expected_start += datetime.timedelta(microseconds=+746970)
experiments.update(start=F("new_start"))
e0 = Experiment.objects.get(name="e0")
self.assertEqual(e0.start, expected_start)
class ValueTests(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(
uuid=Value(
uuid.UUID("12345678901234567890123456789012"), output_field=UUIDField()
)
)
self.assertEqual(
UUID.objects.get().uuid, uuid.UUID("12345678901234567890123456789012")
)
def test_deconstruct(self):
value = Value("name")
path, args, kwargs = value.deconstruct()
self.assertEqual(path, "django.db.models.Value")
self.assertEqual(args, (value.value,))
self.assertEqual(kwargs, {})
def test_deconstruct_output_field(self):
value = Value("name", output_field=CharField())
path, args, kwargs = value.deconstruct()
self.assertEqual(path, "django.db.models.Value")
self.assertEqual(args, (value.value,))
self.assertEqual(len(kwargs), 1)
self.assertEqual(
kwargs["output_field"].deconstruct(), CharField().deconstruct()
)
def test_repr(self):
tests = [
(None, "Value(None)"),
("str", "Value('str')"),
(True, "Value(True)"),
(42, "Value(42)"),
(
datetime.datetime(2019, 5, 15),
"Value(datetime.datetime(2019, 5, 15, 0, 0))",
),
(Decimal("3.14"), "Value(Decimal('3.14'))"),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(repr(Value(value)), expected)
def test_equal(self):
value = Value("name")
self.assertEqual(value, Value("name"))
self.assertNotEqual(value, Value("username"))
def test_hash(self):
d = {Value("name"): "Bob"}
self.assertIn(Value("name"), d)
self.assertEqual(d[Value("name")], "Bob")
def test_equal_output_field(self):
value = Value("name", output_field=CharField())
same_value = Value("name", output_field=CharField())
other_value = Value("name", output_field=TimeField())
no_output_field = Value("name")
self.assertEqual(value, same_value)
self.assertNotEqual(value, other_value)
self.assertNotEqual(value, no_output_field)
def test_raise_empty_expressionlist(self):
msg = "ExpressionList requires at least one expression"
with self.assertRaisesMessage(ValueError, msg):
ExpressionList()
def test_compile_unresolved(self):
# This test might need to be revisited later on if #25425 is enforced.
compiler = Time.objects.all().query.get_compiler(connection=connection)
value = Value("foo")
self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"]))
value = Value("foo", output_field=CharField())
self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"]))
def test_output_field_decimalfield(self):
Time.objects.create()
time = Time.objects.annotate(one=Value(1, output_field=DecimalField())).first()
self.assertEqual(time.one, 1)
def test_resolve_output_field(self):
value_types = [
("str", CharField),
(True, BooleanField),
(42, IntegerField),
(3.14, FloatField),
(datetime.date(2019, 5, 15), DateField),
(datetime.datetime(2019, 5, 15), DateTimeField),
(datetime.time(3, 16), TimeField),
(datetime.timedelta(1), DurationField),
(Decimal("3.14"), DecimalField),
(b"", BinaryField),
(uuid.uuid4(), UUIDField),
]
for value, output_field_type in value_types:
with self.subTest(type=type(value)):
expr = Value(value)
self.assertIsInstance(expr.output_field, output_field_type)
def test_resolve_output_field_failure(self):
msg = "Cannot resolve expression type, unknown output_field"
with self.assertRaisesMessage(FieldError, msg):
Value(object()).output_field
def test_output_field_does_not_create_broken_validators(self):
"""
        The output field for a given Value doesn't get cleaned & validated;
        however, validators may still be instantiated for a given field type,
        and this demonstrates that they don't throw an exception.
"""
value_types = [
"str",
True,
42,
3.14,
datetime.date(2019, 5, 15),
datetime.datetime(2019, 5, 15),
datetime.time(3, 16),
datetime.timedelta(1),
Decimal("3.14"),
b"",
uuid.uuid4(),
]
for value in value_types:
with self.subTest(type=type(value)):
field = Value(value)._resolve_output_field()
field.clean(value, model_instance=None)
class ExistsTests(TestCase):
def test_optimizations(self):
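        # Exists() should trim the subquery: selected columns and ORDER BY
        # are dropped, and a LIMIT is added.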
with CaptureQueriesContext(connection) as context:
list(
Experiment.objects.values(
exists=Exists(
Experiment.objects.order_by("pk"),
)
).order_by()
)
captured_queries = context.captured_queries
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(
connection.ops.quote_name(Experiment._meta.pk.column),
captured_sql,
)
self.assertIn(
connection.ops.limit_offset_sql(None, 1),
captured_sql,
)
self.assertNotIn("ORDER BY", captured_sql)
def test_negated_empty_exists(self):
manager = Manager.objects.create()
qs = Manager.objects.filter(~Exists(Manager.objects.none()) & Q(pk=manager.pk))
self.assertSequenceEqual(qs, [manager])
def test_select_negated_empty_exists(self):
manager = Manager.objects.create()
qs = Manager.objects.annotate(
not_exists=~Exists(Manager.objects.none())
).filter(pk=manager.pk)
self.assertSequenceEqual(qs, [manager])
self.assertIs(qs.get().not_exists, True)
class FieldTransformTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.sday = sday = datetime.date(2010, 6, 25)
cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
cls.ex1 = Experiment.objects.create(
name="Experiment 1",
assigned=sday,
completed=sday + datetime.timedelta(2),
estimated_time=datetime.timedelta(2),
start=stime,
end=stime + datetime.timedelta(2),
)
def test_month_aggregation(self):
self.assertEqual(
Experiment.objects.aggregate(month_count=Count("assigned__month")),
{"month_count": 1},
)
def test_transform_in_values(self):
self.assertSequenceEqual(
Experiment.objects.values("assigned__month"),
[{"assigned__month": 6}],
)
def test_multiple_transforms_in_values(self):
self.assertSequenceEqual(
Experiment.objects.values("end__date__month"),
[{"end__date__month": 6}],
)
class ReprTests(SimpleTestCase):
def test_expressions(self):
self.assertEqual(
repr(Case(When(a=1))),
"<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>",
)
self.assertEqual(
repr(When(Q(age__gte=18), then=Value("legal"))),
"<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value('legal')>",
)
self.assertEqual(repr(Col("alias", "field")), "Col(alias, field)")
self.assertEqual(repr(F("published")), "F(published)")
self.assertEqual(
repr(F("cost") + F("tax")), "<CombinedExpression: F(cost) + F(tax)>"
)
self.assertEqual(
repr(ExpressionWrapper(F("cost") + F("tax"), IntegerField())),
"ExpressionWrapper(F(cost) + F(tax))",
)
self.assertEqual(
repr(Func("published", function="TO_CHAR")),
"Func(F(published), function=TO_CHAR)",
)
self.assertEqual(repr(OrderBy(Value(1))), "OrderBy(Value(1), descending=False)")
self.assertEqual(repr(RawSQL("table.col", [])), "RawSQL(table.col, [])")
self.assertEqual(
repr(Ref("sum_cost", Sum("cost"))), "Ref(sum_cost, Sum(F(cost)))"
)
self.assertEqual(repr(Value(1)), "Value(1)")
self.assertEqual(
repr(ExpressionList(F("col"), F("anothercol"))),
"ExpressionList(F(col), F(anothercol))",
)
self.assertEqual(
repr(ExpressionList(OrderBy(F("col"), descending=False))),
"ExpressionList(OrderBy(F(col), descending=False))",
)
def test_functions(self):
self.assertEqual(repr(Coalesce("a", "b")), "Coalesce(F(a), F(b))")
self.assertEqual(repr(Concat("a", "b")), "Concat(ConcatPair(F(a), F(b)))")
self.assertEqual(repr(Length("a")), "Length(F(a))")
self.assertEqual(repr(Lower("a")), "Lower(F(a))")
self.assertEqual(repr(Substr("a", 1, 3)), "Substr(F(a), Value(1), Value(3))")
self.assertEqual(repr(Upper("a")), "Upper(F(a))")
def test_aggregates(self):
self.assertEqual(repr(Avg("a")), "Avg(F(a))")
self.assertEqual(repr(Count("a")), "Count(F(a))")
self.assertEqual(repr(Count("*")), "Count('*')")
self.assertEqual(repr(Max("a")), "Max(F(a))")
self.assertEqual(repr(Min("a")), "Min(F(a))")
self.assertEqual(repr(StdDev("a")), "StdDev(F(a), sample=False)")
self.assertEqual(repr(Sum("a")), "Sum(F(a))")
self.assertEqual(
repr(Variance("a", sample=True)), "Variance(F(a), sample=True)"
)
def test_distinct_aggregates(self):
self.assertEqual(repr(Count("a", distinct=True)), "Count(F(a), distinct=True)")
self.assertEqual(repr(Count("*", distinct=True)), "Count('*', distinct=True)")
def test_filtered_aggregates(self):
filter = Q(a=1)
self.assertEqual(
repr(Avg("a", filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Count("a", filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Max("a", filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Min("a", filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(StdDev("a", filter=filter)),
"StdDev(F(a), filter=(AND: ('a', 1)), sample=False)",
)
self.assertEqual(
repr(Sum("a", filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Variance("a", sample=True, filter=filter)),
"Variance(F(a), filter=(AND: ('a', 1)), sample=True)",
)
self.assertEqual(
repr(Count("a", filter=filter, distinct=True)),
"Count(F(a), distinct=True, filter=(AND: ('a', 1)))",
)
class CombinableTests(SimpleTestCase):
bitwise_msg = (
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def test_negation(self):
c = Combinable()
self.assertEqual(-c, c * -1)
def test_and(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() & Combinable()
def test_or(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() | Combinable()
def test_xor(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() ^ Combinable()
def test_reversed_and(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() & Combinable()
def test_reversed_or(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() | Combinable()
def test_reversed_xor(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() ^ Combinable()
class CombinedExpressionTests(SimpleTestCase):
def test_resolve_output_field(self):
tests = [
(IntegerField, AutoField, IntegerField),
(AutoField, IntegerField, IntegerField),
(IntegerField, DecimalField, DecimalField),
(DecimalField, IntegerField, DecimalField),
(IntegerField, FloatField, FloatField),
(FloatField, IntegerField, FloatField),
]
connectors = [Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV]
for lhs, rhs, combined in tests:
for connector in connectors:
with self.subTest(
lhs=lhs, connector=connector, rhs=rhs, combined=combined
):
expr = CombinedExpression(
Expression(lhs()),
connector,
Expression(rhs()),
)
self.assertIsInstance(expr.output_field, combined)
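# Hedged aside (not part of the original suite): the table above covers the
# pairs the ORM resolves on its own. For pairs it can't (e.g. DecimalField
# combined with FloatField), an explicit output_field is required; the field
# names here are illustrative.
def weighted_price():
    # ExpressionWrapper supplies the output_field the combination can't
    # infer by itself.
    return ExpressionWrapper(F("price") * F("rate"), output_field=FloatField())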
class ExpressionWrapperTests(SimpleTestCase):
def test_empty_group_by(self):
expr = ExpressionWrapper(Value(3), output_field=IntegerField())
self.assertEqual(expr.get_group_by_cols(alias=None), [])
def test_non_empty_group_by(self):
value = Value("f")
value.output_field = None
expr = ExpressionWrapper(Lower(value), output_field=IntegerField())
group_by_cols = expr.get_group_by_cols(alias=None)
self.assertEqual(group_by_cols, [expr.expression])
self.assertEqual(group_by_cols[0].output_field, expr.output_field)
class OrderByTests(SimpleTestCase):
def test_equal(self):
self.assertEqual(
OrderBy(F("field"), nulls_last=True),
OrderBy(F("field"), nulls_last=True),
)
self.assertNotEqual(
OrderBy(F("field"), nulls_last=True),
OrderBy(F("field"), nulls_last=False),
)
def test_hash(self):
self.assertEqual(
hash(OrderBy(F("field"), nulls_last=True)),
hash(OrderBy(F("field"), nulls_last=True)),
)
self.assertNotEqual(
hash(OrderBy(F("field"), nulls_last=True)),
hash(OrderBy(F("field"), nulls_last=False)),
)
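# Hedged aside (not part of the original suite): the usual way to obtain the
# OrderBy instances compared above is through F(); the field name is
# illustrative.
def newest_first():
    # F(...).desc() returns an OrderBy, and nulls_last travels with it,
    # which is why it participates in __eq__ and __hash__ above.
    return F("created").desc(nulls_last=True)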
|
372c22b910dd867545d16f4a392464793893e2347a34eb679aaac6e722642d27 | import copy
import json
import os
import pickle
import unittest
import uuid
from django.core.exceptions import DisallowedRedirect
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import request_finished
from django.db import close_old_connections
from django.http import (
BadHeaderError,
HttpResponse,
HttpResponseNotAllowed,
HttpResponseNotModified,
HttpResponsePermanentRedirect,
HttpResponseRedirect,
JsonResponse,
QueryDict,
SimpleCookie,
StreamingHttpResponse,
parse_cookie,
)
from django.test import SimpleTestCase
from django.utils.functional import lazystr
class QueryDictTests(SimpleTestCase):
def test_create_with_no_args(self):
self.assertEqual(QueryDict(), QueryDict(""))
def test_missing_key(self):
q = QueryDict()
with self.assertRaises(KeyError):
q.__getitem__("foo")
def test_immutability(self):
q = QueryDict()
with self.assertRaises(AttributeError):
q.__setitem__("something", "bar")
with self.assertRaises(AttributeError):
q.setlist("foo", ["bar"])
with self.assertRaises(AttributeError):
q.appendlist("foo", ["bar"])
with self.assertRaises(AttributeError):
q.update({"foo": "bar"})
with self.assertRaises(AttributeError):
q.pop("foo")
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
def test_immutable_get_with_default(self):
q = QueryDict()
self.assertEqual(q.get("foo", "default"), "default")
def test_immutable_basic_operations(self):
q = QueryDict()
self.assertEqual(q.getlist("foo"), [])
self.assertNotIn("foo", q)
self.assertEqual(list(q), [])
self.assertEqual(list(q.items()), [])
self.assertEqual(list(q.lists()), [])
self.assertEqual(list(q.keys()), [])
self.assertEqual(list(q.values()), [])
self.assertEqual(len(q), 0)
self.assertEqual(q.urlencode(), "")
def test_single_key_value(self):
"""Test QueryDict with one key/value pair"""
q = QueryDict("foo=bar")
self.assertEqual(q["foo"], "bar")
with self.assertRaises(KeyError):
q.__getitem__("bar")
with self.assertRaises(AttributeError):
q.__setitem__("something", "bar")
self.assertEqual(q.get("foo", "default"), "bar")
self.assertEqual(q.get("bar", "default"), "default")
self.assertEqual(q.getlist("foo"), ["bar"])
self.assertEqual(q.getlist("bar"), [])
with self.assertRaises(AttributeError):
q.setlist("foo", ["bar"])
with self.assertRaises(AttributeError):
q.appendlist("foo", ["bar"])
self.assertIn("foo", q)
self.assertNotIn("bar", q)
self.assertEqual(list(q), ["foo"])
self.assertEqual(list(q.items()), [("foo", "bar")])
self.assertEqual(list(q.lists()), [("foo", ["bar"])])
self.assertEqual(list(q.keys()), ["foo"])
self.assertEqual(list(q.values()), ["bar"])
self.assertEqual(len(q), 1)
with self.assertRaises(AttributeError):
q.update({"foo": "bar"})
with self.assertRaises(AttributeError):
q.pop("foo")
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
with self.assertRaises(AttributeError):
q.setdefault("foo", "bar")
self.assertEqual(q.urlencode(), "foo=bar")
def test_urlencode(self):
q = QueryDict(mutable=True)
q["next"] = "/a&b/"
self.assertEqual(q.urlencode(), "next=%2Fa%26b%2F")
self.assertEqual(q.urlencode(safe="/"), "next=/a%26b/")
q = QueryDict(mutable=True)
q["next"] = "/t\xebst&key/"
self.assertEqual(q.urlencode(), "next=%2Ft%C3%ABst%26key%2F")
self.assertEqual(q.urlencode(safe="/"), "next=/t%C3%ABst%26key/")
def test_urlencode_int(self):
# Normally QueryDict doesn't contain non-string values but lazily
# written tests may make that mistake.
q = QueryDict(mutable=True)
q["a"] = 1
self.assertEqual(q.urlencode(), "a=1")
def test_mutable_copy(self):
"""A copy of a QueryDict is mutable."""
q = QueryDict().copy()
with self.assertRaises(KeyError):
q.__getitem__("foo")
q["name"] = "john"
self.assertEqual(q["name"], "john")
def test_mutable_delete(self):
q = QueryDict(mutable=True)
q["name"] = "john"
del q["name"]
self.assertNotIn("name", q)
def test_basic_mutable_operations(self):
q = QueryDict(mutable=True)
q["name"] = "john"
self.assertEqual(q.get("foo", "default"), "default")
self.assertEqual(q.get("name", "default"), "john")
self.assertEqual(q.getlist("name"), ["john"])
self.assertEqual(q.getlist("foo"), [])
q.setlist("foo", ["bar", "baz"])
self.assertEqual(q.get("foo", "default"), "baz")
self.assertEqual(q.getlist("foo"), ["bar", "baz"])
q.appendlist("foo", "another")
self.assertEqual(q.getlist("foo"), ["bar", "baz", "another"])
self.assertEqual(q["foo"], "another")
self.assertIn("foo", q)
self.assertCountEqual(q, ["foo", "name"])
self.assertCountEqual(q.items(), [("foo", "another"), ("name", "john")])
self.assertCountEqual(
q.lists(), [("foo", ["bar", "baz", "another"]), ("name", ["john"])]
)
self.assertCountEqual(q.keys(), ["foo", "name"])
self.assertCountEqual(q.values(), ["another", "john"])
q.update({"foo": "hello"})
self.assertEqual(q["foo"], "hello")
self.assertEqual(q.get("foo", "not available"), "hello")
self.assertEqual(q.getlist("foo"), ["bar", "baz", "another", "hello"])
self.assertEqual(q.pop("foo"), ["bar", "baz", "another", "hello"])
self.assertEqual(q.pop("foo", "not there"), "not there")
self.assertEqual(q.get("foo", "not there"), "not there")
self.assertEqual(q.setdefault("foo", "bar"), "bar")
self.assertEqual(q["foo"], "bar")
self.assertEqual(q.getlist("foo"), ["bar"])
self.assertIn(q.urlencode(), ["foo=bar&name=john", "name=john&foo=bar"])
q.clear()
self.assertEqual(len(q), 0)
def test_multiple_keys(self):
"""Test QueryDict with two key/value pairs with same keys."""
q = QueryDict("vote=yes&vote=no")
self.assertEqual(q["vote"], "no")
with self.assertRaises(AttributeError):
q.__setitem__("something", "bar")
self.assertEqual(q.get("vote", "default"), "no")
self.assertEqual(q.get("foo", "default"), "default")
self.assertEqual(q.getlist("vote"), ["yes", "no"])
self.assertEqual(q.getlist("foo"), [])
with self.assertRaises(AttributeError):
q.setlist("foo", ["bar", "baz"])
with self.assertRaises(AttributeError):
q.appendlist("foo", ["bar"])
self.assertIn("vote", q)
self.assertNotIn("foo", q)
self.assertEqual(list(q), ["vote"])
self.assertEqual(list(q.items()), [("vote", "no")])
self.assertEqual(list(q.lists()), [("vote", ["yes", "no"])])
self.assertEqual(list(q.keys()), ["vote"])
self.assertEqual(list(q.values()), ["no"])
self.assertEqual(len(q), 1)
with self.assertRaises(AttributeError):
q.update({"foo": "bar"})
with self.assertRaises(AttributeError):
q.pop("foo")
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
with self.assertRaises(AttributeError):
q.setdefault("foo", "bar")
with self.assertRaises(AttributeError):
q.__delitem__("vote")
def test_pickle(self):
q = QueryDict()
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict("a=b&c=d")
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict("a=b&c=d&a=1")
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
def test_update_from_querydict(self):
"""Regression test for #8278: QueryDict.update(QueryDict)"""
x = QueryDict("a=1&a=2", mutable=True)
y = QueryDict("a=3&a=4")
x.update(y)
self.assertEqual(x.getlist("a"), ["1", "2", "3", "4"])
def test_non_default_encoding(self):
"""#13572 - QueryDict with a non-default encoding"""
q = QueryDict("cur=%A4", encoding="iso-8859-15")
self.assertEqual(q.encoding, "iso-8859-15")
self.assertEqual(list(q.items()), [("cur", "€")])
self.assertEqual(q.urlencode(), "cur=%A4")
q = q.copy()
self.assertEqual(q.encoding, "iso-8859-15")
self.assertEqual(list(q.items()), [("cur", "€")])
self.assertEqual(q.urlencode(), "cur=%A4")
self.assertEqual(copy.copy(q).encoding, "iso-8859-15")
self.assertEqual(copy.deepcopy(q).encoding, "iso-8859-15")
def test_querydict_fromkeys(self):
self.assertEqual(
QueryDict.fromkeys(["key1", "key2", "key3"]), QueryDict("key1&key2&key3")
)
def test_fromkeys_with_nonempty_value(self):
self.assertEqual(
QueryDict.fromkeys(["key1", "key2", "key3"], value="val"),
QueryDict("key1=val&key2=val&key3=val"),
)
def test_fromkeys_is_immutable_by_default(self):
# Match behavior of __init__() which is also immutable by default.
q = QueryDict.fromkeys(["key1", "key2", "key3"])
with self.assertRaisesMessage(
AttributeError, "This QueryDict instance is immutable"
):
q["key4"] = "nope"
def test_fromkeys_mutable_override(self):
q = QueryDict.fromkeys(["key1", "key2", "key3"], mutable=True)
q["key4"] = "yep"
self.assertEqual(q, QueryDict("key1&key2&key3&key4=yep"))
def test_duplicates_in_fromkeys_iterable(self):
self.assertEqual(QueryDict.fromkeys("xyzzy"), QueryDict("x&y&z&z&y"))
def test_fromkeys_with_nondefault_encoding(self):
key_utf16 = b"\xff\xfe\x8e\x02\xdd\x01\x9e\x02"
value_utf16 = b"\xff\xfe\xdd\x01n\x00l\x00P\x02\x8c\x02"
q = QueryDict.fromkeys([key_utf16], value=value_utf16, encoding="utf-16")
expected = QueryDict("", mutable=True)
expected["ʎǝʞ"] = "ǝnlɐʌ"
self.assertEqual(q, expected)
def test_fromkeys_empty_iterable(self):
self.assertEqual(QueryDict.fromkeys([]), QueryDict(""))
def test_fromkeys_noniterable(self):
with self.assertRaises(TypeError):
QueryDict.fromkeys(0)
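# Hedged aside (not part of the original suite): the mutability contract in
# one helper. Assumes Django settings are configured, since QueryDict
# consults DEFAULT_CHARSET; names are illustrative.
def grow(querydict, key, value):
    # Parsed QueryDicts are immutable; .copy() always returns a mutable one.
    mutable = querydict.copy()
    mutable.appendlist(key, value)
    return mutable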
class HttpResponseTests(SimpleTestCase):
def test_headers_type(self):
r = HttpResponse()
# ASCII strings or bytes values are converted to strings.
r.headers["key"] = "test"
self.assertEqual(r.headers["key"], "test")
r.headers["key"] = b"test"
self.assertEqual(r.headers["key"], "test")
self.assertIn(b"test", r.serialize_headers())
# Non-ASCII values are serialized to Latin-1.
r.headers["key"] = "café"
self.assertIn("café".encode("latin-1"), r.serialize_headers())
# Other Unicode values are MIME-encoded (there's no way to pass them as
# bytes).
r.headers["key"] = "†"
self.assertEqual(r.headers["key"], "=?utf-8?b?4oCg?=")
self.assertIn(b"=?utf-8?b?4oCg?=", r.serialize_headers())
# The response also converts string or bytes keys to strings, but requires
# them to contain ASCII.
r = HttpResponse()
del r.headers["Content-Type"]
r.headers["foo"] = "bar"
headers = list(r.headers.items())
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0], ("foo", "bar"))
r = HttpResponse()
del r.headers["Content-Type"]
r.headers[b"foo"] = "bar"
headers = list(r.headers.items())
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0], ("foo", "bar"))
self.assertIsInstance(headers[0][0], str)
r = HttpResponse()
with self.assertRaises(UnicodeError):
r.headers.__setitem__("føø", "bar")
with self.assertRaises(UnicodeError):
r.headers.__setitem__("føø".encode(), "bar")
def test_long_line(self):
# Bug #20889: long lines trigger newlines to be added to headers
# (which is not allowed due to bug #10188)
h = HttpResponse()
f = b"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88"
f = f.decode("utf-8")
h.headers["Content-Disposition"] = 'attachment; filename="%s"' % f
# This one triggers https://bugs.python.org/issue20747, where Python itself
# inserts a newline into the header.
h.headers[
"Content-Disposition"
] = 'attachment; filename="EdelRot_Blu\u0308te (3)-0.JPG"'
def test_newlines_in_headers(self):
# Bug #10188: Do not allow newlines in headers (CR or LF)
r = HttpResponse()
with self.assertRaises(BadHeaderError):
r.headers.__setitem__("test\rstr", "test")
with self.assertRaises(BadHeaderError):
r.headers.__setitem__("test\nstr", "test")
def test_encoded_with_newlines_in_headers(self):
"""
Keys and values that raise a UnicodeError when encoded or decoded should
still be checked for newlines and re-raised as a BadHeaderError. These
cases would raise BadHeaderError even after decoding successfully, because
the newlines sit in the middle of the string and email.Header leaves them
as they are.
"""
r = HttpResponse()
pairs = (
("†\nother", "test"),
("test", "†\nother"),
(b"\xe2\x80\xa0\nother", "test"),
("test", b"\xe2\x80\xa0\nother"),
)
msg = "Header values can't contain newlines"
for key, value in pairs:
with self.subTest(key=key, value=value):
with self.assertRaisesMessage(BadHeaderError, msg):
r[key] = value
def test_dict_behavior(self):
"""
Test for bug #14020: Make HttpResponse.get work like dict.get
"""
r = HttpResponse()
self.assertIsNone(r.get("test"))
def test_non_string_content(self):
# Bug 16494: HttpResponse should behave consistently with non-strings
r = HttpResponse(12345)
self.assertEqual(r.content, b"12345")
# test content via property
r = HttpResponse()
r.content = 12345
self.assertEqual(r.content, b"12345")
def test_memoryview_content(self):
r = HttpResponse(memoryview(b"memoryview"))
self.assertEqual(r.content, b"memoryview")
def test_iter_content(self):
r = HttpResponse(["abc", "def", "ghi"])
self.assertEqual(r.content, b"abcdefghi")
# test iter content via property
r = HttpResponse()
r.content = ["idan", "alex", "jacob"]
self.assertEqual(r.content, b"idanalexjacob")
r = HttpResponse()
r.content = [1, 2, 3]
self.assertEqual(r.content, b"123")
# test odd inputs
r = HttpResponse()
r.content = ["1", "2", 3, "\u079e"]
# '\xde\x9e' == chr(1950).encode()
self.assertEqual(r.content, b"123\xde\x9e")
# .content can safely be accessed multiple times.
r = HttpResponse(iter(["hello", "world"]))
self.assertEqual(r.content, r.content)
self.assertEqual(r.content, b"helloworld")
# __iter__ can safely be called multiple times (#20187).
self.assertEqual(b"".join(r), b"helloworld")
self.assertEqual(b"".join(r), b"helloworld")
# Accessing .content still works.
self.assertEqual(r.content, b"helloworld")
# Accessing .content also works if the response was iterated first.
r = HttpResponse(iter(["hello", "world"]))
self.assertEqual(b"".join(r), b"helloworld")
self.assertEqual(r.content, b"helloworld")
# Additional content can be written to the response.
r = HttpResponse(iter(["hello", "world"]))
self.assertEqual(r.content, b"helloworld")
r.write("!")
self.assertEqual(r.content, b"helloworld!")
def test_iterator_isnt_rewound(self):
# Regression test for #13222
r = HttpResponse("abc")
i = iter(r)
self.assertEqual(list(i), [b"abc"])
self.assertEqual(list(i), [])
def test_lazy_content(self):
r = HttpResponse(lazystr("helloworld"))
self.assertEqual(r.content, b"helloworld")
def test_file_interface(self):
r = HttpResponse()
r.write(b"hello")
self.assertEqual(r.tell(), 5)
r.write("привет")
self.assertEqual(r.tell(), 17)
r = HttpResponse(["abc"])
r.write("def")
self.assertEqual(r.tell(), 6)
self.assertEqual(r.content, b"abcdef")
# with Content-Encoding header
r = HttpResponse()
r.headers["Content-Encoding"] = "winning"
r.write(b"abc")
r.write(b"def")
self.assertEqual(r.content, b"abcdef")
def test_stream_interface(self):
r = HttpResponse("asdf")
self.assertEqual(r.getvalue(), b"asdf")
r = HttpResponse()
self.assertIs(r.writable(), True)
r.writelines(["foo\n", "bar\n", "baz\n"])
self.assertEqual(r.content, b"foo\nbar\nbaz\n")
def test_unsafe_redirect(self):
bad_urls = [
'data:text/html,<script>window.alert("xss")</script>',
"mailto:[email protected]",
"file:///etc/passwd",
]
for url in bad_urls:
with self.assertRaises(DisallowedRedirect):
HttpResponseRedirect(url)
with self.assertRaises(DisallowedRedirect):
HttpResponsePermanentRedirect(url)
def test_header_deletion(self):
r = HttpResponse("hello")
r.headers["X-Foo"] = "foo"
del r.headers["X-Foo"]
self.assertNotIn("X-Foo", r.headers)
# del doesn't raise a KeyError on nonexistent headers.
del r.headers["X-Foo"]
def test_instantiate_with_headers(self):
r = HttpResponse("hello", headers={"X-Foo": "foo"})
self.assertEqual(r.headers["X-Foo"], "foo")
self.assertEqual(r.headers["x-foo"], "foo")
def test_content_type(self):
r = HttpResponse("hello", content_type="application/json")
self.assertEqual(r.headers["Content-Type"], "application/json")
def test_content_type_headers(self):
r = HttpResponse("hello", headers={"Content-Type": "application/json"})
self.assertEqual(r.headers["Content-Type"], "application/json")
def test_content_type_mutually_exclusive(self):
msg = (
"'headers' must not contain 'Content-Type' when the "
"'content_type' parameter is provided."
)
with self.assertRaisesMessage(ValueError, msg):
HttpResponse(
"hello",
content_type="application/json",
headers={"Content-Type": "text/csv"},
)
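# Hedged aside (not part of the original suite): the header coercion rules
# above, condensed into a helper; the header name is illustrative.
def tag_response(response, token):
    # str and bytes keys/values are coerced to str, and lookups are
    # case-insensitive.
    response.headers["X-Token"] = token
    return response.headers["x-token"]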
class HttpResponseSubclassesTests(SimpleTestCase):
def test_redirect(self):
response = HttpResponseRedirect("/redirected/")
self.assertEqual(response.status_code, 302)
# Standard HttpResponse init args can be used
response = HttpResponseRedirect(
"/redirected/",
content="The resource has temporarily moved",
)
self.assertContains(
response, "The resource has temporarily moved", status_code=302
)
self.assertEqual(response.url, response.headers["Location"])
def test_redirect_lazy(self):
"""Make sure HttpResponseRedirect works with lazy strings."""
r = HttpResponseRedirect(lazystr("/redirected/"))
self.assertEqual(r.url, "/redirected/")
def test_redirect_repr(self):
response = HttpResponseRedirect("/redirected/")
expected = (
'<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", '
'url="/redirected/">'
)
self.assertEqual(repr(response), expected)
def test_invalid_redirect_repr(self):
"""
If HttpResponseRedirect raises DisallowedRedirect, its __repr__()
should work (in the debug view, for example).
"""
response = HttpResponseRedirect.__new__(HttpResponseRedirect)
with self.assertRaisesMessage(
DisallowedRedirect, "Unsafe redirect to URL with protocol 'ssh'"
):
HttpResponseRedirect.__init__(response, "ssh://foo")
expected = (
'<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", '
'url="ssh://foo">'
)
self.assertEqual(repr(response), expected)
def test_not_modified(self):
response = HttpResponseNotModified()
self.assertEqual(response.status_code, 304)
# 304 responses should not have content/content-type
with self.assertRaises(AttributeError):
response.content = "Hello dear"
self.assertNotIn("content-type", response)
def test_not_modified_repr(self):
response = HttpResponseNotModified()
self.assertEqual(repr(response), "<HttpResponseNotModified status_code=304>")
def test_not_allowed(self):
response = HttpResponseNotAllowed(["GET"])
self.assertEqual(response.status_code, 405)
# Standard HttpResponse init args can be used
response = HttpResponseNotAllowed(
["GET"], content="Only the GET method is allowed"
)
self.assertContains(response, "Only the GET method is allowed", status_code=405)
def test_not_allowed_repr(self):
response = HttpResponseNotAllowed(["GET", "OPTIONS"], content_type="text/plain")
expected = (
'<HttpResponseNotAllowed [GET, OPTIONS] status_code=405, "text/plain">'
)
self.assertEqual(repr(response), expected)
def test_not_allowed_repr_no_content_type(self):
response = HttpResponseNotAllowed(("GET", "POST"))
del response.headers["Content-Type"]
self.assertEqual(
repr(response), "<HttpResponseNotAllowed [GET, POST] status_code=405>"
)
class JsonResponseTests(SimpleTestCase):
def test_json_response_non_ascii(self):
data = {"key": "łóżko"}
response = JsonResponse(data)
self.assertEqual(json.loads(response.content.decode()), data)
def test_json_response_raises_type_error_with_default_setting(self):
with self.assertRaisesMessage(
TypeError,
"In order to allow non-dict objects to be serialized set the "
"safe parameter to False",
):
JsonResponse([1, 2, 3])
def test_json_response_text(self):
response = JsonResponse("foobar", safe=False)
self.assertEqual(json.loads(response.content.decode()), "foobar")
def test_json_response_list(self):
response = JsonResponse(["foo", "bar"], safe=False)
self.assertEqual(json.loads(response.content.decode()), ["foo", "bar"])
def test_json_response_uuid(self):
u = uuid.uuid4()
response = JsonResponse(u, safe=False)
self.assertEqual(json.loads(response.content.decode()), str(u))
def test_json_response_custom_encoder(self):
class CustomDjangoJSONEncoder(DjangoJSONEncoder):
def encode(self, o):
return json.dumps({"foo": "bar"})
response = JsonResponse({}, encoder=CustomDjangoJSONEncoder)
self.assertEqual(json.loads(response.content.decode()), {"foo": "bar"})
def test_json_response_passing_arguments_to_json_dumps(self):
response = JsonResponse({"foo": "bar"}, json_dumps_params={"indent": 2})
self.assertEqual(response.content.decode(), '{\n "foo": "bar"\n}')
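# Hedged aside (not part of the original suite): the options above combined;
# assumes configured settings.
def pretty_json_list(items):
    # safe=False permits non-dict top-level values; json_dumps_params is
    # forwarded to json.dumps().
    return JsonResponse(items, safe=False, json_dumps_params={"indent": 2})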
class StreamingHttpResponseTests(SimpleTestCase):
def test_streaming_response(self):
r = StreamingHttpResponse(iter(["hello", "world"]))
# iterating over the response itself yields bytestring chunks.
chunks = list(r)
self.assertEqual(chunks, [b"hello", b"world"])
for chunk in chunks:
self.assertIsInstance(chunk, bytes)
# and the response can only be iterated once.
self.assertEqual(list(r), [])
# even when a sequence that can be iterated many times, like a list,
# is given as content.
r = StreamingHttpResponse(["abc", "def"])
self.assertEqual(list(r), [b"abc", b"def"])
self.assertEqual(list(r), [])
# iterating over strings still yields bytestring chunks.
r.streaming_content = iter(["hello", "café"])
chunks = list(r)
# '\xc3\xa9' == chr(233).encode()
self.assertEqual(chunks, [b"hello", b"caf\xc3\xa9"])
for chunk in chunks:
self.assertIsInstance(chunk, bytes)
# streaming responses don't have a `content` attribute.
self.assertFalse(hasattr(r, "content"))
# and you can't accidentally assign to a `content` attribute.
with self.assertRaises(AttributeError):
r.content = "xyz"
# but they do have a `streaming_content` attribute.
self.assertTrue(hasattr(r, "streaming_content"))
# that exists so we can check if a response is streaming, and wrap or
# replace the content iterator.
r.streaming_content = iter(["abc", "def"])
r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
self.assertEqual(list(r), [b"ABC", b"DEF"])
# coercing a streaming response to bytes doesn't return a complete HTTP
# message like a regular response does. It only gives us the headers.
r = StreamingHttpResponse(iter(["hello", "world"]))
self.assertEqual(bytes(r), b"Content-Type: text/html; charset=utf-8")
# and this won't consume its content.
self.assertEqual(list(r), [b"hello", b"world"])
# additional content cannot be written to the response.
r = StreamingHttpResponse(iter(["hello", "world"]))
with self.assertRaises(Exception):
r.write("!")
# and we can't tell the current position.
with self.assertRaises(Exception):
r.tell()
r = StreamingHttpResponse(iter(["hello", "world"]))
self.assertEqual(r.getvalue(), b"helloworld")
def test_repr(self):
r = StreamingHttpResponse(iter(["hello", "café"]))
self.assertEqual(
repr(r),
'<StreamingHttpResponse status_code=200, "text/html; charset=utf-8">',
)
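# Hedged aside (not part of the original suite): the wrap-or-replace idiom
# from test_streaming_response as a helper; the transformation is
# illustrative.
def uppercased(response):
    # streaming_content can be wrapped without touching the headers; chunks
    # are already bytes at this point.
    response.streaming_content = (
        chunk.upper() for chunk in response.streaming_content
    )
    return response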
class FileCloseTests(SimpleTestCase):
def setUp(self):
# Disable the request_finished signal during this test
# to avoid interfering with the database connection.
request_finished.disconnect(close_old_connections)
def tearDown(self):
request_finished.connect(close_old_connections)
def test_response(self):
filename = os.path.join(os.path.dirname(__file__), "abc.txt")
# HttpResponse consumes file-like content immediately, so the file is
# closed as soon as the response is constructed.
file1 = open(filename)
r = HttpResponse(file1)
self.assertTrue(file1.closed)
r.close()
# when multiple files are assigned as content, make sure they are all
# closed with the response.
file1 = open(filename)
file2 = open(filename)
r = HttpResponse(file1)
r.content = file2
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
def test_streaming_response(self):
filename = os.path.join(os.path.dirname(__file__), "abc.txt")
# file isn't closed until we close the response.
file1 = open(filename)
r = StreamingHttpResponse(file1)
self.assertFalse(file1.closed)
r.close()
self.assertTrue(file1.closed)
# when multiple files are assigned as content, make sure they are all
# closed with the response.
file1 = open(filename)
file2 = open(filename)
r = StreamingHttpResponse(file1)
r.streaming_content = file2
self.assertFalse(file1.closed)
self.assertFalse(file2.closed)
r.close()
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
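# Hedged aside (not part of the original suite): the split the two tests
# above pin down. HttpResponse reads file-like content eagerly and closes it
# at once; StreamingHttpResponse keeps the handle open until close(), which
# WSGI servers call at the end of the response cycle. Assumes configured
# settings.
def stream_then_close(fileobj):
    response = StreamingHttpResponse(fileobj)
    response.close()  # closes fileobj via the registered resource closers
    return fileobj.closed  # True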
class CookieTests(unittest.TestCase):
def test_encode(self):
"""Semicolons and commas are encoded."""
c = SimpleCookie()
c["test"] = "An,awkward;value"
self.assertNotIn(";", c.output().rstrip(";")) # IE compat
self.assertNotIn(",", c.output().rstrip(";")) # Safari compat
def test_decode(self):
"""Semicolons and commas are decoded."""
c = SimpleCookie()
c["test"] = "An,awkward;value"
c2 = SimpleCookie()
c2.load(c.output()[12:])
self.assertEqual(c["test"].value, c2["test"].value)
c3 = parse_cookie(c.output()[12:])
self.assertEqual(c["test"].value, c3["test"])
def test_nonstandard_keys(self):
"""
A single non-standard cookie name doesn't affect all cookies (#13007).
"""
self.assertIn("good_cookie", parse_cookie("good_cookie=yes;bad:cookie=yes"))
def test_repeated_nonstandard_keys(self):
"""
A repeated non-standard name doesn't affect all cookies (#15852).
"""
self.assertIn("good_cookie", parse_cookie("a:=b; a:=c; good_cookie=yes"))
def test_python_cookies(self):
"""
Test cases copied from Python's Lib/test/test_http_cookies.py
"""
self.assertEqual(
parse_cookie("chips=ahoy; vienna=finger"),
{"chips": "ahoy", "vienna": "finger"},
)
# Here parse_cookie() differs from Python's cookie parsing in that it
# treats all semicolons as delimiters, even within quotes.
self.assertEqual(
parse_cookie('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'),
{"keebler": '"E=mc2', "L": '\\"Loves\\"', "fudge": "\\012", "": '"'},
)
# Illegal cookies that have an '=' char in an unquoted value.
self.assertEqual(parse_cookie("keebler=E=mc2"), {"keebler": "E=mc2"})
# Cookies with ':' character in their name.
self.assertEqual(
parse_cookie("key:term=value:term"), {"key:term": "value:term"}
)
# Cookies with '[' and ']'.
self.assertEqual(
parse_cookie("a=b; c=[; d=r; f=h"), {"a": "b", "c": "[", "d": "r", "f": "h"}
)
def test_cookie_edgecases(self):
# Cookies that RFC6265 allows.
self.assertEqual(
parse_cookie("a=b; Domain=example.com"), {"a": "b", "Domain": "example.com"}
)
# parse_cookie() has historically kept only the last cookie with the
# same name.
self.assertEqual(parse_cookie("a=b; h=i; a=c"), {"a": "c", "h": "i"})
def test_invalid_cookies(self):
"""
Cookie strings that go against RFC6265 but browsers will send if set
via document.cookie.
"""
# Chunks without an equals sign appear as unnamed values per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
self.assertIn(
"django_language", parse_cookie("abc=def; unnamed; django_language=en")
)
# Even a double quote may be an unnamed value.
self.assertEqual(parse_cookie('a=b; "; c=d'), {"a": "b", "": '"', "c": "d"})
# Spaces in names and values, and an equals sign in values.
self.assertEqual(
parse_cookie("a b c=d e = f; gh=i"), {"a b c": "d e = f", "gh": "i"}
)
# More characters the spec forbids.
self.assertEqual(
parse_cookie('a b,c<>@:/[]?{}=d " =e,f g'),
{"a b,c<>@:/[]?{}": 'd " =e,f g'},
)
# Unicode characters. The spec only allows ASCII.
self.assertEqual(
parse_cookie("saint=André Bessette"), {"saint": "André Bessette"}
)
# Browsers don't send extra whitespace or semicolons in Cookie headers,
# but parse_cookie() should parse whitespace the same way
# document.cookie parses whitespace.
self.assertEqual(
parse_cookie(" = b ; ; = ; c = ; "), {"": "b", "c": ""}
)
def test_samesite(self):
c = SimpleCookie("name=value; samesite=lax; httponly")
self.assertEqual(c["name"]["samesite"], "lax")
self.assertIn("SameSite=lax", c.output())
def test_httponly_after_load(self):
c = SimpleCookie()
c.load("name=val")
c["name"]["httponly"] = True
self.assertTrue(c["name"]["httponly"])
def test_load_dict(self):
c = SimpleCookie()
c.load({"name": "val"})
self.assertEqual(c["name"].value, "val")
def test_pickle(self):
rawdata = 'Customer="WILE_E_COYOTE"; Path=/acme; Version=1'
expected_output = "Set-Cookie: %s" % rawdata
C = SimpleCookie()
C.load(rawdata)
self.assertEqual(C.output(), expected_output)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
C1 = pickle.loads(pickle.dumps(C, protocol=proto))
self.assertEqual(C1.output(), expected_output)
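# Hedged aside (not part of the original suite): the parsing rules above in
# one predicate; the cookie string is illustrative.
def last_duplicate_wins():
    # Duplicate names keep the last value; chunks without "=" land under
    # the empty-string key.
    return parse_cookie("a=b; a=c; unnamed") == {"a": "c", "": "unnamed"}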
class HttpResponseHeadersTestCase(SimpleTestCase):
"""Headers by treating HttpResponse like a dictionary."""
def test_headers(self):
response = HttpResponse()
response["X-Foo"] = "bar"
self.assertEqual(response["X-Foo"], "bar")
self.assertEqual(response.headers["X-Foo"], "bar")
self.assertIn("X-Foo", response)
self.assertIs(response.has_header("X-Foo"), True)
del response["X-Foo"]
self.assertNotIn("X-Foo", response)
self.assertNotIn("X-Foo", response.headers)
# del doesn't raise a KeyError on nonexistent headers.
del response["X-Foo"]
def test_headers_as_iterable_of_tuple_pairs(self):
response = HttpResponse(headers=(("X-Foo", "bar"),))
self.assertEqual(response["X-Foo"], "bar")
def test_headers_bytestring(self):
response = HttpResponse()
response["X-Foo"] = b"bar"
self.assertEqual(response["X-Foo"], "bar")
self.assertEqual(response.headers["X-Foo"], "bar")
def test_newlines_in_headers(self):
response = HttpResponse()
with self.assertRaises(BadHeaderError):
response["test\rstr"] = "test"
with self.assertRaises(BadHeaderError):
response["test\nstr"] = "test"
|
83b73a6824dc6055254d0898be26b5adeb37cf0d4848c5b07e94b7fa9a9bf77d | import gettext
import os
import re
from datetime import datetime, timedelta
from importlib import import_module
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import (
CharField,
DateField,
DateTimeField,
ForeignKey,
ManyToManyField,
UUIDField,
)
from django.test import SimpleTestCase, TestCase, override_settings
from django.urls import reverse
from django.utils import translation
from .models import (
Advisor,
Album,
Band,
Bee,
Car,
Company,
Event,
Honeycomb,
Individual,
Inventory,
Member,
MyFileField,
Profile,
ReleaseEvent,
School,
Student,
UnsafeLimitChoicesTo,
VideoStream,
)
from .widgetadmin import site as widget_admin_site
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email=None
)
cls.u2 = User.objects.create_user(username="testser", password="secret")
Car.objects.create(owner=cls.superuser, make="Volkswagen", model="Passat")
Car.objects.create(owner=cls.u2, make="BMW", model="M3")
class AdminFormfieldForDBFieldTests(SimpleTestCase):
"""
Tests for correct behavior of ModelAdmin.formfield_for_dbfield
"""
def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
"""
Helper to call formfield_for_dbfield for a given model and field name
and verify that the returned formfield is appropriate.
"""
# Override any settings on the model admin
class MyModelAdmin(admin.ModelAdmin):
pass
for k in admin_overrides:
setattr(MyModelAdmin, k, admin_overrides[k])
# Construct the admin, and ask it for a formfield
ma = MyModelAdmin(model, admin.site)
ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
# "unwrap" the widget wrapper, if needed
if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
widget = ff.widget.widget
else:
widget = ff.widget
self.assertIsInstance(widget, widgetclass)
# Return the formfield so that other tests can continue
return ff
def test_DateField(self):
self.assertFormfield(Event, "start_date", widgets.AdminDateWidget)
def test_DateTimeField(self):
self.assertFormfield(Member, "birthdate", widgets.AdminSplitDateTime)
def test_TimeField(self):
self.assertFormfield(Event, "start_time", widgets.AdminTimeWidget)
def test_TextField(self):
self.assertFormfield(Event, "description", widgets.AdminTextareaWidget)
def test_URLField(self):
self.assertFormfield(Event, "link", widgets.AdminURLFieldWidget)
def test_IntegerField(self):
self.assertFormfield(Event, "min_age", widgets.AdminIntegerFieldWidget)
def test_CharField(self):
self.assertFormfield(Member, "name", widgets.AdminTextInputWidget)
def test_EmailField(self):
self.assertFormfield(Member, "email", widgets.AdminEmailInputWidget)
def test_FileField(self):
self.assertFormfield(Album, "cover_art", widgets.AdminFileWidget)
def test_ForeignKey(self):
self.assertFormfield(Event, "main_band", forms.Select)
def test_raw_id_ForeignKey(self):
self.assertFormfield(
Event,
"main_band",
widgets.ForeignKeyRawIdWidget,
raw_id_fields=["main_band"],
)
def test_radio_fields_ForeignKey(self):
ff = self.assertFormfield(
Event,
"main_band",
widgets.AdminRadioSelect,
radio_fields={"main_band": admin.VERTICAL},
)
self.assertIsNone(ff.empty_label)
def test_radio_fields_foreignkey_formfield_overrides_empty_label(self):
class MyModelAdmin(admin.ModelAdmin):
radio_fields = {"parent": admin.VERTICAL}
formfield_overrides = {
ForeignKey: {"empty_label": "Custom empty label"},
}
ma = MyModelAdmin(Inventory, admin.site)
ff = ma.formfield_for_dbfield(Inventory._meta.get_field("parent"), request=None)
self.assertEqual(ff.empty_label, "Custom empty label")
def test_many_to_many(self):
self.assertFormfield(Band, "members", forms.SelectMultiple)
def test_raw_id_many_to_many(self):
self.assertFormfield(
Band, "members", widgets.ManyToManyRawIdWidget, raw_id_fields=["members"]
)
def test_filtered_many_to_many(self):
self.assertFormfield(
Band, "members", widgets.FilteredSelectMultiple, filter_vertical=["members"]
)
def test_formfield_overrides(self):
self.assertFormfield(
Event,
"start_date",
forms.TextInput,
formfield_overrides={DateField: {"widget": forms.TextInput}},
)
def test_formfield_overrides_widget_instances(self):
"""
Widget instances in formfield_overrides are not shared between
different fields. (#19423)
"""
class BandAdmin(admin.ModelAdmin):
formfield_overrides = {
CharField: {"widget": forms.TextInput(attrs={"size": "10"})}
}
ma = BandAdmin(Band, admin.site)
f1 = ma.formfield_for_dbfield(Band._meta.get_field("name"), request=None)
f2 = ma.formfield_for_dbfield(Band._meta.get_field("style"), request=None)
self.assertNotEqual(f1.widget, f2.widget)
self.assertEqual(f1.widget.attrs["maxlength"], "100")
self.assertEqual(f2.widget.attrs["maxlength"], "20")
self.assertEqual(f2.widget.attrs["size"], "10")
def test_formfield_overrides_m2m_filter_widget(self):
"""
The autocomplete_fields, raw_id_fields, filter_vertical, and
filter_horizontal widgets for ManyToManyFields may be overridden by
specifying a widget in formfield_overrides.
"""
class BandAdmin(admin.ModelAdmin):
filter_vertical = ["members"]
formfield_overrides = {
ManyToManyField: {"widget": forms.CheckboxSelectMultiple},
}
ma = BandAdmin(Band, admin.site)
field = ma.formfield_for_dbfield(Band._meta.get_field("members"), request=None)
self.assertIsInstance(field.widget.widget, forms.CheckboxSelectMultiple)
def test_formfield_overrides_for_datetime_field(self):
"""
Overriding the widget for DateTimeField doesn't override the default
form_class for that field (#26449).
"""
class MemberAdmin(admin.ModelAdmin):
formfield_overrides = {
DateTimeField: {"widget": widgets.AdminSplitDateTime}
}
ma = MemberAdmin(Member, admin.site)
f1 = ma.formfield_for_dbfield(Member._meta.get_field("birthdate"), request=None)
self.assertIsInstance(f1.widget, widgets.AdminSplitDateTime)
self.assertIsInstance(f1, forms.SplitDateTimeField)
def test_formfield_overrides_for_custom_field(self):
"""
formfield_overrides works for a custom field class.
"""
class AlbumAdmin(admin.ModelAdmin):
formfield_overrides = {MyFileField: {"widget": forms.TextInput()}}
ma = AlbumAdmin(Member, admin.site)
f1 = ma.formfield_for_dbfield(
Album._meta.get_field("backside_art"), request=None
)
self.assertIsInstance(f1.widget, forms.TextInput)
def test_field_with_choices(self):
self.assertFormfield(Member, "gender", forms.Select)
def test_choices_with_radio_fields(self):
self.assertFormfield(
Member,
"gender",
widgets.AdminRadioSelect,
radio_fields={"gender": admin.VERTICAL},
)
def test_inheritance(self):
self.assertFormfield(Album, "backside_art", widgets.AdminFileWidget)
def test_m2m_widgets(self):
"""m2m fields help text as it applies to admin app (#9321)."""
class AdvisorAdmin(admin.ModelAdmin):
filter_vertical = ["companies"]
self.assertFormfield(
Advisor,
"companies",
widgets.FilteredSelectMultiple,
filter_vertical=["companies"],
)
ma = AdvisorAdmin(Advisor, admin.site)
f = ma.formfield_for_dbfield(Advisor._meta.get_field("companies"), request=None)
self.assertEqual(
f.help_text,
"Hold down “Control”, or “Command” on a Mac, to select more than one.",
)
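# Hedged aside (not part of the original suite): the hooks exercised above,
# gathered in one user-level ModelAdmin. The class is hypothetical and never
# registered; field names are illustrative.
class SketchAdmin(admin.ModelAdmin):
    raw_id_fields = ["main_band"]  # swaps in ForeignKeyRawIdWidget
    radio_fields = {"gender": admin.VERTICAL}  # swaps in AdminRadioSelect
    formfield_overrides = {
        # Widget instances here are deep-copied per field (#19423).
        CharField: {"widget": forms.TextInput(attrs={"size": "10"})},
    }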
@override_settings(ROOT_URLCONF="admin_widgets.urls")
class AdminFormfieldForDBFieldWithRequestTests(TestDataMixin, TestCase):
def test_filter_choices_by_request_user(self):
"""
Ensure the user can only see their own cars in the foreign key dropdown.
"""
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin:admin_widgets_cartire_add"))
self.assertNotContains(response, "BMW M3")
self.assertContains(response, "Volkswagen Passat")
@override_settings(ROOT_URLCONF="admin_widgets.urls")
class AdminForeignKeyWidgetChangeList(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_changelist_ForeignKey(self):
response = self.client.get(reverse("admin:admin_widgets_car_changelist"))
self.assertContains(response, "/auth/user/add/")
@override_settings(ROOT_URLCONF="admin_widgets.urls")
class AdminForeignKeyRawIdWidget(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_nonexistent_target_id(self):
band = Band.objects.create(name="Bogey Blues")
pk = band.pk
band.delete()
post_data = {
"main_band": str(pk),
}
# Try posting with a nonexistent pk in a raw id field: this
# should result in an error message, not a server exception.
response = self.client.post(reverse("admin:admin_widgets_event_add"), post_data)
self.assertContains(
response,
"Select a valid choice. That choice is not one of the available choices.",
)
def test_invalid_target_id(self):
for test_str in ("Iñtërnâtiônàlizætiøn", "1234'", -1234):
# This should result in an error message, not a server exception.
response = self.client.post(
reverse("admin:admin_widgets_event_add"), {"main_band": test_str}
)
self.assertContains(
response,
"Select a valid choice. That choice is not one of the available "
"choices.",
)
def test_url_params_from_lookup_dict_any_iterable(self):
lookup1 = widgets.url_params_from_lookup_dict({"color__in": ("red", "blue")})
lookup2 = widgets.url_params_from_lookup_dict({"color__in": ["red", "blue"]})
self.assertEqual(lookup1, {"color__in": "red,blue"})
self.assertEqual(lookup1, lookup2)
def test_url_params_from_lookup_dict_callable(self):
def my_callable():
return "works"
lookup1 = widgets.url_params_from_lookup_dict({"myfield": my_callable})
lookup2 = widgets.url_params_from_lookup_dict({"myfield": my_callable()})
self.assertEqual(lookup1, lookup2)
def test_label_and_url_for_value_invalid_uuid(self):
field = Bee._meta.get_field("honeycomb")
self.assertIsInstance(field.target_field, UUIDField)
widget = widgets.ForeignKeyRawIdWidget(field.remote_field, admin.site)
self.assertEqual(widget.label_and_url_for_value("invalid-uuid"), ("", ""))
class FilteredSelectMultipleWidgetTest(SimpleTestCase):
def test_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple("test\\", False)
self.assertHTMLEqual(
w.render("test", "test"),
'<select multiple name="test" class="selectfilter" '
'data-field-name="test\\" data-is-stacked="0">\n</select>',
)
def test_stacked_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple("test\\", True)
self.assertHTMLEqual(
w.render("test", "test"),
'<select multiple name="test" class="selectfilterstacked" '
'data-field-name="test\\" data-is-stacked="1">\n</select>',
)
class AdminDateWidgetTest(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminDateWidget()
self.assertHTMLEqual(
w.render("test", datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="vDateField" name="test" '
'size="10">',
)
# pass attrs to widget
w = widgets.AdminDateWidget(attrs={"size": 20, "class": "myDateField"})
self.assertHTMLEqual(
w.render("test", datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="myDateField" name="test" '
'size="20">',
)
class AdminTimeWidgetTest(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminTimeWidget()
self.assertHTMLEqual(
w.render("test", datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="vTimeField" name="test" '
'size="8">',
)
# pass attrs to widget
w = widgets.AdminTimeWidget(attrs={"size": 20, "class": "myTimeField"})
self.assertHTMLEqual(
w.render("test", datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="myTimeField" name="test" '
'size="20">',
)
class AdminSplitDateTimeWidgetTest(SimpleTestCase):
def test_render(self):
w = widgets.AdminSplitDateTime()
self.assertHTMLEqual(
w.render("test", datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Date: <input value="2007-12-01" type="text" class="vDateField" '
'name="test_0" size="10"><br>'
'Time: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8"></p>',
)
def test_localization(self):
w = widgets.AdminSplitDateTime()
with translation.override("de-at"):
w.is_localized = True
self.assertHTMLEqual(
w.render("test", datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Datum: <input value="01.12.2007" type="text" '
'class="vDateField" name="test_0"size="10"><br>'
'Zeit: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8"></p>',
)
class AdminURLWidgetTest(SimpleTestCase):
def test_get_context_validates_url(self):
w = widgets.AdminURLFieldWidget()
for invalid in ["", "/not/a/full/url/", 'javascript:alert("Danger XSS!")']:
with self.subTest(url=invalid):
self.assertFalse(w.get_context("name", invalid, {})["url_valid"])
self.assertTrue(w.get_context("name", "http://example.com", {})["url_valid"])
def test_render(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render("test", ""), '<input class="vURLField" name="test" type="url">'
)
self.assertHTMLEqual(
w.render("test", "http://example.com"),
'<p class="url">Currently:<a href="http://example.com">'
"http://example.com</a><br>"
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example.com"></p>',
)
def test_render_idn(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render("test", "http://example-äüö.com"),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">'
"http://example-äüö.com</a><br>"
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example-äüö.com"></p>',
)
def test_render_quoting(self):
"""
WARNING: This test doesn't use assertHTMLEqual, since it would strip
some of the escapes that are being tested here!
"""
HREF_RE = re.compile('href="([^"]+)"')
VALUE_RE = re.compile('value="([^"]+)"')
TEXT_RE = re.compile("<a[^>]+>([^>]+)</a>")
w = widgets.AdminURLFieldWidget()
output = w.render("test", "http://example.com/<sometag>some-text</sometag>")
self.assertEqual(
HREF_RE.search(output)[1],
"http://example.com/%3Csometag%3Esome-text%3C/sometag%3E",
)
self.assertEqual(
TEXT_RE.search(output)[1],
"http://example.com/<sometag>some-text</sometag>",
)
self.assertEqual(
VALUE_RE.search(output)[1],
"http://example.com/<sometag>some-text</sometag>",
)
output = w.render("test", "http://example-äüö.com/<sometag>some-text</sometag>")
self.assertEqual(
HREF_RE.search(output)[1],
"http://xn--example--7za4pnc.com/%3Csometag%3Esome-text%3C/sometag%3E",
)
self.assertEqual(
TEXT_RE.search(output)[1],
"http://example-äüö.com/<sometag>some-text</sometag>",
)
self.assertEqual(
VALUE_RE.search(output)[1],
"http://example-äüö.com/<sometag>some-text</sometag>",
)
output = w.render(
"test", 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"'
)
self.assertEqual(
HREF_RE.search(output)[1],
"http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)"
"%3C/script%3E%22",
)
self.assertEqual(
TEXT_RE.search(output)[1],
"http://www.example.com/%C3%A4"><script>"
"alert("XSS!")</script>"",
)
self.assertEqual(
VALUE_RE.search(output)[1],
"http://www.example.com/%C3%A4"><script>"
"alert("XSS!")</script>"",
)
class AdminUUIDWidgetTests(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminUUIDInputWidget()
self.assertHTMLEqual(
w.render("test", "550e8400-e29b-41d4-a716-446655440000"),
'<input value="550e8400-e29b-41d4-a716-446655440000" type="text" '
'class="vUUIDField" name="test">',
)
w = widgets.AdminUUIDInputWidget(attrs={"class": "myUUIDInput"})
self.assertHTMLEqual(
w.render("test", "550e8400-e29b-41d4-a716-446655440000"),
'<input value="550e8400-e29b-41d4-a716-446655440000" type="text" '
'class="myUUIDInput" name="test">',
)
@override_settings(ROOT_URLCONF="admin_widgets.urls")
class AdminFileWidgetTests(TestDataMixin, TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
band = Band.objects.create(name="Linkin Park")
cls.album = band.album_set.create(
name="Hybrid Theory", cover_art=r"albums\hybrid_theory.jpg"
)
def test_render(self):
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render("test", self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id"> '
'<label for="test-clear_id">Clear</label></span><br>'
'Change: <input type="file" name="test"></p>'
% {
"STORAGE_URL": default_storage.url(""),
},
)
self.assertHTMLEqual(
w.render("test", SimpleUploadedFile("test", b"content")),
'<input type="file" name="test">',
)
def test_render_required(self):
widget = widgets.AdminFileWidget()
widget.is_required = True
self.assertHTMLEqual(
widget.render("test", self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a><br>'
'Change: <input type="file" name="test"></p>'
% {
"STORAGE_URL": default_storage.url(""),
},
)
def test_render_disabled(self):
widget = widgets.AdminFileWidget(attrs={"disabled": True})
self.assertHTMLEqual(
widget.render("test", self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id" disabled>'
'<label for="test-clear_id">Clear</label></span><br>'
'Change: <input type="file" name="test" disabled></p>'
% {
"STORAGE_URL": default_storage.url(""),
},
)
def test_readonly_fields(self):
"""
File widgets should render as a link when they're marked "read only."
"""
self.client.force_login(self.superuser)
response = self.client.get(
reverse("admin:admin_widgets_album_change", args=(self.album.id,))
)
self.assertContains(
response,
'<div class="readonly"><a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">'
r"albums\hybrid_theory.jpg</a></div>"
% {"STORAGE_URL": default_storage.url("")},
html=True,
)
self.assertNotContains(
response,
'<input type="file" name="cover_art" id="id_cover_art">',
html=True,
)
response = self.client.get(reverse("admin:admin_widgets_album_add"))
self.assertContains(
response,
'<div class="readonly"></div>',
html=True,
)
@override_settings(ROOT_URLCONF="admin_widgets.urls")
class ForeignKeyRawIdWidgetTest(TestCase):
def test_render(self):
band = Band.objects.create(name="Linkin Park")
band.album_set.create(
name="Hybrid Theory", cover_art=r"albums\hybrid_theory.jpg"
)
rel_uuid = Album._meta.get_field("band").remote_field
w = widgets.ForeignKeyRawIdWidget(rel_uuid, widget_admin_site)
self.assertHTMLEqual(
w.render("test", band.uuid, attrs={}),
'<input type="text" name="test" value="%(banduuid)s" '
'class="vForeignKeyRawIdAdminField vUUIDField">'
'<a href="/admin_widgets/band/?_to_field=uuid" class="related-lookup" '
'id="lookup_id_test" title="Lookup"></a> <strong>'
'<a href="/admin_widgets/band/%(bandpk)s/change/">Linkin Park</a>'
"</strong>" % {"banduuid": band.uuid, "bandpk": band.pk},
)
rel_id = ReleaseEvent._meta.get_field("album").remote_field
w = widgets.ForeignKeyRawIdWidget(rel_id, widget_admin_site)
self.assertHTMLEqual(
w.render("test", None, attrs={}),
'<input type="text" name="test" class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/album/?_to_field=id" class="related-lookup" '
'id="lookup_id_test" title="Lookup"></a>',
)
def test_relations_to_non_primary_key(self):
# ForeignKeyRawIdWidget works with fields which aren't related to
# the model's primary key.
apple = Inventory.objects.create(barcode=86, name="Apple")
Inventory.objects.create(barcode=22, name="Pear")
core = Inventory.objects.create(barcode=87, name="Core", parent=apple)
rel = Inventory._meta.get_field("parent").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("test", core.parent_id, attrs={}),
'<input type="text" name="test" value="86" '
'class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
"Apple</a></strong>" % {"pk": apple.pk},
)
def test_fk_related_model_not_in_admin(self):
# FK to a model not registered with admin site. Raw ID widget should
# have no magnifying glass link. See #16542
big_honeycomb = Honeycomb.objects.create(location="Old tree")
big_honeycomb.bee_set.create()
rel = Bee._meta.get_field("honeycomb").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("honeycomb_widget", big_honeycomb.pk, attrs={}),
'<input type="text" name="honeycomb_widget" value="%(hcombpk)s">'
" <strong>%(hcomb)s</strong>"
% {"hcombpk": big_honeycomb.pk, "hcomb": big_honeycomb},
)
def test_fk_to_self_model_not_in_admin(self):
# FK to self, not registered with admin site. Raw ID widget should have
# no magnifying glass link. See #16542
subject1 = Individual.objects.create(name="Subject #1")
Individual.objects.create(name="Child", parent=subject1)
rel = Individual._meta.get_field("parent").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("individual_widget", subject1.pk, attrs={}),
'<input type="text" name="individual_widget" value="%(subj1pk)s">'
" <strong>%(subj1)s</strong>"
% {"subj1pk": subject1.pk, "subj1": subject1},
)
def test_proper_manager_for_label_lookup(self):
# see #9258
rel = Inventory._meta.get_field("parent").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
hidden = Inventory.objects.create(barcode=93, name="Hidden", hidden=True)
child_of_hidden = Inventory.objects.create(
barcode=94, name="Child of hidden", parent=hidden
)
self.assertHTMLEqual(
w.render("test", child_of_hidden.parent_id, attrs={}),
'<input type="text" name="test" value="93" '
' class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
"Hidden</a></strong>" % {"pk": hidden.pk},
)
def test_render_unsafe_limit_choices_to(self):
rel = UnsafeLimitChoicesTo._meta.get_field("band").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("test", None),
'<input type="text" name="test" class="vForeignKeyRawIdAdminField">\n'
'<a href="/admin_widgets/band/?name=%22%26%3E%3Cescapeme&'
'_to_field=artist_ptr" class="related-lookup" id="lookup_id_test" '
'title="Lookup"></a>',
)
def test_render_fk_as_pk_model(self):
rel = VideoStream._meta.get_field("release_event").remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("test", None),
'<input type="text" name="test" class="vForeignKeyRawIdAdminField">\n'
'<a href="/admin_widgets/releaseevent/?_to_field=album" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>',
)
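# Hedged aside (not part of the original suite): the wiring the tests above
# repeat, as a helper; names are illustrative.
def raw_id_widget_for(model, field_name, site=widget_admin_site):
    # The widget needs the remote_field describing the relation plus the
    # admin site used to build the related-lookup URL.
    rel = model._meta.get_field(field_name).remote_field
    return widgets.ForeignKeyRawIdWidget(rel, site)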
@override_settings(ROOT_URLCONF="admin_widgets.urls")
class ManyToManyRawIdWidgetTest(TestCase):
def test_render(self):
band = Band.objects.create(name="Linkin Park")
m1 = Member.objects.create(name="Chester")
m2 = Member.objects.create(name="Mike")
band.members.add(m1, m2)
rel = Band._meta.get_field("members").remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("test", [m1.pk, m2.pk], attrs={}),
(
'<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" '
' class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" '
' id="lookup_id_test" title="Lookup"></a>'
)
% {"m1pk": m1.pk, "m2pk": m2.pk},
)
self.assertHTMLEqual(
w.render("test", [m1.pk]),
(
'<input type="text" name="test" value="%(m1pk)s" '
' class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" '
' id="lookup_id_test" title="Lookup"></a>'
)
% {"m1pk": m1.pk},
)
def test_m2m_related_model_not_in_admin(self):
# M2M relationship with model not registered with admin site. Raw ID
# widget should have no magnifying glass link. See #16542
consultor1 = Advisor.objects.create(name="Rockstar Techie")
c1 = Company.objects.create(name="Doodle")
c2 = Company.objects.create(name="Pear")
consultor1.companies.add(c1, c2)
rel = Advisor._meta.get_field("companies").remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render("company_widget1", [c1.pk, c2.pk], attrs={}),
'<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s">'
% {"c1pk": c1.pk, "c2pk": c2.pk},
)
self.assertHTMLEqual(
w.render("company_widget2", [c1.pk]),
'<input type="text" name="company_widget2" value="%(c1pk)s">'
% {"c1pk": c1.pk},
)
@override_settings(ROOT_URLCONF="admin_widgets.urls")
class RelatedFieldWidgetWrapperTests(SimpleTestCase):
def test_no_can_add_related(self):
rel = Individual._meta.get_field("parent").remote_field
w = widgets.AdminRadioSelect()
# Used to fail with a name error.
w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site)
self.assertFalse(w.can_add_related)
def test_select_multiple_widget_cant_change_delete_related(self):
rel = Individual._meta.get_field("parent").remote_field
widget = forms.SelectMultiple()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget,
rel,
widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertFalse(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_on_delete_cascade_rel_cant_delete_related(self):
rel = Individual._meta.get_field("soulmate").remote_field
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget,
rel,
widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertTrue(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_custom_widget_render(self):
class CustomWidget(forms.Select):
def render(self, *args, **kwargs):
return "custom render output"
rel = Album._meta.get_field("band").remote_field
widget = CustomWidget()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget,
rel,
widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
output = wrapper.render("name", "value")
self.assertIn("custom render output", output)
def test_widget_delegates_value_omitted_from_data(self):
class CustomWidget(forms.Select):
def value_omitted_from_data(self, data, files, name):
return False
rel = Album._meta.get_field("band").remote_field
widget = CustomWidget()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.value_omitted_from_data({}, {}, "band"), False)
def test_widget_is_hidden(self):
rel = Album._meta.get_field("band").remote_field
widget = forms.HiddenInput()
widget.choices = ()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.is_hidden, True)
context = wrapper.get_context("band", None, {})
self.assertIs(context["is_hidden"], True)
output = wrapper.render("name", "value")
# Related item links are hidden.
self.assertNotIn("<a ", output)
def test_widget_is_not_hidden(self):
rel = Album._meta.get_field("band").remote_field
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.is_hidden, False)
context = wrapper.get_context("band", None, {})
self.assertIs(context["is_hidden"], False)
output = wrapper.render("name", "value")
# Related item links are present.
self.assertIn("<a ", output)
@override_settings(ROOT_URLCONF="admin_widgets.urls")
class AdminWidgetSeleniumTestCase(AdminSeleniumTestCase):
available_apps = ["admin_widgets"] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.u1 = User.objects.create_superuser(
username="super", password="secret", email="[email protected]"
)
class DateTimePickerSeleniumTests(AdminWidgetSeleniumTestCase):
def test_show_hide_date_time_picker_widgets(self):
"""
Pressing the ESC key or clicking on a widget value closes the date and
time picker widgets.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
self.admin_login(username="super", password="secret", login_url="/")
        # Open a page that has date and time picker widgets
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_member_add")
)
# First, with the date picker widget ---------------------------------
cal_icon = self.selenium.find_element(By.ID, "calendarlink0")
# The date picker is hidden
self.assertFalse(
self.selenium.find_element(By.ID, "calendarbox0").is_displayed()
)
# Click the calendar icon
cal_icon.click()
# The date picker is visible
self.assertTrue(
self.selenium.find_element(By.ID, "calendarbox0").is_displayed()
)
# Press the ESC key
self.selenium.find_element(By.TAG_NAME, "body").send_keys([Keys.ESCAPE])
# The date picker is hidden again
self.assertFalse(
self.selenium.find_element(By.ID, "calendarbox0").is_displayed()
)
# Click the calendar icon, then on the 15th of current month
cal_icon.click()
self.selenium.find_element(By.XPATH, "//a[contains(text(), '15')]").click()
self.assertFalse(
self.selenium.find_element(By.ID, "calendarbox0").is_displayed()
)
self.assertEqual(
self.selenium.find_element(By.ID, "id_birthdate_0").get_attribute("value"),
datetime.today().strftime("%Y-%m-") + "15",
)
# Then, with the time picker widget ----------------------------------
time_icon = self.selenium.find_element(By.ID, "clocklink0")
# The time picker is hidden
self.assertFalse(self.selenium.find_element(By.ID, "clockbox0").is_displayed())
# Click the time icon
time_icon.click()
# The time picker is visible
self.assertTrue(self.selenium.find_element(By.ID, "clockbox0").is_displayed())
self.assertEqual(
[
x.text
for x in self.selenium.find_elements(
By.XPATH, "//ul[@class='timelist']/li/a"
)
],
["Now", "Midnight", "6 a.m.", "Noon", "6 p.m."],
)
# Press the ESC key
self.selenium.find_element(By.TAG_NAME, "body").send_keys([Keys.ESCAPE])
# The time picker is hidden again
self.assertFalse(self.selenium.find_element(By.ID, "clockbox0").is_displayed())
# Click the time icon, then select the 'Noon' value
time_icon.click()
self.selenium.find_element(By.XPATH, "//a[contains(text(), 'Noon')]").click()
self.assertFalse(self.selenium.find_element(By.ID, "clockbox0").is_displayed())
self.assertEqual(
self.selenium.find_element(By.ID, "id_birthdate_1").get_attribute("value"),
"12:00:00",
)
def test_calendar_nonday_class(self):
"""
Ensure cells that are not days of the month have the `nonday` CSS class.
Refs #4574.
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
        # Open a page that has date and time picker widgets
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_member_add")
)
# fill in the birth date.
self.selenium.find_element(By.ID, "id_birthdate_0").send_keys("2013-06-01")
# Click the calendar icon
self.selenium.find_element(By.ID, "calendarlink0").click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element(By.ID, "calendarin0")
tds = calendar0.find_elements(By.TAG_NAME, "td")
# make sure the first and last 6 cells have class nonday
for td in tds[:6] + tds[-6:]:
self.assertEqual(td.get_attribute("class"), "nonday")
def test_calendar_selected_class(self):
"""
        Ensure the cell for the day in the input has the `selected` CSS class.
Refs #4574.
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
        # Open a page that has date and time picker widgets
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_member_add")
)
# fill in the birth date.
self.selenium.find_element(By.ID, "id_birthdate_0").send_keys("2013-06-01")
# Click the calendar icon
self.selenium.find_element(By.ID, "calendarlink0").click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element(By.ID, "calendarin0")
tds = calendar0.find_elements(By.TAG_NAME, "td")
# verify the selected cell
selected = tds[6]
self.assertEqual(selected.get_attribute("class"), "selected")
self.assertEqual(selected.text, "1")
def test_calendar_no_selected_class(self):
"""
Ensure no cells are given the selected class when the field is empty.
Refs #4574.
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
        # Open a page that has date and time picker widgets
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_member_add")
)
# Click the calendar icon
self.selenium.find_element(By.ID, "calendarlink0").click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element(By.ID, "calendarin0")
tds = calendar0.find_elements(By.TAG_NAME, "td")
# verify there are no cells with the selected class
selected = [td for td in tds if td.get_attribute("class") == "selected"]
self.assertEqual(len(selected), 0)
def test_calendar_show_date_from_input(self):
"""
The calendar shows the date from the input field for every locale
supported by Django.
"""
from selenium.webdriver.common.by import By
self.selenium.set_window_size(1024, 768)
self.admin_login(username="super", password="secret", login_url="/")
# Enter test data
member = Member.objects.create(
name="Bob", birthdate=datetime(1984, 5, 15), gender="M"
)
# Get month name translations for every locale
month_string = "May"
path = os.path.join(
os.path.dirname(import_module("django.contrib.admin").__file__), "locale"
)
for language_code, language_name in settings.LANGUAGES:
try:
catalog = gettext.translation("djangojs", path, [language_code])
except OSError:
continue
if month_string in catalog._catalog:
month_name = catalog._catalog[month_string]
else:
month_name = month_string
# Get the expected caption
may_translation = month_name
expected_caption = "{:s} {:d}".format(may_translation.upper(), 1984)
# Test with every locale
with override_settings(LANGUAGE_CODE=language_code):
# Open a page that has a date picker widget
url = reverse("admin:admin_widgets_member_change", args=(member.pk,))
self.selenium.get(self.live_server_url + url)
# Click on the calendar icon
self.selenium.find_element(By.ID, "calendarlink0").click()
# Make sure that the right month and year are displayed
self.wait_for_text("#calendarin0 caption", expected_caption)
@override_settings(TIME_ZONE="Asia/Singapore")
class DateTimePickerShortcutsSeleniumTests(AdminWidgetSeleniumTestCase):
def test_date_time_picker_shortcuts(self):
"""
date/time/datetime picker shortcuts work in the current time zone.
Refs #20663.
        This test case is fairly tricky: it relies on Selenium still running
        the browser in the default time zone "America/Chicago" despite
        `override_settings` changing the time zone to "Asia/Singapore".
"""
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
error_margin = timedelta(seconds=10)
        # If we are neighbouring a DST transition, add an hour of error margin.
tz = zoneinfo.ZoneInfo("America/Chicago")
utc_now = datetime.now(zoneinfo.ZoneInfo("UTC"))
tz_yesterday = (utc_now - timedelta(days=1)).astimezone(tz).tzname()
tz_tomorrow = (utc_now + timedelta(days=1)).astimezone(tz).tzname()
if tz_yesterday != tz_tomorrow:
error_margin += timedelta(hours=1)
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_member_add")
)
self.selenium.find_element(By.ID, "id_name").send_keys("test")
# Click on the "today" and "now" shortcuts.
shortcuts = self.selenium.find_elements(
By.CSS_SELECTOR, ".field-birthdate .datetimeshortcuts"
)
now = datetime.now()
for shortcut in shortcuts:
shortcut.find_element(By.TAG_NAME, "a").click()
# There is a time zone mismatch warning.
# Warning: This would effectively fail if the TIME_ZONE defined in the
# settings has the same UTC offset as "Asia/Singapore" because the
# mismatch warning would be rightfully missing from the page.
self.assertCountSeleniumElements(".field-birthdate .timezonewarning", 1)
# Submit the form.
with self.wait_page_loaded():
self.selenium.find_element(By.NAME, "_save").click()
# Make sure that "now" in JavaScript is within 10 seconds
# from "now" on the server side.
member = Member.objects.get(name="test")
self.assertGreater(member.birthdate, now - error_margin)
self.assertLess(member.birthdate, now + error_margin)
# The above tests run with Asia/Singapore, which is on the positive side of
# UTC. Here we test with a time zone on the negative side.
@override_settings(TIME_ZONE="US/Eastern")
class DateTimePickerAltTimezoneSeleniumTests(DateTimePickerShortcutsSeleniumTests):
pass
class HorizontalVerticalFilterSeleniumTests(AdminWidgetSeleniumTestCase):
def setUp(self):
super().setUp()
self.lisa = Student.objects.create(name="Lisa")
self.john = Student.objects.create(name="John")
self.bob = Student.objects.create(name="Bob")
self.peter = Student.objects.create(name="Peter")
self.jenny = Student.objects.create(name="Jenny")
self.jason = Student.objects.create(name="Jason")
self.cliff = Student.objects.create(name="Cliff")
self.arthur = Student.objects.create(name="Arthur")
self.school = School.objects.create(name="School of Awesome")
def assertActiveButtons(
self, mode, field_name, choose, remove, choose_all=None, remove_all=None
):
choose_link = "#id_%s_add_link" % field_name
choose_all_link = "#id_%s_add_all_link" % field_name
remove_link = "#id_%s_remove_link" % field_name
remove_all_link = "#id_%s_remove_all_link" % field_name
self.assertEqual(self.has_css_class(choose_link, "active"), choose)
self.assertEqual(self.has_css_class(remove_link, "active"), remove)
if mode == "horizontal":
self.assertEqual(self.has_css_class(choose_all_link, "active"), choose_all)
self.assertEqual(self.has_css_class(remove_all_link, "active"), remove_all)
def execute_basic_operations(self, mode, field_name):
from selenium.webdriver.common.by import By
original_url = self.selenium.current_url
from_box = "#id_%s_from" % field_name
to_box = "#id_%s_to" % field_name
choose_link = "id_%s_add_link" % field_name
choose_all_link = "id_%s_add_all_link" % field_name
remove_link = "id_%s_remove_link" % field_name
remove_all_link = "id_%s_remove_all_link" % field_name
# Initial positions ---------------------------------------------------
self.assertSelectOptions(
from_box,
[
str(self.arthur.id),
str(self.bob.id),
str(self.cliff.id),
str(self.jason.id),
str(self.jenny.id),
str(self.john.id),
],
)
self.assertSelectOptions(to_box, [str(self.lisa.id), str(self.peter.id)])
self.assertActiveButtons(mode, field_name, False, False, True, True)
# Click 'Choose all' --------------------------------------------------
if mode == "horizontal":
self.selenium.find_element(By.ID, choose_all_link).click()
elif mode == "vertical":
            # There's no 'Choose all' button in vertical mode, so individually
            # select all options and click 'Choose'.
for option in self.selenium.find_elements(
By.CSS_SELECTOR, from_box + " > option"
):
option.click()
self.selenium.find_element(By.ID, choose_link).click()
self.assertSelectOptions(from_box, [])
self.assertSelectOptions(
to_box,
[
str(self.lisa.id),
str(self.peter.id),
str(self.arthur.id),
str(self.bob.id),
str(self.cliff.id),
str(self.jason.id),
str(self.jenny.id),
str(self.john.id),
],
)
self.assertActiveButtons(mode, field_name, False, False, False, True)
# Click 'Remove all' --------------------------------------------------
if mode == "horizontal":
self.selenium.find_element(By.ID, remove_all_link).click()
elif mode == "vertical":
            # There's no 'Remove all' button in vertical mode, so individually
            # select all options and click 'Remove'.
for option in self.selenium.find_elements(
By.CSS_SELECTOR, to_box + " > option"
):
option.click()
self.selenium.find_element(By.ID, remove_link).click()
self.assertSelectOptions(
from_box,
[
str(self.lisa.id),
str(self.peter.id),
str(self.arthur.id),
str(self.bob.id),
str(self.cliff.id),
str(self.jason.id),
str(self.jenny.id),
str(self.john.id),
],
)
self.assertSelectOptions(to_box, [])
self.assertActiveButtons(mode, field_name, False, False, True, False)
# Choose some options ------------------------------------------------
from_lisa_select_option = self.selenium.find_element(
By.CSS_SELECTOR, '{} > option[value="{}"]'.format(from_box, self.lisa.id)
)
        # Check the title attribute is there for tooltips: ticket #20821
self.assertEqual(
from_lisa_select_option.get_attribute("title"),
from_lisa_select_option.get_attribute("text"),
)
self.select_option(from_box, str(self.lisa.id))
self.select_option(from_box, str(self.jason.id))
self.select_option(from_box, str(self.bob.id))
self.select_option(from_box, str(self.john.id))
self.assertActiveButtons(mode, field_name, True, False, True, False)
self.selenium.find_element(By.ID, choose_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(
from_box,
[
str(self.peter.id),
str(self.arthur.id),
str(self.cliff.id),
str(self.jenny.id),
],
)
self.assertSelectOptions(
to_box,
[
str(self.lisa.id),
str(self.bob.id),
str(self.jason.id),
str(self.john.id),
],
)
# Check the tooltip is still there after moving: ticket #20821
to_lisa_select_option = self.selenium.find_element(
By.CSS_SELECTOR, '{} > option[value="{}"]'.format(to_box, self.lisa.id)
)
self.assertEqual(
to_lisa_select_option.get_attribute("title"),
to_lisa_select_option.get_attribute("text"),
)
# Remove some options -------------------------------------------------
self.select_option(to_box, str(self.lisa.id))
self.select_option(to_box, str(self.bob.id))
self.assertActiveButtons(mode, field_name, False, True, True, True)
self.selenium.find_element(By.ID, remove_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(
from_box,
[
str(self.peter.id),
str(self.arthur.id),
str(self.cliff.id),
str(self.jenny.id),
str(self.lisa.id),
str(self.bob.id),
],
)
self.assertSelectOptions(to_box, [str(self.jason.id), str(self.john.id)])
# Choose some more options --------------------------------------------
self.select_option(from_box, str(self.arthur.id))
self.select_option(from_box, str(self.cliff.id))
self.selenium.find_element(By.ID, choose_link).click()
self.assertSelectOptions(
from_box,
[
str(self.peter.id),
str(self.jenny.id),
str(self.lisa.id),
str(self.bob.id),
],
)
self.assertSelectOptions(
to_box,
[
str(self.jason.id),
str(self.john.id),
str(self.arthur.id),
str(self.cliff.id),
],
)
# Choose some more options --------------------------------------------
self.select_option(from_box, str(self.peter.id))
self.select_option(from_box, str(self.lisa.id))
# Confirm they're selected after clicking inactive buttons: ticket #26575
self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)])
self.selenium.find_element(By.ID, remove_link).click()
self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)])
# Unselect the options ------------------------------------------------
self.deselect_option(from_box, str(self.peter.id))
self.deselect_option(from_box, str(self.lisa.id))
# Choose some more options --------------------------------------------
self.select_option(to_box, str(self.jason.id))
self.select_option(to_box, str(self.john.id))
# Confirm they're selected after clicking inactive buttons: ticket #26575
self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)])
self.selenium.find_element(By.ID, choose_link).click()
self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)])
# Unselect the options ------------------------------------------------
self.deselect_option(to_box, str(self.jason.id))
self.deselect_option(to_box, str(self.john.id))
# Pressing buttons shouldn't change the URL.
self.assertEqual(self.selenium.current_url, original_url)
def test_basic(self):
from selenium.webdriver.common.by import By
self.selenium.set_window_size(1024, 768)
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username="super", password="secret", login_url="/")
self.selenium.get(
self.live_server_url
+ reverse("admin:admin_widgets_school_change", args=(self.school.id,))
)
self.wait_page_ready()
self.execute_basic_operations("vertical", "students")
self.execute_basic_operations("horizontal", "alumni")
# Save and check that everything is properly stored in the database ---
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.wait_page_ready()
self.school = School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(
list(self.school.students.all()),
[self.arthur, self.cliff, self.jason, self.john],
)
self.assertEqual(
list(self.school.alumni.all()),
[self.arthur, self.cliff, self.jason, self.john],
)
def test_filter(self):
"""
Typing in the search box filters out options displayed in the 'from'
box.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
self.selenium.set_window_size(1024, 768)
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username="super", password="secret", login_url="/")
self.selenium.get(
self.live_server_url
+ reverse("admin:admin_widgets_school_change", args=(self.school.id,))
)
for field_name in ["students", "alumni"]:
from_box = "#id_%s_from" % field_name
to_box = "#id_%s_to" % field_name
choose_link = "id_%s_add_link" % field_name
remove_link = "id_%s_remove_link" % field_name
input = self.selenium.find_element(By.ID, "id_%s_input" % field_name)
# Initial values
self.assertSelectOptions(
from_box,
[
str(self.arthur.id),
str(self.bob.id),
str(self.cliff.id),
str(self.jason.id),
str(self.jenny.id),
str(self.john.id),
],
)
# Typing in some characters filters out non-matching options
input.send_keys("a")
self.assertSelectOptions(
from_box, [str(self.arthur.id), str(self.jason.id)]
)
input.send_keys("R")
self.assertSelectOptions(from_box, [str(self.arthur.id)])
# Clearing the text box makes the other options reappear
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(
from_box, [str(self.arthur.id), str(self.jason.id)]
)
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(
from_box,
[
str(self.arthur.id),
str(self.bob.id),
str(self.cliff.id),
str(self.jason.id),
str(self.jenny.id),
str(self.john.id),
],
)
# -----------------------------------------------------------------
# Choosing a filtered option sends it properly to the 'to' box.
input.send_keys("a")
self.assertSelectOptions(
from_box, [str(self.arthur.id), str(self.jason.id)]
)
self.select_option(from_box, str(self.jason.id))
self.selenium.find_element(By.ID, choose_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id)])
self.assertSelectOptions(
to_box,
[
str(self.lisa.id),
str(self.peter.id),
str(self.jason.id),
],
)
self.select_option(to_box, str(self.lisa.id))
self.selenium.find_element(By.ID, remove_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.lisa.id)])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE]) # Clear text box
self.assertSelectOptions(
from_box,
[
str(self.arthur.id),
str(self.bob.id),
str(self.cliff.id),
str(self.jenny.id),
str(self.john.id),
str(self.lisa.id),
],
)
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
# -----------------------------------------------------------------
# Pressing enter on a filtered option sends it properly to
# the 'to' box.
self.select_option(to_box, str(self.jason.id))
self.selenium.find_element(By.ID, remove_link).click()
input.send_keys("ja")
self.assertSelectOptions(from_box, [str(self.jason.id)])
input.send_keys([Keys.ENTER])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE, Keys.BACK_SPACE])
# Save and check that everything is properly stored in the database ---
with self.wait_page_loaded():
self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
self.school = School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()), [self.jason, self.peter])
self.assertEqual(list(self.school.alumni.all()), [self.jason, self.peter])
def test_back_button_bug(self):
"""
Some browsers had a bug where navigating away from the change page
and then clicking the browser's back button would clear the
filter_horizontal/filter_vertical widgets (#13614).
"""
from selenium.webdriver.common.by import By
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username="super", password="secret", login_url="/")
change_url = reverse(
"admin:admin_widgets_school_change", args=(self.school.id,)
)
self.selenium.get(self.live_server_url + change_url)
# Navigate away and go back to the change form page.
self.selenium.find_element(By.LINK_TEXT, "Home").click()
self.selenium.back()
expected_unselected_values = [
str(self.arthur.id),
str(self.bob.id),
str(self.cliff.id),
str(self.jason.id),
str(self.jenny.id),
str(self.john.id),
]
expected_selected_values = [str(self.lisa.id), str(self.peter.id)]
# Everything is still in place
self.assertSelectOptions("#id_students_from", expected_unselected_values)
self.assertSelectOptions("#id_students_to", expected_selected_values)
self.assertSelectOptions("#id_alumni_from", expected_unselected_values)
self.assertSelectOptions("#id_alumni_to", expected_selected_values)
def test_refresh_page(self):
"""
Horizontal and vertical filter widgets keep selected options on page
reload (#22955).
"""
self.school.students.add(self.arthur, self.jason)
self.school.alumni.add(self.arthur, self.jason)
self.admin_login(username="super", password="secret", login_url="/")
change_url = reverse(
"admin:admin_widgets_school_change", args=(self.school.id,)
)
self.selenium.get(self.live_server_url + change_url)
self.assertCountSeleniumElements("#id_students_to > option", 2)
        # self.selenium.refresh() or send_keys(Keys.F5) does a hard reload and
        # doesn't replicate what happens when a user clicks the browser's
        # 'Refresh' button.
with self.wait_page_loaded():
self.selenium.execute_script("location.reload()")
self.assertCountSeleniumElements("#id_students_to > option", 2)
class AdminRawIdWidgetSeleniumTests(AdminWidgetSeleniumTestCase):
def setUp(self):
super().setUp()
Band.objects.create(id=42, name="Bogey Blues")
Band.objects.create(id=98, name="Green Potatoes")
def test_ForeignKey(self):
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_event_add")
)
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(
self.selenium.find_element(By.ID, "id_main_band").get_attribute("value"), ""
)
# Open the popup window and click on a band
self.selenium.find_element(By.ID, "lookup_id_main_band").click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element(By.LINK_TEXT, "Bogey Blues")
self.assertIn("/band/42/", link.get_attribute("href"))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value("#id_main_band", "42")
# Reopen the popup window and click on another band
self.selenium.find_element(By.ID, "lookup_id_main_band").click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element(By.LINK_TEXT, "Green Potatoes")
self.assertIn("/band/98/", link.get_attribute("href"))
link.click()
# The field now contains the other selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value("#id_main_band", "98")
def test_many_to_many(self):
from selenium.webdriver.common.by import By
self.admin_login(username="super", password="secret", login_url="/")
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_event_add")
)
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(
self.selenium.find_element(By.ID, "id_supporting_bands").get_attribute(
"value"
),
"",
)
# Help text for the field is displayed
self.assertEqual(
self.selenium.find_element(
By.CSS_SELECTOR, ".field-supporting_bands div.help"
).text,
"Supporting Bands.",
)
# Open the popup window and click on a band
self.selenium.find_element(By.ID, "lookup_id_supporting_bands").click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element(By.LINK_TEXT, "Bogey Blues")
self.assertIn("/band/42/", link.get_attribute("href"))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value("#id_supporting_bands", "42")
# Reopen the popup window and click on another band
self.selenium.find_element(By.ID, "lookup_id_supporting_bands").click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element(By.LINK_TEXT, "Green Potatoes")
self.assertIn("/band/98/", link.get_attribute("href"))
link.click()
# The field now contains the two selected bands' ids
self.selenium.switch_to.window(main_window)
self.wait_for_value("#id_supporting_bands", "42,98")
class RelatedFieldWidgetSeleniumTests(AdminWidgetSeleniumTestCase):
def test_ForeignKey_using_to_field(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
self.admin_login(username="super", password="secret", login_url="/")
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_profile_add")
)
main_window = self.selenium.current_window_handle
        # Click the Add User button to add a new user.
self.selenium.find_element(By.ID, "add_id_user").click()
self.wait_for_and_switch_to_popup()
password_field = self.selenium.find_element(By.ID, "id_password")
password_field.send_keys("password")
username_field = self.selenium.find_element(By.ID, "id_username")
username_value = "newuser"
username_field.send_keys(username_value)
save_button_css_selector = ".submit-row > input[type=submit]"
self.selenium.find_element(By.CSS_SELECTOR, save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
# The field now contains the new user
self.selenium.find_element(By.CSS_SELECTOR, "#id_user option[value=newuser]")
self.selenium.find_element(By.ID, "view_id_user").click()
self.wait_for_value("#id_username", "newuser")
self.selenium.back()
select = Select(self.selenium.find_element(By.ID, "id_user"))
select.select_by_value("newuser")
# Click the Change User button to change it
self.selenium.find_element(By.ID, "change_id_user").click()
self.wait_for_and_switch_to_popup()
username_field = self.selenium.find_element(By.ID, "id_username")
username_value = "changednewuser"
username_field.clear()
username_field.send_keys(username_value)
save_button_css_selector = ".submit-row > input[type=submit]"
self.selenium.find_element(By.CSS_SELECTOR, save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
self.selenium.find_element(
By.CSS_SELECTOR, "#id_user option[value=changednewuser]"
)
self.selenium.find_element(By.ID, "view_id_user").click()
self.wait_for_value("#id_username", "changednewuser")
self.selenium.back()
select = Select(self.selenium.find_element(By.ID, "id_user"))
select.select_by_value("changednewuser")
# Go ahead and submit the form to make sure it works
self.selenium.find_element(By.CSS_SELECTOR, save_button_css_selector).click()
self.wait_for_text(
"li.success", "The profile “changednewuser” was added successfully."
)
profiles = Profile.objects.all()
self.assertEqual(len(profiles), 1)
self.assertEqual(profiles[0].user.username, username_value)
df2604dee4e3b1d78d583f5696c321fa5deced20e726845fbd93dfa16179536b
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import posixpath
import re
from pathlib import Path
from django.http import FileResponse, Http404, HttpResponse, HttpResponseNotModified
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils._os import safe_join
from django.utils.http import http_date, parse_http_date
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
path('<path:path>', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(path).lstrip("/")
fullpath = Path(safe_join(document_root, path))
if fullpath.is_dir():
if show_indexes:
return directory_index(path, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not fullpath.exists():
raise Http404(_("“%(path)s” does not exist") % {"path": fullpath})
# Respect the If-Modified-Since header.
statobj = fullpath.stat()
if not was_modified_since(
request.META.get("HTTP_IF_MODIFIED_SINCE"), statobj.st_mtime, statobj.st_size
):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(str(fullpath))
content_type = content_type or "application/octet-stream"
response = FileResponse(fullpath.open("rb"), content_type=content_type)
response.headers["Last-Modified"] = http_date(statobj.st_mtime)
if encoding:
response.headers["Content-Encoding"] = encoding
return response
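# A hedged, development-only wiring sketch for serve(); MEDIA_ROOT and the
# "media/" prefix are assumptions about a conventional project, and the DEBUG
# guard keeps the view out of production URLconfs:
#
#     from django.conf import settings
#     from django.urls import re_path
#     from django.views.static import serve
#
#     if settings.DEBUG:
#         urlpatterns += [
#             re_path(
#                 r"^media/(?P<path>.*)$",
#                 serve,
#                 {"document_root": settings.MEDIA_ROOT},
#             ),
#         ]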
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8">
<meta http-equiv="Content-Language" content="en-us">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% blocktranslate %}Index of {{ directory }}{% endblocktranslate %}</title>
</head>
<body>
<h1>{% blocktranslate %}Index of {{ directory }}{% endblocktranslate %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = gettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template(
[
"static/directory_index.html",
"static/directory_index",
]
)
except TemplateDoesNotExist:
t = Engine(libraries={"i18n": "django.templatetags.i18n"}).from_string(
DEFAULT_DIRECTORY_INDEX_TEMPLATE
)
c = Context()
else:
c = {}
files = []
for f in fullpath.iterdir():
if not f.name.startswith("."):
url = str(f.relative_to(fullpath))
if f.is_dir():
url += "/"
files.append(url)
c.update(
{
"directory": path + "/",
"file_list": files,
}
)
return HttpResponse(t.render(c))
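# Hedged note: a project can override the hardcoded listing by providing a
# "static/directory_index.html" template (the first name tried above); it
# receives "directory" and "file_list" in its context, e.g.:
#
#     <h1>Index of {{ directory }}</h1>
#     <ul>
#     {% for f in file_list %}<li><a href="{{ f|urlencode }}">{{ f }}</a></li>{% endfor %}
#     </ul>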
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header, re.IGNORECASE)
if matches is None:
raise ValueError
header_mtime = parse_http_date(matches[1])
header_len = matches[3]
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (ValueError, OverflowError):
return True
return False
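# Hedged sketch of the contract above (metadata values are hypothetical): a
# matching If-Modified-Since header means "not modified", a missing header
# means "send the file".
_mtime, _size = 1700000000, 1024
_header = "%s; length=%d" % (http_date(_mtime), _size)
assert was_modified_since(_header, _mtime, _size) is False
assert was_modified_since(None, _mtime, _size) is True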
51fec2e7513cd5010c74e9f293cad7a821259a2076e1d88e286414c726f71665
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.db.migrations.utils import field_references, resolve_relation
from django.db.models.options import normalize_together
from django.utils.functional import cached_property
from .fields import AddField, AlterField, FieldOperation, RemoveField, RenameField
def _check_for_duplicates(arg_name, objs):
used_vals = set()
for val in objs:
if val in used_vals:
raise ValueError(
"Found duplicate value %s in CreateModel %s argument." % (val, arg_name)
)
used_vals.add(val)
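# A hedged, self-contained sketch (names are illustrative): the helper above
# fails fast on repeated names, so a malformed CreateModel surfaces a clear
# ValueError instead of producing a broken migration state.
try:
    _check_for_duplicates("fields", ["id", "title", "id"])
except ValueError:
    pass  # "Found duplicate value id in CreateModel fields argument."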
class ModelOperation(Operation):
def __init__(self, name):
self.name = name
@cached_property
def name_lower(self):
return self.name.lower()
def references_model(self, name, app_label):
return name.lower() == self.name_lower
def reduce(self, operation, app_label):
return super().reduce(operation, app_label) or self.can_reduce_through(
operation, app_label
)
def can_reduce_through(self, operation, app_label):
return not operation.references_model(self.name, app_label)
class CreateModel(ModelOperation):
"""Create a model's table."""
serialization_expand_args = ["fields", "options", "managers"]
def __init__(self, name, fields, options=None, bases=None, managers=None):
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
self.managers = managers or []
super().__init__(name)
# Sanity-check that there are no duplicated field names, bases, or
# manager names
_check_for_duplicates("fields", (name for name, _ in self.fields))
_check_for_duplicates(
"bases",
(
base._meta.label_lower
if hasattr(base, "_meta")
else base.lower()
if isinstance(base, str)
else base
for base in self.bases
),
)
_check_for_duplicates("managers", (name for name, _ in self.managers))
def deconstruct(self):
kwargs = {
"name": self.name,
"fields": self.fields,
}
if self.options:
kwargs["options"] = self.options
if self.bases and self.bases != (models.Model,):
kwargs["bases"] = self.bases
if self.managers and self.managers != [("objects", models.Manager())]:
kwargs["managers"] = self.managers
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.add_model(
ModelState(
app_label,
self.name,
list(self.fields),
dict(self.options),
tuple(self.bases),
list(self.managers),
)
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create %smodel %s" % (
"proxy " if self.options.get("proxy", False) else "",
self.name,
)
@property
def migration_name_fragment(self):
return self.name_lower
def references_model(self, name, app_label):
name_lower = name.lower()
if name_lower == self.name_lower:
return True
# Check we didn't inherit from the model
reference_model_tuple = (app_label, name_lower)
for base in self.bases:
if (
base is not models.Model
and isinstance(base, (models.base.ModelBase, str))
and resolve_relation(base, app_label) == reference_model_tuple
):
return True
# Check we have no FKs/M2Ms with it
for _name, field in self.fields:
if field_references(
(app_label, self.name_lower), field, reference_model_tuple
):
return True
return False
def reduce(self, operation, app_label):
if (
isinstance(operation, DeleteModel)
and self.name_lower == operation.name_lower
and not self.options.get("proxy", False)
):
return []
elif (
isinstance(operation, RenameModel)
and self.name_lower == operation.old_name_lower
):
return [
CreateModel(
operation.new_name,
fields=self.fields,
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif (
isinstance(operation, AlterModelOptions)
and self.name_lower == operation.name_lower
):
options = {**self.options, **operation.options}
for key in operation.ALTER_OPTION_KEYS:
if key not in operation.options:
options.pop(key, None)
return [
CreateModel(
self.name,
fields=self.fields,
options=options,
bases=self.bases,
managers=self.managers,
),
]
elif (
isinstance(operation, AlterModelManagers)
and self.name_lower == operation.name_lower
):
return [
CreateModel(
self.name,
fields=self.fields,
options=self.options,
bases=self.bases,
managers=operation.managers,
),
]
elif (
isinstance(operation, AlterTogetherOptionOperation)
and self.name_lower == operation.name_lower
):
return [
CreateModel(
self.name,
fields=self.fields,
options={
**self.options,
**{operation.option_name: operation.option_value},
},
bases=self.bases,
managers=self.managers,
),
]
elif (
isinstance(operation, AlterOrderWithRespectTo)
and self.name_lower == operation.name_lower
):
return [
CreateModel(
self.name,
fields=self.fields,
options={
**self.options,
"order_with_respect_to": operation.order_with_respect_to,
},
bases=self.bases,
managers=self.managers,
),
]
elif (
isinstance(operation, FieldOperation)
and self.name_lower == operation.model_name_lower
):
if isinstance(operation, AddField):
return [
CreateModel(
self.name,
fields=self.fields + [(operation.name, operation.field)],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, AlterField):
return [
CreateModel(
self.name,
fields=[
(n, operation.field if n == operation.name else v)
for n, v in self.fields
],
options=self.options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, RemoveField):
options = self.options.copy()
for option_name in ("unique_together", "index_together"):
option = options.pop(option_name, None)
if option:
option = set(
filter(
bool,
(
tuple(
f for f in fields if f != operation.name_lower
)
for fields in option
),
)
)
if option:
options[option_name] = option
order_with_respect_to = options.get("order_with_respect_to")
if order_with_respect_to == operation.name_lower:
del options["order_with_respect_to"]
return [
CreateModel(
self.name,
fields=[
(n, v)
for n, v in self.fields
if n.lower() != operation.name_lower
],
options=options,
bases=self.bases,
managers=self.managers,
),
]
elif isinstance(operation, RenameField):
options = self.options.copy()
for option_name in ("unique_together", "index_together"):
option = options.get(option_name)
if option:
options[option_name] = {
tuple(
operation.new_name if f == operation.old_name else f
for f in fields
)
for fields in option
}
order_with_respect_to = options.get("order_with_respect_to")
if order_with_respect_to == operation.old_name:
options["order_with_respect_to"] = operation.new_name
return [
CreateModel(
self.name,
fields=[
(operation.new_name if n == operation.old_name else n, v)
for n, v in self.fields
],
options=options,
bases=self.bases,
managers=self.managers,
),
]
return super().reduce(operation, app_label)
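# Hedged sketch of operation squashing (model and app names are hypothetical):
# reduce() folds a later AddField on the same model back into the CreateModel,
# which is how the migration optimizer collapses operation chains.
_create = CreateModel("Author", fields=[("id", models.AutoField(primary_key=True))])
_add = AddField("Author", "name", models.CharField(max_length=100))
[_squashed] = _create.reduce(_add, app_label="library")
assert [name for name, _ in _squashed.fields] == ["id", "name"]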
class DeleteModel(ModelOperation):
"""Drop a model's table."""
def deconstruct(self):
kwargs = {
"name": self.name,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.remove_model(app_label, self.name_lower)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def references_model(self, name, app_label):
# The deleted model could be referencing the specified model through
# related fields.
return True
def describe(self):
return "Delete model %s" % self.name
@property
def migration_name_fragment(self):
return "delete_%s" % self.name_lower
class RenameModel(ModelOperation):
"""Rename a model."""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
super().__init__(old_name)
@cached_property
def old_name_lower(self):
return self.old_name.lower()
@cached_property
def new_name_lower(self):
return self.new_name.lower()
def deconstruct(self):
kwargs = {
"old_name": self.old_name,
"new_name": self.new_name,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.rename_model(app_label, self.old_name, self.new_name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.new_name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.old_name)
old_db_table = old_model._meta.db_table
new_db_table = new_model._meta.db_table
            # Don't alter when the table name is not changed.
if old_db_table == new_db_table:
return
# Move the main table
schema_editor.alter_db_table(new_model, old_db_table, new_db_table)
# Alter the fields pointing to us
for related_object in old_model._meta.related_objects:
if related_object.related_model == old_model:
model = new_model
related_key = (app_label, self.new_name_lower)
else:
model = related_object.related_model
related_key = (
related_object.related_model._meta.app_label,
related_object.related_model._meta.model_name,
)
to_field = to_state.apps.get_model(*related_key)._meta.get_field(
related_object.field.name
)
schema_editor.alter_field(
model,
related_object.field,
to_field,
)
# Rename M2M fields whose name is based on this model's name.
fields = zip(
old_model._meta.local_many_to_many, new_model._meta.local_many_to_many
)
for (old_field, new_field) in fields:
# Skip self-referential fields as these are renamed above.
if (
new_field.model == new_field.related_model
or not new_field.remote_field.through._meta.auto_created
):
continue
# Rename the M2M table that's based on this model's name.
old_m2m_model = old_field.remote_field.through
new_m2m_model = new_field.remote_field.through
schema_editor.alter_db_table(
new_m2m_model,
old_m2m_model._meta.db_table,
new_m2m_model._meta.db_table,
)
# Rename the column in the M2M table that's based on this
# model's name.
schema_editor.alter_field(
new_m2m_model,
old_m2m_model._meta.get_field(old_model._meta.model_name),
new_m2m_model._meta.get_field(new_model._meta.model_name),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.new_name_lower, self.old_name_lower = (
self.old_name_lower,
self.new_name_lower,
)
self.new_name, self.old_name = self.old_name, self.new_name
self.database_forwards(app_label, schema_editor, from_state, to_state)
self.new_name_lower, self.old_name_lower = (
self.old_name_lower,
self.new_name_lower,
)
self.new_name, self.old_name = self.old_name, self.new_name
def references_model(self, name, app_label):
return (
name.lower() == self.old_name_lower or name.lower() == self.new_name_lower
)
def describe(self):
return "Rename model %s to %s" % (self.old_name, self.new_name)
@property
def migration_name_fragment(self):
return "rename_%s_%s" % (self.old_name_lower, self.new_name_lower)
def reduce(self, operation, app_label):
if (
isinstance(operation, RenameModel)
and self.new_name_lower == operation.old_name_lower
):
return [
RenameModel(
self.old_name,
operation.new_name,
),
]
# Skip `ModelOperation.reduce` as we want to run `references_model`
# against self.new_name.
return super(ModelOperation, self).reduce(
operation, app_label
) or not operation.references_model(self.new_name, app_label)
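# Hedged sketch (names hypothetical): consecutive renames chain into one, so
# Author -> Writer followed by Writer -> Novelist reduces to a single
# Author -> Novelist operation.
_first_rename = RenameModel("Author", "Writer")
[_merged] = _first_rename.reduce(RenameModel("Writer", "Novelist"), app_label="books")
assert (_merged.old_name, _merged.new_name) == ("Author", "Novelist")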
class ModelOptionOperation(ModelOperation):
def reduce(self, operation, app_label):
if (
isinstance(operation, (self.__class__, DeleteModel))
and self.name_lower == operation.name_lower
):
return [operation]
return super().reduce(operation, app_label)
class AlterModelTable(ModelOptionOperation):
"""Rename a model's table."""
def __init__(self, name, table):
self.table = table
super().__init__(name)
def deconstruct(self):
kwargs = {
"name": self.name,
"table": self.table,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_model_options(app_label, self.name_lower, {"db_table": self.table})
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Rename M2M fields whose name is based on this model's db_table
for (old_field, new_field) in zip(
old_model._meta.local_many_to_many, new_model._meta.local_many_to_many
):
if new_field.remote_field.through._meta.auto_created:
schema_editor.alter_db_table(
new_field.remote_field.through,
old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Rename table for %s to %s" % (
self.name,
self.table if self.table is not None else "(default)",
)
@property
def migration_name_fragment(self):
return "alter_%s_table" % self.name_lower
class AlterTogetherOptionOperation(ModelOptionOperation):
option_name = None
def __init__(self, name, option_value):
if option_value:
option_value = set(normalize_together(option_value))
setattr(self, self.option_name, option_value)
super().__init__(name)
@cached_property
def option_value(self):
return getattr(self, self.option_name)
def deconstruct(self):
kwargs = {
"name": self.name,
self.option_name: self.option_value,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_model_options(
app_label,
self.name_lower,
{self.option_name: self.option_value},
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
alter_together = getattr(schema_editor, "alter_%s" % self.option_name)
alter_together(
new_model,
getattr(old_model._meta, self.option_name, set()),
getattr(new_model._meta, self.option_name, set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label):
return self.references_model(model_name, app_label) and (
not self.option_value
or any((name in fields) for fields in self.option_value)
)
def describe(self):
return "Alter %s for %s (%s constraint(s))" % (
self.option_name,
self.name,
len(self.option_value or ""),
)
@property
def migration_name_fragment(self):
return "alter_%s_%s" % (self.name_lower, self.option_name)
def can_reduce_through(self, operation, app_label):
return super().can_reduce_through(operation, app_label) or (
isinstance(operation, AlterTogetherOptionOperation)
and type(operation) is not type(self)
)
class AlterUniqueTogether(AlterTogetherOptionOperation):
"""
Change the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
option_name = "unique_together"
def __init__(self, name, unique_together):
super().__init__(name, unique_together)
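# Hedged sketch (field names hypothetical): the constructor normalizes the
# input into a set of tuples, and references_field() reports any field that
# participates in the constraint.
_uniq = AlterUniqueTogether("book", [("author", "title")])
assert _uniq.unique_together == {("author", "title")}
assert _uniq.references_field("book", "title", "library")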
class AlterIndexTogether(AlterTogetherOptionOperation):
"""
Change the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
option_name = "index_together"
def __init__(self, name, index_together):
super().__init__(name, index_together)
class AlterOrderWithRespectTo(ModelOptionOperation):
"""Represent a change with the order_with_respect_to option."""
option_name = "order_with_respect_to"
def __init__(self, name, order_with_respect_to):
self.order_with_respect_to = order_with_respect_to
super().__init__(name)
def deconstruct(self):
kwargs = {
"name": self.name,
"order_with_respect_to": self.order_with_respect_to,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_model_options(
app_label,
self.name_lower,
{self.option_name: self.order_with_respect_to},
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.name)
# Remove a field if we need to
if (
from_model._meta.order_with_respect_to
and not to_model._meta.order_with_respect_to
):
schema_editor.remove_field(
from_model, from_model._meta.get_field("_order")
)
            # Add a field if we need to (the column itself is left untouched,
            # as this is likely a rename)
elif (
to_model._meta.order_with_respect_to
and not from_model._meta.order_with_respect_to
):
field = to_model._meta.get_field("_order")
if not field.has_default():
field.default = 0
schema_editor.add_field(
from_model,
field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label):
return self.references_model(model_name, app_label) and (
self.order_with_respect_to is None or name == self.order_with_respect_to
)
def describe(self):
return "Set order_with_respect_to on %s to %s" % (
self.name,
self.order_with_respect_to,
)
@property
def migration_name_fragment(self):
return "alter_%s_order_with_respect_to" % self.name_lower
class AlterModelOptions(ModelOptionOperation):
"""
Set new model options that don't directly affect the database schema
(like verbose_name, permissions, ordering). Python code in migrations
may still need them.
"""
# Model options we want to compare and preserve in an AlterModelOptions op
ALTER_OPTION_KEYS = [
"base_manager_name",
"default_manager_name",
"default_related_name",
"get_latest_by",
"managed",
"ordering",
"permissions",
"default_permissions",
"select_on_save",
"verbose_name",
"verbose_name_plural",
]
def __init__(self, name, options):
self.options = options
super().__init__(name)
def deconstruct(self):
kwargs = {
"name": self.name,
"options": self.options,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_model_options(
app_label,
self.name_lower,
self.options,
self.ALTER_OPTION_KEYS,
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def describe(self):
return "Change Meta options on %s" % self.name
@property
def migration_name_fragment(self):
return "alter_%s_options" % self.name_lower
class AlterModelManagers(ModelOptionOperation):
"""Alter the model's managers."""
serialization_expand_args = ["managers"]
def __init__(self, name, managers):
self.managers = managers
super().__init__(name)
def deconstruct(self):
return (self.__class__.__qualname__, [self.name, self.managers], {})
def state_forwards(self, app_label, state):
state.alter_model_managers(app_label, self.name_lower, self.managers)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def describe(self):
return "Change managers on %s" % self.name
@property
def migration_name_fragment(self):
return "alter_%s_managers" % self.name_lower
class IndexOperation(Operation):
option_name = "indexes"
@cached_property
def model_name_lower(self):
return self.model_name.lower()
class AddIndex(IndexOperation):
"""Add an index on a model."""
def __init__(self, model_name, index):
self.model_name = model_name
if not index.name:
raise ValueError(
"Indexes passed to AddIndex operations require a name "
"argument. %r doesn't have one." % index
)
self.index = index
def state_forwards(self, app_label, state):
state.add_index(app_label, self.model_name_lower, self.index)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.add_index(model, self.index)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.remove_index(model, self.index)
def deconstruct(self):
kwargs = {
"model_name": self.model_name,
"index": self.index,
}
return (
self.__class__.__qualname__,
[],
kwargs,
)
def describe(self):
if self.index.expressions:
return "Create index %s on %s on model %s" % (
self.index.name,
", ".join([str(expression) for expression in self.index.expressions]),
self.model_name,
)
return "Create index %s on field(s) %s of model %s" % (
self.index.name,
", ".join(self.index.fields),
self.model_name,
)
@property
def migration_name_fragment(self):
return "%s_%s" % (self.model_name_lower, self.index.name.lower())
class RemoveIndex(IndexOperation):
"""Remove an index from a model."""
def __init__(self, model_name, name):
self.model_name = model_name
self.name = name
def state_forwards(self, app_label, state):
state.remove_index(app_label, self.model_name_lower, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
from_model_state = from_state.models[app_label, self.model_name_lower]
index = from_model_state.get_index_by_name(self.name)
schema_editor.remove_index(model, index)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
to_model_state = to_state.models[app_label, self.model_name_lower]
index = to_model_state.get_index_by_name(self.name)
schema_editor.add_index(model, index)
def deconstruct(self):
kwargs = {
"model_name": self.model_name,
"name": self.name,
}
return (
self.__class__.__qualname__,
[],
kwargs,
)
def describe(self):
return "Remove index %s from %s" % (self.name, self.model_name)
@property
def migration_name_fragment(self):
return "remove_%s_%s" % (self.model_name_lower, self.name.lower())
class AddConstraint(IndexOperation):
option_name = "constraints"
def __init__(self, model_name, constraint):
self.model_name = model_name
self.constraint = constraint
def state_forwards(self, app_label, state):
state.add_constraint(app_label, self.model_name_lower, self.constraint)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.add_constraint(model, self.constraint)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.remove_constraint(model, self.constraint)
def deconstruct(self):
return (
self.__class__.__name__,
[],
{
"model_name": self.model_name,
"constraint": self.constraint,
},
)
def describe(self):
return "Create constraint %s on model %s" % (
self.constraint.name,
self.model_name,
)
@property
def migration_name_fragment(self):
return "%s_%s" % (self.model_name_lower, self.constraint.name.lower())
class RemoveConstraint(IndexOperation):
option_name = "constraints"
def __init__(self, model_name, name):
self.model_name = model_name
self.name = name
def state_forwards(self, app_label, state):
state.remove_constraint(app_label, self.model_name_lower, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
from_model_state = from_state.models[app_label, self.model_name_lower]
constraint = from_model_state.get_constraint_by_name(self.name)
schema_editor.remove_constraint(model, constraint)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
to_model_state = to_state.models[app_label, self.model_name_lower]
constraint = to_model_state.get_constraint_by_name(self.name)
schema_editor.add_constraint(model, constraint)
def deconstruct(self):
return (
self.__class__.__name__,
[],
{
"model_name": self.model_name,
"name": self.name,
},
)
def describe(self):
return "Remove constraint %s from model %s" % (self.name, self.model_name)
@property
def migration_name_fragment(self):
return "remove_%s_%s" % (self.model_name_lower, self.name.lower())
|
bec053773face756814738d066980145d35def0421cc3c656d83609537978a53 | from collections import defaultdict
from django.apps import apps
from django.db import models
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
class ContentTypeManager(models.Manager):
use_in_migrations = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Cache shared by all the get_for_* methods to speed up
# ContentType retrieval.
self._cache = {}
def get_by_natural_key(self, app_label, model):
try:
ct = self._cache[self.db][(app_label, model)]
except KeyError:
ct = self.get(app_label=app_label, model=model)
self._add_to_cache(self.db, ct)
return ct
def _get_opts(self, model, for_concrete_model):
if for_concrete_model:
model = model._meta.concrete_model
return model._meta
def _get_from_cache(self, opts):
key = (opts.app_label, opts.model_name)
return self._cache[self.db][key]
def get_for_model(self, model, for_concrete_model=True):
"""
Return the ContentType object for a given model, creating the
ContentType if necessary. Lookups are cached so that subsequent lookups
for the same model don't hit the database.
"""
opts = self._get_opts(model, for_concrete_model)
try:
return self._get_from_cache(opts)
except KeyError:
pass
        # The ContentType entry was not found in the cache, so proceed to
        # load or create it.
try:
# Start with get() and not get_or_create() in order to use
# the db_for_read (see #20401).
ct = self.get(app_label=opts.app_label, model=opts.model_name)
except self.model.DoesNotExist:
# Not found in the database; we proceed to create it. This time
# use get_or_create to take care of any race conditions.
ct, created = self.get_or_create(
app_label=opts.app_label,
model=opts.model_name,
)
self._add_to_cache(self.db, ct)
return ct
def get_for_models(self, *models, for_concrete_models=True):
"""
Given *models, return a dictionary mapping {model: content_type}.
"""
results = {}
# Models that aren't already in the cache grouped by app labels.
needed_models = defaultdict(set)
# Mapping of opts to the list of models requiring it.
needed_opts = defaultdict(list)
for model in models:
opts = self._get_opts(model, for_concrete_models)
try:
ct = self._get_from_cache(opts)
except KeyError:
needed_models[opts.app_label].add(opts.model_name)
needed_opts[opts].append(model)
else:
results[model] = ct
if needed_opts:
# Lookup required content types from the DB.
condition = Q(
*(
Q(("app_label", app_label), ("model__in", models), _connector=Q.AND)
for app_label, models in needed_models.items()
),
_connector=Q.OR,
)
cts = self.filter(condition)
for ct in cts:
opts_models = needed_opts.pop(ct.model_class()._meta, [])
for model in opts_models:
results[model] = ct
self._add_to_cache(self.db, ct)
# Create content types that weren't in the cache or DB.
for opts, opts_models in needed_opts.items():
ct = self.create(
app_label=opts.app_label,
model=opts.model_name,
)
self._add_to_cache(self.db, ct)
for model in opts_models:
results[model] = ct
return results
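    # Illustrative sketch of the condition built above, for two hypothetical
    # app labels whose models were not yet cached:
    #
    #     Q(app_label="library", model__in={"book", "author"})
    #     | Q(app_label="shop", model__in={"order"})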
def get_for_id(self, id):
"""
        Look up a ContentType by ID. Use the same shared cache as
        get_for_model (though ContentTypes are not created on-the-fly by
        get_for_id).
"""
try:
ct = self._cache[self.db][id]
except KeyError:
# This could raise a DoesNotExist; that's correct behavior and will
# make sure that only correct ctypes get stored in the cache dict.
ct = self.get(pk=id)
self._add_to_cache(self.db, ct)
return ct
def clear_cache(self):
"""
Clear out the content-type cache.
"""
self._cache.clear()
def _add_to_cache(self, using, ct):
"""Insert a ContentType into the cache."""
        # Note it's possible for ContentType objects to be stale, in which
        # case model_class() returns None. Hence, don't rely on
        # model._meta.app_label here; use the ContentType's own fields.
key = (ct.app_label, ct.model)
self._cache.setdefault(using, {})[key] = ct
self._cache.setdefault(using, {})[ct.id] = ct
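# Illustrative sketch (SomeModel is a hypothetical model): after the first
# get_for_model() call, lookups by model, id, or natural key are all served
# from the shared per-database cache without further queries.
#
#     ct = ContentType.objects.get_for_model(SomeModel)  # one query
#     ContentType.objects.get_for_id(ct.id)  # cached
#     ContentType.objects.get_by_natural_key(ct.app_label, ct.model)  # cached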
class ContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(_("python model class name"), max_length=100)
objects = ContentTypeManager()
class Meta:
verbose_name = _("content type")
verbose_name_plural = _("content types")
db_table = "django_content_type"
unique_together = [["app_label", "model"]]
def __str__(self):
return self.app_labeled_name
@property
def name(self):
model = self.model_class()
if not model:
return self.model
return str(model._meta.verbose_name)
@property
def app_labeled_name(self):
model = self.model_class()
if not model:
return self.model
return "%s | %s" % (model._meta.app_label, model._meta.verbose_name)
def model_class(self):
"""Return the model class for this type of content."""
try:
return apps.get_model(self.app_label, self.model)
except LookupError:
return None
def get_object_for_this_type(self, **kwargs):
"""
Return an object of this type for the keyword arguments given.
        Basically, this is a proxy around this object_type's get() model
        method. The ObjectDoesNotExist exception, if raised, will not be
        caught, so code that calls this method should catch it.
"""
return self.model_class()._base_manager.using(self._state.db).get(**kwargs)
def get_all_objects_for_this_type(self, **kwargs):
"""
Return all objects of this type for the keyword arguments given.
"""
return self.model_class()._base_manager.using(self._state.db).filter(**kwargs)
def natural_key(self):
return (self.app_label, self.model)
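    # Illustrative sketch (the app/model names are hypothetical): rows can
    # outlive their model, e.g. after an app is removed, in which case
    # model_class() returns None and str() falls back to the raw model field.
    #
    #     stale = ContentType(app_label="gone_app", model="gonemodel")
    #     assert stale.model_class() is None
    #     assert str(stale) == "gonemodel"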
|
478ca70bb1da0bd6648d151d74f88aef46167eb3cc7b5bcec5b7fcf26b3406bc | from django.db import migrations, models
from django.db.migrations import operations
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.serializer import serializer_factory
from django.test import SimpleTestCase
from .models import EmptyManager, UnicodeModel
class OptimizerTests(SimpleTestCase):
"""
    Tests the migration optimizer.
"""
def optimize(self, operations, app_label):
"""
Handy shortcut for getting results + number of loops
"""
optimizer = MigrationOptimizer()
return optimizer.optimize(operations, app_label), optimizer._iterations
def serialize(self, value):
return serializer_factory(value).serialize()[0]
def assertOptimizesTo(
self, operations, expected, exact=None, less_than=None, app_label=None
):
result, iterations = self.optimize(operations, app_label or "migrations")
result = [self.serialize(f) for f in result]
expected = [self.serialize(f) for f in expected]
self.assertEqual(expected, result)
if exact is not None and iterations != exact:
raise self.failureException(
"Optimization did not take exactly %s iterations (it took %s)"
% (exact, iterations)
)
if less_than is not None and iterations >= less_than:
raise self.failureException(
"Optimization did not take less than %s iterations (it took %s)"
% (less_than, iterations)
)
def assertDoesNotOptimize(self, operations, **kwargs):
self.assertOptimizesTo(operations, operations, **kwargs)
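    # Illustrative sketch of what the helpers above exercise: the optimizer
    # repeatedly scans the operation list, merging or cancelling adjacent
    # pairs until a fixed point is reached, e.g.
    #
    #     MigrationOptimizer().optimize(
    #         [migrations.CreateModel("Foo", fields=[]),
    #          migrations.DeleteModel("Foo")],
    #         app_label="migrations",
    #     )  # -> []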
def test_none_app_label(self):
optimizer = MigrationOptimizer()
with self.assertRaisesMessage(TypeError, "app_label must be a str"):
optimizer.optimize([], None)
def test_single(self):
"""
        The optimizer does nothing on a single operation,
        and it does so in just one pass.
"""
self.assertOptimizesTo(
[migrations.DeleteModel("Foo")],
[migrations.DeleteModel("Foo")],
exact=1,
)
def test_create_delete_model(self):
"""
CreateModel and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_rename_model(self):
"""
CreateModel should absorb RenameModels.
"""
managers = [("objects", EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RenameModel("Foo", "Bar"),
],
[
migrations.CreateModel(
"Bar",
[("name", models.CharField(max_length=255))],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
)
],
)
def test_rename_model_self(self):
"""
RenameModels should absorb themselves.
"""
self.assertOptimizesTo(
[
migrations.RenameModel("Foo", "Baa"),
migrations.RenameModel("Baa", "Bar"),
],
[
migrations.RenameModel("Foo", "Bar"),
],
)
def test_create_alter_model_options(self):
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", fields=[]),
migrations.AlterModelOptions(
name="Foo", options={"verbose_name_plural": "Foozes"}
),
],
[
migrations.CreateModel(
"Foo", fields=[], options={"verbose_name_plural": "Foozes"}
),
],
)
def test_create_alter_model_managers(self):
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", fields=[]),
migrations.AlterModelManagers(
name="Foo",
managers=[
("objects", models.Manager()),
("things", models.Manager()),
],
),
],
[
migrations.CreateModel(
"Foo",
fields=[],
managers=[
("objects", models.Manager()),
("things", models.Manager()),
],
),
],
)
def test_create_model_and_remove_model_options(self):
self.assertOptimizesTo(
[
migrations.CreateModel(
"MyModel",
fields=[],
options={"verbose_name": "My Model"},
),
migrations.AlterModelOptions("MyModel", options={}),
],
[migrations.CreateModel("MyModel", fields=[])],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"MyModel",
fields=[],
options={
"verbose_name": "My Model",
"verbose_name_plural": "My Model plural",
},
),
migrations.AlterModelOptions(
"MyModel",
options={"verbose_name": "My Model"},
),
],
[
migrations.CreateModel(
"MyModel",
fields=[],
options={"verbose_name": "My Model"},
),
],
)
def _test_create_alter_foo_delete_model(self, alter_foo):
"""
CreateModel, AlterModelTable, AlterUniqueTogether/AlterIndexTogether/
AlterOrderWithRespectTo, and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.AlterModelTable("Foo", "woohoo"),
alter_foo,
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_alter_unique_delete_model(self):
self._test_create_alter_foo_delete_model(
migrations.AlterUniqueTogether("Foo", [["a", "b"]])
)
def test_create_alter_index_delete_model(self):
self._test_create_alter_foo_delete_model(
migrations.AlterIndexTogether("Foo", [["a", "b"]])
)
def test_create_alter_owrt_delete_model(self):
self._test_create_alter_foo_delete_model(
migrations.AlterOrderWithRespectTo("Foo", "a")
)
def _test_alter_alter_model(self, alter_foo, alter_bar):
"""
Two AlterUniqueTogether/AlterIndexTogether/AlterOrderWithRespectTo
should collapse into the second.
"""
self.assertOptimizesTo(
[
alter_foo,
alter_bar,
],
[
alter_bar,
],
)
def test_alter_alter_table_model(self):
self._test_alter_alter_model(
migrations.AlterModelTable("Foo", "a"),
migrations.AlterModelTable("Foo", "b"),
)
def test_alter_alter_unique_model(self):
self._test_alter_alter_model(
migrations.AlterUniqueTogether("Foo", [["a", "b"]]),
migrations.AlterUniqueTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_index_model(self):
self._test_alter_alter_model(
migrations.AlterIndexTogether("Foo", [["a", "b"]]),
migrations.AlterIndexTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_owrt_model(self):
self._test_alter_alter_model(
migrations.AlterOrderWithRespectTo("Foo", "a"),
migrations.AlterOrderWithRespectTo("Foo", "b"),
)
def test_optimize_through_create(self):
"""
We should be able to optimize away create/delete through a create or
delete of a different model, but only if the create operation does not
mention the model at all.
"""
# These should work
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Bar"),
migrations.DeleteModel("Foo"),
],
[],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
migrations.DeleteModel("Bar"),
],
[],
)
# Operations should be optimized if the FK references a model from the
# other app.
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]
),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel(
"Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]
),
],
app_label="otherapp",
)
# But it shouldn't work if a FK references a model with the same
# app_label.
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("other", models.ForeignKey("Foo", models.CASCADE))]
),
migrations.DeleteModel("Foo"),
],
)
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]
),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
# This should not work - bases should block it
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("size", models.IntegerField())], bases=("Foo",)
),
migrations.DeleteModel("Foo"),
],
)
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)
),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
# The same operations should be optimized if app_label and none of
# bases belong to that app.
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)
),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel(
"Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)
),
],
app_label="otherapp",
)
# But it shouldn't work if some of bases belongs to the specified app.
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)
),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Book", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Person", [("name", models.CharField(max_length=255))]
),
migrations.AddField(
"book",
"author",
models.ForeignKey("test_app.Person", models.CASCADE),
),
migrations.CreateModel(
"Review",
[("book", models.ForeignKey("test_app.Book", models.CASCADE))],
),
migrations.CreateModel(
"Reviewer", [("name", models.CharField(max_length=255))]
),
migrations.AddField(
"review",
"reviewer",
models.ForeignKey("test_app.Reviewer", models.CASCADE),
),
migrations.RemoveField("book", "author"),
migrations.DeleteModel("Person"),
],
[
migrations.CreateModel(
"Book", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Reviewer", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Review",
[
("book", models.ForeignKey("test_app.Book", models.CASCADE)),
(
"reviewer",
models.ForeignKey("test_app.Reviewer", models.CASCADE),
),
],
),
],
app_label="test_app",
)
def test_create_model_add_field(self):
"""
AddField should optimize into CreateModel.
"""
managers = [("objects", EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
migrations.AddField("Foo", "age", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_create_model_reordering(self):
"""
AddField optimizes into CreateModel if it's a FK to a model that's
between them (and there's no FK in the other direction), by changing
the order of the CreateModel operations.
"""
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.AddField(
"Foo", "link", models.ForeignKey("migrations.Link", models.CASCADE)
),
],
[
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.CreateModel(
"Foo",
[
("name", models.CharField(max_length=255)),
("link", models.ForeignKey("migrations.Link", models.CASCADE)),
],
),
],
)
def test_create_model_reordering_circular_fk(self):
"""
CreateModel reordering behavior doesn't result in an infinite loop if
there are FKs in both directions.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Bar", [("url", models.TextField())]),
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.AddField(
"Bar", "foo_fk", models.ForeignKey("migrations.Foo", models.CASCADE)
),
migrations.AddField(
"Foo", "bar_fk", models.ForeignKey("migrations.Bar", models.CASCADE)
),
],
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar",
[
("url", models.TextField()),
("foo_fk", models.ForeignKey("migrations.Foo", models.CASCADE)),
],
),
migrations.AddField(
"Foo", "bar_fk", models.ForeignKey("migrations.Bar", models.CASCADE)
),
],
)
def test_create_model_no_reordering_for_unrelated_fk(self):
"""
CreateModel order remains unchanged if the later AddField operation
isn't a FK between them.
"""
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.AddField(
"Other",
"link",
models.ForeignKey("migrations.Link", models.CASCADE),
),
],
)
def test_create_model_no_reordering_of_inherited_model(self):
"""
A CreateModel that inherits from another isn't reordered to avoid
moving it earlier than its parent CreateModel operation.
"""
self.assertOptimizesTo(
[
migrations.CreateModel(
"Other", [("foo", models.CharField(max_length=255))]
),
migrations.CreateModel(
"ParentModel", [("bar", models.CharField(max_length=255))]
),
migrations.CreateModel(
"ChildModel",
[("baz", models.CharField(max_length=255))],
bases=("migrations.parentmodel",),
),
migrations.AddField(
"Other",
"fk",
models.ForeignKey("migrations.ChildModel", models.CASCADE),
),
],
[
migrations.CreateModel(
"ParentModel", [("bar", models.CharField(max_length=255))]
),
migrations.CreateModel(
"ChildModel",
[("baz", models.CharField(max_length=255))],
bases=("migrations.parentmodel",),
),
migrations.CreateModel(
"Other",
[
("foo", models.CharField(max_length=255)),
(
"fk",
models.ForeignKey("migrations.ChildModel", models.CASCADE),
),
],
),
],
)
def test_create_model_add_field_not_through_m2m_through(self):
"""
AddField should NOT optimize into CreateModel if it's an M2M using a
through that's created between them.
"""
self.assertDoesNotOptimize(
[
migrations.CreateModel("Employee", []),
migrations.CreateModel("Employer", []),
migrations.CreateModel(
"Employment",
[
(
"employee",
models.ForeignKey("migrations.Employee", models.CASCADE),
),
(
"employment",
models.ForeignKey("migrations.Employer", models.CASCADE),
),
],
),
migrations.AddField(
"Employer",
"employees",
models.ManyToManyField(
"migrations.Employee",
through="migrations.Employment",
),
),
],
)
def test_create_model_alter_field(self):
"""
AlterField should optimize into CreateModel.
"""
managers = [("objects", EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
migrations.AlterField("Foo", "name", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.IntegerField()),
],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_create_model_rename_field(self):
"""
RenameField should optimize into CreateModel.
"""
managers = [("objects", EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("title", models.CharField(max_length=255)),
],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_add_field_rename_field(self):
"""
        RenameField should optimize into AddField.
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.AddField("Foo", "title", models.CharField(max_length=255)),
],
)
def test_alter_field_rename_field(self):
"""
RenameField should optimize to the other side of AlterField,
and into itself.
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
migrations.RenameField("Foo", "title", "nom"),
],
[
migrations.RenameField("Foo", "name", "nom"),
migrations.AlterField("Foo", "nom", models.CharField(max_length=255)),
],
)
def test_swapping_fields_names(self):
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"MyModel",
[
("field_a", models.IntegerField()),
("field_b", models.IntegerField()),
],
),
migrations.RunPython(migrations.RunPython.noop),
migrations.RenameField("MyModel", "field_a", "field_c"),
migrations.RenameField("MyModel", "field_b", "field_a"),
migrations.RenameField("MyModel", "field_c", "field_b"),
],
)
def test_create_model_remove_field(self):
"""
RemoveField should optimize into CreateModel.
"""
managers = [("objects", EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RemoveField("Foo", "age"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_add_field_alter_field(self):
"""
AlterField should optimize into AddField.
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AlterField("Foo", "age", models.FloatField(default=2.4)),
],
[
migrations.AddField(
"Foo", name="age", field=models.FloatField(default=2.4)
),
],
)
def test_add_field_delete_field(self):
"""
        RemoveField should cancel AddField.
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[],
)
def test_alter_field_delete_field(self):
"""
        RemoveField should absorb AlterField.
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[
migrations.RemoveField("Foo", "age"),
],
)
def _test_create_alter_foo_field(self, alter):
"""
CreateModel, AlterFooTogether/AlterOrderWithRespectTo followed by an
add/alter/rename field should optimize to CreateModel with options.
"""
option_value = getattr(alter, alter.option_name)
options = {alter.option_name: option_value}
# AddField
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
),
alter,
migrations.AddField("Foo", "c", models.IntegerField()),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
],
options=options,
),
],
)
# AlterField
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
),
alter,
migrations.AlterField("Foo", "b", models.CharField(max_length=255)),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.CharField(max_length=255)),
],
options=options,
),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
],
),
alter,
migrations.AlterField("Foo", "c", models.CharField(max_length=255)),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.CharField(max_length=255)),
],
options=options,
),
],
)
# RenameField
if isinstance(option_value, str):
renamed_options = {alter.option_name: "c"}
else:
renamed_options = {
alter.option_name: {
tuple("c" if value == "b" else value for value in item)
for item in option_value
}
}
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
),
alter,
migrations.RenameField("Foo", "b", "c"),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("c", models.IntegerField()),
],
options=renamed_options,
),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
),
alter,
migrations.RenameField("Foo", "b", "x"),
migrations.RenameField("Foo", "x", "c"),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("c", models.IntegerField()),
],
options=renamed_options,
),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
],
),
alter,
migrations.RenameField("Foo", "c", "d"),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("d", models.IntegerField()),
],
options=options,
),
],
)
# RemoveField
if isinstance(option_value, str):
removed_options = None
else:
removed_options = {
alter.option_name: {
tuple(value for value in item if value != "b")
for item in option_value
}
}
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
),
alter,
migrations.RemoveField("Foo", "b"),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
],
options=removed_options,
),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
],
),
alter,
migrations.RemoveField("Foo", "c"),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
options=options,
),
],
)
def test_create_alter_unique_field(self):
self._test_create_alter_foo_field(
migrations.AlterUniqueTogether("Foo", [["a", "b"]])
)
def test_create_alter_index_field(self):
self._test_create_alter_foo_field(
migrations.AlterIndexTogether("Foo", [["a", "b"]])
)
def test_create_alter_owrt_field(self):
self._test_create_alter_foo_field(
migrations.AlterOrderWithRespectTo("Foo", "b")
)
def test_optimize_through_fields(self):
"""
        Field-level through checking works. This should collapse
model Foo to nonexistence, and model Bar to a single IntegerField
called "width".
"""
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AddField("Bar", "width", models.IntegerField()),
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RenameField("Bar", "size", "dimensions"),
migrations.RemoveField("Foo", "age"),
migrations.RenameModel("Foo", "Phou"),
migrations.RemoveField("Bar", "dimensions"),
migrations.RenameModel("Phou", "Fou"),
migrations.DeleteModel("Fou"),
],
[
migrations.CreateModel("Bar", [("width", models.IntegerField())]),
],
)
def test_optimize_elidable_operation(self):
elidable_operation = operations.base.Operation()
elidable_operation.elidable = True
self.assertOptimizesTo(
[
elidable_operation,
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
elidable_operation,
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
elidable_operation,
migrations.RenameModel("Foo", "Phou"),
migrations.DeleteModel("Bar"),
elidable_operation,
],
[
migrations.CreateModel(
"Phou", [("name", models.CharField(max_length=255))]
),
],
)
|
03aa0485a5912b5a5da33ea3525ddb451abfe91b3e65dd481c734c609498ad7a | from django.contrib.contenttypes.models import ContentType, ContentTypeManager
from django.db import models
from django.test import TestCase, override_settings
from django.test.utils import isolate_apps
from .models import Author, ConcreteModel, FooWithUrl, ProxyModel
class ContentTypesTests(TestCase):
def setUp(self):
ContentType.objects.clear_cache()
def tearDown(self):
ContentType.objects.clear_cache()
def test_lookup_cache(self):
"""
The content type cache (see ContentTypeManager) works correctly.
Lookups for a particular content type -- by model, ID, or natural key
-- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# A second hit, though, won't hit the DB, nor will a lookup by ID
# or natural key
with self.assertNumQueries(0):
ct = ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_id(ct.id)
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key("contenttypes", "contenttype")
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# The same should happen with a lookup by natural key
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_by_natural_key("contenttypes", "contenttype")
# And a second hit shouldn't hit the DB
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key("contenttypes", "contenttype")
def test_get_for_models_creation(self):
ContentType.objects.all().delete()
with self.assertNumQueries(4):
cts = ContentType.objects.get_for_models(
ContentType, FooWithUrl, ProxyModel, ConcreteModel
)
self.assertEqual(
cts,
{
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
ProxyModel: ContentType.objects.get_for_model(ProxyModel),
ConcreteModel: ContentType.objects.get_for_model(ConcreteModel),
},
)
def test_get_for_models_empty_cache(self):
# Empty cache.
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(
ContentType, FooWithUrl, ProxyModel, ConcreteModel
)
self.assertEqual(
cts,
{
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
ProxyModel: ContentType.objects.get_for_model(ProxyModel),
ConcreteModel: ContentType.objects.get_for_model(ConcreteModel),
},
)
def test_get_for_models_partial_cache(self):
# Partial cache
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(
cts,
{
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
},
)
def test_get_for_models_full_cache(self):
# Full cache
ContentType.objects.get_for_model(ContentType)
ContentType.objects.get_for_model(FooWithUrl)
with self.assertNumQueries(0):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(
cts,
{
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
},
)
@isolate_apps("contenttypes_tests")
def test_get_for_model_create_contenttype(self):
"""
ContentTypeManager.get_for_model() creates the corresponding content
type if it doesn't exist in the database.
"""
class ModelCreatedOnTheFly(models.Model):
name = models.CharField()
ct = ContentType.objects.get_for_model(ModelCreatedOnTheFly)
self.assertEqual(ct.app_label, "contenttypes_tests")
self.assertEqual(ct.model, "modelcreatedonthefly")
self.assertEqual(str(ct), "modelcreatedonthefly")
def test_get_for_concrete_model(self):
"""
        Make sure the `for_concrete_model` kwarg works correctly
        with concrete, proxy and deferred models.
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
self.assertEqual(
concrete_model_ct, ContentType.objects.get_for_model(ProxyModel)
)
self.assertEqual(
concrete_model_ct,
ContentType.objects.get_for_model(ConcreteModel, for_concrete_model=False),
)
proxy_model_ct = ContentType.objects.get_for_model(
ProxyModel, for_concrete_model=False
)
self.assertNotEqual(concrete_model_ct, proxy_model_ct)
        # Make sure deferred models are correctly handled.
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only("pk").get().__class__
DeferredProxyModel = ProxyModel.objects.only("pk").get().__class__
self.assertEqual(
concrete_model_ct, ContentType.objects.get_for_model(DeferredConcreteModel)
)
self.assertEqual(
concrete_model_ct,
ContentType.objects.get_for_model(
DeferredConcreteModel, for_concrete_model=False
),
)
self.assertEqual(
concrete_model_ct, ContentType.objects.get_for_model(DeferredProxyModel)
)
self.assertEqual(
proxy_model_ct,
ContentType.objects.get_for_model(
DeferredProxyModel, for_concrete_model=False
),
)
def test_get_for_concrete_models(self):
"""
        Make sure the `for_concrete_models` kwarg works correctly
with concrete, proxy and deferred models.
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel)
self.assertEqual(
cts,
{
ConcreteModel: concrete_model_ct,
ProxyModel: concrete_model_ct,
},
)
proxy_model_ct = ContentType.objects.get_for_model(
ProxyModel, for_concrete_model=False
)
cts = ContentType.objects.get_for_models(
ConcreteModel, ProxyModel, for_concrete_models=False
)
self.assertEqual(
cts,
{
ConcreteModel: concrete_model_ct,
ProxyModel: proxy_model_ct,
},
)
        # Make sure deferred models are correctly handled.
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only("pk").get().__class__
DeferredProxyModel = ProxyModel.objects.only("pk").get().__class__
cts = ContentType.objects.get_for_models(
DeferredConcreteModel, DeferredProxyModel
)
self.assertEqual(
cts,
{
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: concrete_model_ct,
},
)
cts = ContentType.objects.get_for_models(
DeferredConcreteModel, DeferredProxyModel, for_concrete_models=False
)
self.assertEqual(
cts,
{
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: proxy_model_ct,
},
)
def test_cache_not_shared_between_managers(self):
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_model(ContentType)
other_manager = ContentTypeManager()
other_manager.model = ContentType
with self.assertNumQueries(1):
other_manager.get_for_model(ContentType)
with self.assertNumQueries(0):
other_manager.get_for_model(ContentType)
def test_missing_model(self):
"""
Displaying content types in admin (or anywhere) doesn't break on
leftover content type records in the DB for which no model is defined
anymore.
"""
ct = ContentType.objects.create(
app_label="contenttypes",
model="OldModel",
)
self.assertEqual(str(ct), "OldModel")
self.assertIsNone(ct.model_class())
# Stale ContentTypes can be fetched like any other object.
ct_fetched = ContentType.objects.get_for_id(ct.pk)
self.assertIsNone(ct_fetched.model_class())
def test_missing_model_with_existing_model_name(self):
"""
Displaying content types in admin (or anywhere) doesn't break on
leftover content type records in the DB for which no model is defined
anymore, even if a model with the same name exists in another app.
"""
# Create a stale ContentType that matches the name of an existing
# model.
ContentType.objects.create(app_label="contenttypes", model="author")
ContentType.objects.clear_cache()
# get_for_models() should work as expected for existing models.
cts = ContentType.objects.get_for_models(ContentType, Author)
self.assertEqual(
cts,
{
ContentType: ContentType.objects.get_for_model(ContentType),
Author: ContentType.objects.get_for_model(Author),
},
)
def test_str(self):
ct = ContentType.objects.get(app_label="contenttypes_tests", model="site")
self.assertEqual(str(ct), "contenttypes_tests | site")
def test_app_labeled_name(self):
ct = ContentType.objects.get(app_label="contenttypes_tests", model="site")
self.assertEqual(ct.app_labeled_name, "contenttypes_tests | site")
def test_app_labeled_name_unknown_model(self):
ct = ContentType(app_label="contenttypes_tests", model="unknown")
self.assertEqual(ct.app_labeled_name, "unknown")
class TestRouter:
def db_for_read(self, model, **hints):
return "other"
def db_for_write(self, model, **hints):
return "default"
@override_settings(DATABASE_ROUTERS=[TestRouter()])
class ContentTypesMultidbTests(TestCase):
databases = {"default", "other"}
def test_multidb(self):
"""
When using multiple databases, ContentType.objects.get_for_model() uses
db_for_read().
"""
ContentType.objects.clear_cache()
with self.assertNumQueries(0, using="default"), self.assertNumQueries(
1, using="other"
):
ContentType.objects.get_for_model(Author)
|
3751eb51860d941c393d930b34c185716ffe8b4a6371cccea4a38bb219dbc365 | import mimetypes
import unittest
from os import path
from urllib.parse import quote
from django.conf.urls.static import static
from django.core.exceptions import ImproperlyConfigured
from django.http import FileResponse, HttpResponseNotModified
from django.test import SimpleTestCase, override_settings
from django.utils.http import http_date
from django.views.static import was_modified_since
from .. import urls
from ..urls import media_dir
@override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls")
class StaticTests(SimpleTestCase):
"""Tests django views in django/views/static.py"""
prefix = "site_media"
def test_serve(self):
"The static view can serve static media"
media_files = ["file.txt", "file.txt.gz", "%2F.txt"]
for filename in media_files:
response = self.client.get("/%s/%s" % (self.prefix, quote(filename)))
response_content = b"".join(response)
file_path = path.join(media_dir, filename)
with open(file_path, "rb") as fp:
self.assertEqual(fp.read(), response_content)
self.assertEqual(
len(response_content), int(response.headers["Content-Length"])
)
self.assertEqual(
mimetypes.guess_type(file_path)[1],
response.get("Content-Encoding", None),
)
def test_chunked(self):
"The static view should stream files in chunks to avoid large memory usage"
response = self.client.get("/%s/%s" % (self.prefix, "long-line.txt"))
first_chunk = next(response.streaming_content)
self.assertEqual(len(first_chunk), FileResponse.block_size)
second_chunk = next(response.streaming_content)
response.close()
# strip() to prevent OS line endings from causing differences
self.assertEqual(len(second_chunk.strip()), 1449)
def test_unknown_mime_type(self):
response = self.client.get("/%s/file.unknown" % self.prefix)
self.assertEqual("application/octet-stream", response.headers["Content-Type"])
response.close()
def test_copes_with_empty_path_component(self):
file_name = "file.txt"
response = self.client.get("/%s//%s" % (self.prefix, file_name))
response_content = b"".join(response)
with open(path.join(media_dir, file_name), "rb") as fp:
self.assertEqual(fp.read(), response_content)
def test_is_modified_since(self):
file_name = "file.txt"
response = self.client.get(
"/%s/%s" % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE="Thu, 1 Jan 1970 00:00:00 GMT",
)
response_content = b"".join(response)
with open(path.join(media_dir, file_name), "rb") as fp:
self.assertEqual(fp.read(), response_content)
def test_not_modified_since(self):
file_name = "file.txt"
response = self.client.get(
"/%s/%s" % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE="Mon, 18 Jan 2038 05:14:07 GMT"
            # This is about 22 hours before the maximum 32-bit Unix time.
            # Remember to fix Django and update this test well before 2038 :)
)
self.assertIsInstance(response, HttpResponseNotModified)
def test_invalid_if_modified_since(self):
"""Handle bogus If-Modified-Since values gracefully
Assume that a file is modified since an invalid timestamp as per RFC
2616, section 14.25.
"""
file_name = "file.txt"
invalid_date = "Mon, 28 May 999999999999 28:25:26 GMT"
response = self.client.get(
"/%s/%s" % (self.prefix, file_name), HTTP_IF_MODIFIED_SINCE=invalid_date
)
response_content = b"".join(response)
with open(path.join(media_dir, file_name), "rb") as fp:
self.assertEqual(fp.read(), response_content)
        self.assertEqual(
            len(response_content), int(response.headers["Content-Length"])
        )
def test_invalid_if_modified_since2(self):
"""Handle even more bogus If-Modified-Since values gracefully
Assume that a file is modified since an invalid timestamp as per RFC
2616, section 14.25.
"""
file_name = "file.txt"
invalid_date = ": 1291108438, Wed, 20 Oct 2010 14:05:00 GMT"
response = self.client.get(
"/%s/%s" % (self.prefix, file_name), HTTP_IF_MODIFIED_SINCE=invalid_date
)
response_content = b"".join(response)
with open(path.join(media_dir, file_name), "rb") as fp:
self.assertEqual(fp.read(), response_content)
        self.assertEqual(
            len(response_content), int(response.headers["Content-Length"])
        )
def test_404(self):
response = self.client.get("/%s/nonexistent_resource" % self.prefix)
self.assertEqual(404, response.status_code)
def test_index(self):
response = self.client.get("/%s/" % self.prefix)
self.assertContains(response, "Index of ./")
# Directories have a trailing slash.
self.assertIn("subdir/", response.context["file_list"])
def test_index_subdir(self):
response = self.client.get("/%s/subdir/" % self.prefix)
self.assertContains(response, "Index of subdir/")
        # Files with a leading dot (e.g. .hidden) aren't displayed.
self.assertEqual(response.context["file_list"], ["visible"])
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"OPTIONS": {
"loaders": [
(
"django.template.loaders.locmem.Loader",
{
"static/directory_index.html": "Test index",
},
),
],
},
}
]
)
def test_index_custom_template(self):
response = self.client.get("/%s/" % self.prefix)
self.assertEqual(response.content, b"Test index")
class StaticHelperTest(StaticTests):
"""
    Test case to make sure the static URL pattern helper works as expected.
"""
def setUp(self):
super().setUp()
self._old_views_urlpatterns = urls.urlpatterns[:]
urls.urlpatterns += static("media/", document_root=media_dir)
def tearDown(self):
super().tearDown()
urls.urlpatterns = self._old_views_urlpatterns
def test_prefix(self):
self.assertEqual(static("test")[0].pattern.regex.pattern, "^test(?P<path>.*)$")
@override_settings(DEBUG=False)
def test_debug_off(self):
"""No URLs are served if DEBUG=False."""
self.assertEqual(static("test"), [])
def test_empty_prefix(self):
with self.assertRaisesMessage(
ImproperlyConfigured, "Empty static prefix not permitted"
):
static("")
def test_special_prefix(self):
"""No URLs are served if prefix contains a netloc part."""
self.assertEqual(static("http://example.org"), [])
self.assertEqual(static("//example.org"), [])
class StaticUtilsTests(unittest.TestCase):
def test_was_modified_since_fp(self):
"""
A floating point mtime does not disturb was_modified_since (#18675).
"""
mtime = 1343416141.107817
header = http_date(mtime)
self.assertFalse(was_modified_since(header, mtime))
def test_was_modified_since_empty_string(self):
self.assertTrue(was_modified_since(header="", mtime=1))
|
2b5ab2ab557b01d346210fbc2ea5ab15b6bda2004faf07ef1f9a23a1794dc8bc | #!/usr/bin/env python
import argparse
import atexit
import copy
import gc
import multiprocessing
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import warnings
from functools import partial
from pathlib import Path
try:
import django
except ImportError as e:
raise RuntimeError(
"Django module not found, reference tests/README.rst for instructions."
) from e
else:
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, connections
from django.test import TestCase, TransactionTestCase
from django.test.runner import _init_worker, get_max_test_processes, parallel_type
from django.test.selenium import SeleniumTestCaseBase
from django.test.utils import NullTimeKeeper, TimeKeeper, get_runner
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.log import DEFAULT_LOGGING
try:
import MySQLdb
except ImportError:
pass
else:
# Ignore informational warnings from QuerySet.explain().
warnings.filterwarnings("ignore", r"\(1003, *", category=MySQLdb.Warning)
# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", RemovedInDjango50Warning)
# Make resource and runtime warning errors to ensure no usage of error prone
# patterns.
warnings.simplefilter("error", ResourceWarning)
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings(
"ignore", "'U' mode is deprecated", DeprecationWarning, module="docutils.io"
)
# Reduce garbage collection frequency to improve performance. Since CPython
# uses refcounting, garbage collection only collects objects with cyclic
# references, which are a minority, so the garbage collection threshold can be
# larger than the default threshold of 700 allocations + deallocations without
# much increase in memory usage.
gc.set_threshold(100_000)
RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, "templates")
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix="django_")
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ["TMPDIR"] = TMPDIR
# Remove the temporary TMPDIR at interpreter exit.
atexit.register(shutil.rmtree, TMPDIR)
# This is a dict mapping RUNTESTS_DIR subdirectory to subdirectories of that
# directory to skip when searching for test modules.
SUBDIRS_TO_SKIP = {
"": {"import_error_package", "test_runner_apps"},
"gis_tests": {"data"},
}
ALWAYS_INSTALLED_APPS = [
"django.contrib.contenttypes",
"django.contrib.auth",
"django.contrib.sites",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.admin.apps.SimpleAdminConfig",
"django.contrib.staticfiles",
]
ALWAYS_MIDDLEWARE = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and isn't in an application in INSTALLED_APPS."
CONTRIB_TESTS_TO_APPS = {
"deprecation": ["django.contrib.flatpages", "django.contrib.redirects"],
"flatpages_tests": ["django.contrib.flatpages"],
"redirects_tests": ["django.contrib.redirects"],
}
def get_test_modules(gis_enabled):
"""
Scan the tests directory and yield the names of all test modules.
The yielded names have either one dotted part like "test_runner" or, in
the case of GIS tests, two dotted parts like "gis_tests.gdal_tests".
"""
discovery_dirs = [""]
if gis_enabled:
# GIS tests are in nested apps
discovery_dirs.append("gis_tests")
else:
SUBDIRS_TO_SKIP[""].add("gis_tests")
for dirname in discovery_dirs:
dirpath = os.path.join(RUNTESTS_DIR, dirname)
subdirs_to_skip = SUBDIRS_TO_SKIP[dirname]
with os.scandir(dirpath) as entries:
for f in entries:
if (
"." in f.name
or os.path.basename(f.name) in subdirs_to_skip
or f.is_file()
or not os.path.exists(os.path.join(f.path, "__init__.py"))
):
continue
test_module = f.name
if dirname:
test_module = dirname + "." + test_module
yield test_module
def get_label_module(label):
"""Return the top-level module part for a test label."""
path = Path(label)
if len(path.parts) == 1:
# Interpret the label as a dotted module name.
return label.split(".")[0]
# Otherwise, interpret the label as a path. Check existence first to
# provide a better error message than relative_to() if it doesn't exist.
if not path.exists():
raise RuntimeError(f"Test label path {label} does not exist")
path = path.resolve()
rel_path = path.relative_to(RUNTESTS_DIR)
return rel_path.parts[0]
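# Illustrative sketch: a dotted label and the equivalent path label resolve to
# the same top-level module (the path form assumes the file exists under
# RUNTESTS_DIR):
#
#     get_label_module("migrations.test_optimizer")     # -> "migrations"
#     get_label_module("migrations/test_optimizer.py")  # -> "migrations"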
def get_filtered_test_modules(start_at, start_after, gis_enabled, test_labels=None):
if test_labels is None:
test_labels = []
# Reduce each test label to just the top-level module part.
label_modules = set()
for label in test_labels:
test_module = get_label_module(label)
label_modules.add(test_module)
# It would be nice to put this validation earlier but it must come after
# django.setup() so that connection.features.gis_enabled can be accessed.
if "gis_tests" in label_modules and not gis_enabled:
print("Aborting: A GIS database backend is required to run gis_tests.")
sys.exit(1)
def _module_match_label(module_name, label):
# Exact or ancestor match.
return module_name == label or module_name.startswith(label + ".")
start_label = start_at or start_after
for test_module in get_test_modules(gis_enabled):
if start_label:
if not _module_match_label(test_module, start_label):
continue
start_label = ""
if not start_at:
assert start_after
# Skip the current one before starting.
continue
# If the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), include the test module.
if not test_labels or any(
_module_match_label(test_module, label_module)
for label_module in label_modules
):
yield test_module
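# Illustrative sketch of the nested matching rule above: a label selects a
# test module when it names the module itself or any ancestor package.
#
#     _module_match_label("gis_tests.gdal_tests", "gis_tests")  # True
#     _module_match_label("gis_tests.gdal_tests", "gis_tests.gdal_tests")  # True
#     _module_match_label("gis_tests", "gis_tests.gdal_tests")  # False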
def setup_collect_tests(start_at, start_after, test_labels=None):
state = {
"INSTALLED_APPS": settings.INSTALLED_APPS,
"ROOT_URLCONF": getattr(settings, "ROOT_URLCONF", ""),
"TEMPLATES": settings.TEMPLATES,
"LANGUAGE_CODE": settings.LANGUAGE_CODE,
"STATIC_URL": settings.STATIC_URL,
"STATIC_ROOT": settings.STATIC_ROOT,
"MIDDLEWARE": settings.MIDDLEWARE,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = "urls"
settings.STATIC_URL = "static/"
settings.STATIC_ROOT = os.path.join(TMPDIR, "static")
settings.TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [TEMPLATE_DIR],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
]
settings.LANGUAGE_CODE = "en"
settings.SITE_ID = 1
settings.MIDDLEWARE = ALWAYS_MIDDLEWARE
settings.MIGRATION_MODULES = {
# This lets us skip creating migrations for the test models as many of
# them depend on one of the following contrib applications.
"auth": None,
"contenttypes": None,
"sessions": None,
}
log_config = copy.deepcopy(DEFAULT_LOGGING)
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config["loggers"]["django"]["level"] = "ERROR"
settings.LOGGING = log_config
settings.SILENCED_SYSTEM_CHECKS = [
"fields.W342", # ForeignKey(unique=True) -> OneToOneField
]
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# This flag must be evaluated after django.setup() because otherwise it can
# raise AppRegistryNotReady when running gis_tests in isolation on some
# backends (e.g. PostGIS).
gis_enabled = connection.features.gis_enabled
test_modules = list(
get_filtered_test_modules(
start_at,
start_after,
gis_enabled,
test_labels=test_labels,
)
)
return test_modules, state
def teardown_collect_tests(state):
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
# This function should be called only after calling django.setup(),
# since it calls connection.features.gis_enabled.
def get_apps_to_install(test_modules):
for test_module in test_modules:
if test_module in CONTRIB_TESTS_TO_APPS:
yield from CONTRIB_TESTS_TO_APPS[test_module]
yield test_module
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
    # @override_settings(INSTALLED_APPS=...) on all test cases).
if connection.features.gis_enabled:
yield "django.contrib.gis"
def setup_run_tests(verbosity, start_at, start_after, test_labels=None):
test_modules, state = setup_collect_tests(
start_at, start_after, test_labels=test_labels
)
installed_apps = set(get_installed())
for app in get_apps_to_install(test_modules):
if app in installed_apps:
continue
if verbosity >= 2:
print(f"Importing application {app}")
settings.INSTALLED_APPS.append(app)
installed_apps.add(app)
apps.set_installed_apps(settings.INSTALLED_APPS)
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception(
"Please define available_apps in TransactionTestCase and its subclasses."
)
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
# Set an environment variable that other code may consult to see if
# Django's own test suite is running.
os.environ["RUNNING_DJANGOS_TEST_SUITE"] = "true"
test_labels = test_labels or test_modules
return test_labels, state
def teardown_run_tests(state):
teardown_collect_tests(state)
# Discard the multiprocessing.util finalizer that tries to remove a
# temporary directory that's already removed by this script's
# atexit.register(shutil.rmtree, TMPDIR) handler. Prevents
# FileNotFoundError at the end of a test run (#27890).
from multiprocessing.util import _finalizer_registry
_finalizer_registry.pop((-100, 0), None)
del os.environ["RUNNING_DJANGOS_TEST_SUITE"]
class ActionSelenium(argparse.Action):
"""
Validate the comma-separated list of requested browsers.
"""
def __call__(self, parser, namespace, values, option_string=None):
try:
import selenium # NOQA
except ImportError as e:
raise ImproperlyConfigured(f"Error loading selenium module: {e}")
browsers = values.split(",")
for browser in browsers:
try:
SeleniumTestCaseBase.import_webdriver(browser)
except ImportError:
raise argparse.ArgumentError(
self, "Selenium browser specification '%s' is not valid." % browser
)
setattr(namespace, self.dest, browsers)
def django_tests(
verbosity,
interactive,
failfast,
keepdb,
reverse,
test_labels,
debug_sql,
parallel,
tags,
exclude_tags,
test_name_patterns,
start_at,
start_after,
pdb,
buffer,
timing,
shuffle,
):
if parallel in {0, "auto"}:
max_parallel = get_max_test_processes()
else:
max_parallel = parallel
if verbosity >= 1:
msg = "Testing against Django installed in '%s'" % os.path.dirname(
django.__file__
)
if max_parallel > 1:
msg += " with up to %d processes" % max_parallel
print(msg)
process_setup_args = (verbosity, start_at, start_after, test_labels)
test_labels, state = setup_run_tests(*process_setup_args)
# Run the test suite, including the extra validation tests.
if not hasattr(settings, "TEST_RUNNER"):
settings.TEST_RUNNER = "django.test.runner.DiscoverRunner"
if parallel in {0, "auto"}:
# This doesn't work before django.setup() on some databases.
if all(conn.features.can_clone_databases for conn in connections.all()):
parallel = max_parallel
else:
parallel = 1
TestRunner = get_runner(settings)
TestRunner.parallel_test_suite.init_worker = partial(
_init_worker,
process_setup=setup_run_tests,
process_setup_args=process_setup_args,
)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
parallel=parallel,
tags=tags,
exclude_tags=exclude_tags,
test_name_patterns=test_name_patterns,
pdb=pdb,
buffer=buffer,
timing=timing,
shuffle=shuffle,
)
failures = test_runner.run_tests(test_labels)
teardown_run_tests(state)
return failures
def collect_test_modules(start_at, start_after):
test_modules, state = setup_collect_tests(start_at, start_after)
teardown_collect_tests(state)
return test_modules
def get_subprocess_args(options):
subprocess_args = [sys.executable, __file__, "--settings=%s" % options.settings]
if options.failfast:
subprocess_args.append("--failfast")
if options.verbosity:
subprocess_args.append("--verbosity=%s" % options.verbosity)
if not options.interactive:
subprocess_args.append("--noinput")
if options.tags:
subprocess_args.append("--tag=%s" % options.tags)
if options.exclude_tags:
subprocess_args.append("--exclude_tag=%s" % options.exclude_tags)
if options.shuffle is not False:
if options.shuffle is None:
subprocess_args.append("--shuffle")
else:
subprocess_args.append("--shuffle=%s" % options.shuffle)
return subprocess_args
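# Example (a sketch): with --failfast and --verbosity=2 on the default
# settings module, the bisection/pairing subprocesses are launched roughly as:
#   python runtests.py --settings=test_sqlite --failfast --verbosity=2 <labels>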
def bisect_tests(bisection_label, options, test_labels, start_at, start_after):
if not test_labels:
test_labels = collect_test_modules(start_at, start_after)
print("***** Bisecting test suite: %s" % " ".join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, "model_inheritance_same_model_name"]:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print("***** Pass %da: Running the first half of the test suite" % iteration)
print("***** Test labels: %s" % " ".join(test_labels_a))
failures_a = subprocess.run(subprocess_args + test_labels_a)
print("***** Pass %db: Running the second half of the test suite" % iteration)
print("***** Test labels: %s" % " ".join(test_labels_b))
print("")
failures_b = subprocess.run(subprocess_args + test_labels_b)
if failures_a.returncode and not failures_b.returncode:
print("***** Problem found in first half. Bisecting again...")
iteration += 1
test_labels = test_labels_a[:-1]
elif failures_b.returncode and not failures_a.returncode:
print("***** Problem found in second half. Bisecting again...")
iteration += 1
test_labels = test_labels_b[:-1]
elif failures_a.returncode and failures_b.returncode:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
def paired_tests(paired_test, options, test_labels, start_at, start_after):
if not test_labels:
test_labels = collect_test_modules(start_at, start_after)
print("***** Trying paired execution")
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, "model_inheritance_same_model_name"]:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
for i, label in enumerate(test_labels):
print(
"***** %d of %d: Check test pairing with %s"
% (i + 1, len(test_labels), label)
)
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print("***** Found problem pair with %s" % label)
return
print("***** No problem pair found")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Django test suite.")
parser.add_argument(
"modules",
nargs="*",
metavar="module",
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".',
)
parser.add_argument(
"-v",
"--verbosity",
default=1,
type=int,
choices=[0, 1, 2, 3],
help="Verbosity level; 0=minimal output, 1=normal output, 2=all output",
)
parser.add_argument(
"--noinput",
action="store_false",
dest="interactive",
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_argument(
"--failfast",
action="store_true",
help="Tells Django to stop running the test suite after first failed test.",
)
parser.add_argument(
"--keepdb",
action="store_true",
help="Tells Django to preserve the test database between runs.",
)
parser.add_argument(
"--settings",
help='Python path to settings module, e.g. "myproject.settings". If '
"this isn't provided, either the DJANGO_SETTINGS_MODULE "
'environment variable or "test_sqlite" will be used.',
)
parser.add_argument(
"--bisect",
help="Bisect the test suite to discover a test that causes a test "
"failure when combined with the named test.",
)
parser.add_argument(
"--pair",
help="Run the test suite in pairs with the named test to find problem pairs.",
)
parser.add_argument(
"--shuffle",
nargs="?",
default=False,
type=int,
metavar="SEED",
help=(
"Shuffle the order of test cases to help check that tests are "
"properly isolated."
),
)
parser.add_argument(
"--reverse",
action="store_true",
help="Sort test suites and test cases in opposite order to debug "
"test side effects not apparent with normal execution lineup.",
)
parser.add_argument(
"--selenium",
action=ActionSelenium,
metavar="BROWSERS",
help="A comma-separated list of browsers to run the Selenium tests against.",
)
parser.add_argument(
"--headless",
action="store_true",
help="Run selenium tests in headless mode, if the browser supports the option.",
)
parser.add_argument(
"--selenium-hub",
help="A URL for a selenium hub instance to use in combination with --selenium.",
)
parser.add_argument(
"--external-host",
default=socket.gethostname(),
help=(
"The external host that can be reached by the selenium hub instance when "
"running Selenium tests via Selenium Hub."
),
)
parser.add_argument(
"--debug-sql",
action="store_true",
help="Turn on the SQL query logger within tests.",
)
# 0 is converted to "auto" or 1 later on, depending on a method used by
# multiprocessing to start subprocesses and on the backend support for
# cloning databases.
parser.add_argument(
"--parallel",
nargs="?",
const="auto",
default=0,
type=parallel_type,
metavar="N",
help=(
'Run tests using up to N parallel processes. Use the value "auto" '
"to run one test process for each processor core."
),
)
parser.add_argument(
"--tag",
dest="tags",
action="append",
help="Run only tests with the specified tags. Can be used multiple times.",
)
parser.add_argument(
"--exclude-tag",
dest="exclude_tags",
action="append",
help="Do not run tests with the specified tag. Can be used multiple times.",
)
parser.add_argument(
"--start-after",
dest="start_after",
help="Run tests starting after the specified top-level module.",
)
parser.add_argument(
"--start-at",
dest="start_at",
help="Run tests starting at the specified top-level module.",
)
parser.add_argument(
"--pdb", action="store_true", help="Runs the PDB debugger on error or failure."
)
parser.add_argument(
"-b",
"--buffer",
action="store_true",
help="Discard output of passing tests.",
)
parser.add_argument(
"--timing",
action="store_true",
help="Output timings, including database set up and total run time.",
)
parser.add_argument(
"-k",
dest="test_name_patterns",
action="append",
help=(
"Only run test methods and classes matching test name pattern. "
"Same as unittest -k option. Can be used multiple times."
),
)
options = parser.parse_args()
using_selenium_hub = options.selenium and options.selenium_hub
if options.selenium_hub and not options.selenium:
parser.error(
"--selenium-hub and --external-host require --selenium to be used."
)
if using_selenium_hub and not options.external_host:
parser.error("--selenium-hub and --external-host must be used together.")
    # Allow including a trailing slash on test labels for tab-completion
    # convenience.
options.modules = [os.path.normpath(labels) for labels in options.modules]
mutually_exclusive_options = [
options.start_at,
options.start_after,
options.modules,
]
enabled_module_options = [
bool(option) for option in mutually_exclusive_options
].count(True)
if enabled_module_options > 1:
print(
"Aborting: --start-at, --start-after, and test labels are mutually "
"exclusive."
)
sys.exit(1)
for opt_name in ["start_at", "start_after"]:
opt_val = getattr(options, opt_name)
if opt_val:
if "." in opt_val:
print(
"Aborting: --%s must be a top-level module."
% opt_name.replace("_", "-")
)
sys.exit(1)
setattr(options, opt_name, os.path.normpath(opt_val))
if options.settings:
os.environ["DJANGO_SETTINGS_MODULE"] = options.settings
else:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_sqlite")
options.settings = os.environ["DJANGO_SETTINGS_MODULE"]
if options.selenium:
if multiprocessing.get_start_method() == "spawn" and options.parallel != 1:
parser.error(
"You cannot use --selenium with parallel tests on this system. "
"Pass --parallel=1 to use --selenium."
)
if not options.tags:
options.tags = ["selenium"]
elif "selenium" not in options.tags:
options.tags.append("selenium")
if options.selenium_hub:
SeleniumTestCaseBase.selenium_hub = options.selenium_hub
SeleniumTestCaseBase.external_host = options.external_host
SeleniumTestCaseBase.headless = options.headless
SeleniumTestCaseBase.browsers = options.selenium
if options.bisect:
bisect_tests(
options.bisect,
options,
options.modules,
options.start_at,
options.start_after,
)
elif options.pair:
paired_tests(
options.pair,
options,
options.modules,
options.start_at,
options.start_after,
)
else:
time_keeper = TimeKeeper() if options.timing else NullTimeKeeper()
with time_keeper.timed("Total run"):
failures = django_tests(
options.verbosity,
options.interactive,
options.failfast,
options.keepdb,
options.reverse,
options.modules,
options.debug_sql,
options.parallel,
options.tags,
options.exclude_tags,
getattr(options, "test_name_patterns", None),
options.start_at,
options.start_after,
options.pdb,
options.buffer,
options.timing,
options.shuffle,
)
time_keeper.print_results()
if failures:
sys.exit(1)
|
a362b906e4b2f5a9bf84d57df1b6164fd72d2f88c0cd770595c512ed6840fc8c | import argparse
import ctypes
import faulthandler
import io
import itertools
import logging
import multiprocessing
import os
import pickle
import random
import sys
import textwrap
import unittest
import warnings
from collections import defaultdict
from contextlib import contextmanager
from importlib import import_module
from io import StringIO
from django.core.management import call_command
from django.db import connections
from django.test import SimpleTestCase, TestCase
from django.test.utils import (
NullTimeKeeper,
TimeKeeper,
captured_stdout,
iter_test_cases,
)
from django.test.utils import setup_databases as _setup_databases
from django.test.utils import setup_test_environment
from django.test.utils import teardown_databases as _teardown_databases
from django.test.utils import teardown_test_environment
from django.utils.crypto import new_hash
from django.utils.datastructures import OrderedSet
from django.utils.deprecation import RemovedInDjango50Warning
try:
import ipdb as pdb
except ImportError:
import pdb
try:
import tblib.pickling_support
except ImportError:
tblib = None
class DebugSQLTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
self.logger = logging.getLogger("django.db.backends")
self.logger.setLevel(logging.DEBUG)
self.debug_sql_stream = None
super().__init__(stream, descriptions, verbosity)
def startTest(self, test):
self.debug_sql_stream = StringIO()
self.handler = logging.StreamHandler(self.debug_sql_stream)
self.logger.addHandler(self.handler)
super().startTest(test)
def stopTest(self, test):
super().stopTest(test)
self.logger.removeHandler(self.handler)
if self.showAll:
self.debug_sql_stream.seek(0)
self.stream.write(self.debug_sql_stream.read())
self.stream.writeln(self.separator2)
def addError(self, test, err):
super().addError(test, err)
if self.debug_sql_stream is None:
# Error before tests e.g. in setUpTestData().
sql = ""
else:
self.debug_sql_stream.seek(0)
sql = self.debug_sql_stream.read()
self.errors[-1] = self.errors[-1] + (sql,)
def addFailure(self, test, err):
super().addFailure(test, err)
self.debug_sql_stream.seek(0)
self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
def addSubTest(self, test, subtest, err):
super().addSubTest(test, subtest, err)
if err is not None:
self.debug_sql_stream.seek(0)
errors = (
self.failures
if issubclass(err[0], test.failureException)
else self.errors
)
errors[-1] = errors[-1] + (self.debug_sql_stream.read(),)
def printErrorList(self, flavour, errors):
for test, err, sql_debug in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln(err)
self.stream.writeln(self.separator2)
self.stream.writeln(sql_debug)
class PDBDebugResult(unittest.TextTestResult):
"""
Custom result class that triggers a PDB session when an error or failure
occurs.
"""
def addError(self, test, err):
super().addError(test, err)
self.debug(err)
def addFailure(self, test, err):
super().addFailure(test, err)
self.debug(err)
def addSubTest(self, test, subtest, err):
if err is not None:
self.debug(err)
super().addSubTest(test, subtest, err)
def debug(self, error):
self._restoreStdout()
self.buffer = False
exc_type, exc_value, traceback = error
print("\nOpening PDB: %r" % exc_value)
pdb.post_mortem(traceback)
class DummyList:
"""
Dummy list class for faking storage of results in unittest.TestResult.
"""
__slots__ = ()
def append(self, item):
pass
class RemoteTestResult(unittest.TestResult):
"""
Extend unittest.TestResult to record events in the child processes so they
can be replayed in the parent process. Events include things like which
tests succeeded or failed.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Fake storage of results to reduce memory usage. These are used by the
# unittest default methods, but here 'events' is used instead.
dummy_list = DummyList()
self.failures = dummy_list
self.errors = dummy_list
self.skipped = dummy_list
self.expectedFailures = dummy_list
self.unexpectedSuccesses = dummy_list
if tblib is not None:
tblib.pickling_support.install()
self.events = []
def __getstate__(self):
        # Make this class picklable by removing the file-like buffer
        # attributes. This is possible since they aren't needed once the
        # result has been pickled and sent back to ParallelTestSuite.
state = self.__dict__.copy()
state.pop("_stdout_buffer", None)
state.pop("_stderr_buffer", None)
state.pop("_original_stdout", None)
state.pop("_original_stderr", None)
return state
@property
def test_index(self):
return self.testsRun - 1
def _confirm_picklable(self, obj):
"""
Confirm that obj can be pickled and unpickled as multiprocessing will
need to pickle the exception in the child process and unpickle it in
        the parent process. If not, let the exception bubble up.
"""
pickle.loads(pickle.dumps(obj))
def _print_unpicklable_subtest(self, test, subtest, pickle_exc):
print(
"""
Subtest failed:
test: {}
subtest: {}
Unfortunately, the subtest that failed cannot be pickled, so the parallel
test runner cannot handle it cleanly. Here is the pickling error:
> {}
You should re-run this test with --parallel=1 to reproduce the failure
with a cleaner failure message.
""".format(
test, subtest, pickle_exc
)
)
def check_picklable(self, test, err):
# Ensure that sys.exc_info() tuples are picklable. This displays a
# clear multiprocessing.pool.RemoteTraceback generated in the child
# process instead of a multiprocessing.pool.MaybeEncodingError, making
# the root cause easier to figure out for users who aren't familiar
# with the multiprocessing module. Since we're in a forked process,
# our best chance to communicate with them is to print to stdout.
try:
self._confirm_picklable(err)
except Exception as exc:
original_exc_txt = repr(err[1])
original_exc_txt = textwrap.fill(
original_exc_txt, 75, initial_indent=" ", subsequent_indent=" "
)
pickle_exc_txt = repr(exc)
pickle_exc_txt = textwrap.fill(
pickle_exc_txt, 75, initial_indent=" ", subsequent_indent=" "
)
if tblib is None:
print(
"""
{} failed:
{}
Unfortunately, tracebacks cannot be pickled, making it impossible for the
parallel test runner to handle this exception cleanly.
In order to see the traceback, you should install tblib:
python -m pip install tblib
""".format(
test, original_exc_txt
)
)
else:
print(
"""
{} failed:
{}
Unfortunately, the exception it raised cannot be pickled, making it impossible
for the parallel test runner to handle it cleanly.
Here's the error encountered while trying to pickle the exception:
{}
You should re-run this test with the --parallel=1 option to reproduce the
failure and get a correct traceback.
""".format(
test, original_exc_txt, pickle_exc_txt
)
)
raise
def check_subtest_picklable(self, test, subtest):
try:
self._confirm_picklable(subtest)
except Exception as exc:
self._print_unpicklable_subtest(test, subtest, exc)
raise
def startTestRun(self):
super().startTestRun()
self.events.append(("startTestRun",))
def stopTestRun(self):
super().stopTestRun()
self.events.append(("stopTestRun",))
def startTest(self, test):
super().startTest(test)
self.events.append(("startTest", self.test_index))
def stopTest(self, test):
super().stopTest(test)
self.events.append(("stopTest", self.test_index))
def addError(self, test, err):
self.check_picklable(test, err)
self.events.append(("addError", self.test_index, err))
super().addError(test, err)
def addFailure(self, test, err):
self.check_picklable(test, err)
self.events.append(("addFailure", self.test_index, err))
super().addFailure(test, err)
def addSubTest(self, test, subtest, err):
# Follow Python's implementation of unittest.TestResult.addSubTest() by
# not doing anything when a subtest is successful.
if err is not None:
# Call check_picklable() before check_subtest_picklable() since
# check_picklable() performs the tblib check.
self.check_picklable(test, err)
self.check_subtest_picklable(test, subtest)
self.events.append(("addSubTest", self.test_index, subtest, err))
super().addSubTest(test, subtest, err)
def addSuccess(self, test):
self.events.append(("addSuccess", self.test_index))
super().addSuccess(test)
def addSkip(self, test, reason):
self.events.append(("addSkip", self.test_index, reason))
super().addSkip(test, reason)
def addExpectedFailure(self, test, err):
        # If tblib isn't installed, pickling the traceback will always fail.
        # However, we don't want tblib to be required for running the tests
        # when they pass or fail as expected. Drop the traceback when an
        # expected failure occurs.
if tblib is None:
err = err[0], err[1], None
self.check_picklable(test, err)
self.events.append(("addExpectedFailure", self.test_index, err))
super().addExpectedFailure(test, err)
def addUnexpectedSuccess(self, test):
self.events.append(("addUnexpectedSuccess", self.test_index))
super().addUnexpectedSuccess(test)
def wasSuccessful(self):
"""Tells whether or not this result was a success."""
failure_types = {"addError", "addFailure", "addSubTest", "addUnexpectedSuccess"}
return all(e[0] not in failure_types for e in self.events)
def _exc_info_to_string(self, err, test):
        # Make this method a no-op. It only powers the default unittest behavior
# for recording errors, but this class pickles errors into 'events'
# instead.
return ""
class RemoteTestRunner:
"""
Run tests and record everything but don't display anything.
The implementation matches the unpythonic coding style of unittest2.
"""
resultclass = RemoteTestResult
def __init__(self, failfast=False, resultclass=None, buffer=False):
self.failfast = failfast
self.buffer = buffer
if resultclass is not None:
self.resultclass = resultclass
def run(self, test):
result = self.resultclass()
unittest.registerResult(result)
result.failfast = self.failfast
result.buffer = self.buffer
test(result)
return result
def get_max_test_processes():
"""
The maximum number of test processes when using the --parallel option.
"""
# The current implementation of the parallel test runner requires
# multiprocessing to start subprocesses with fork() or spawn().
if multiprocessing.get_start_method() not in {"fork", "spawn"}:
return 1
try:
return int(os.environ["DJANGO_TEST_PROCESSES"])
except KeyError:
return multiprocessing.cpu_count()
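# Example (a sketch): DJANGO_TEST_PROCESSES overrides the cpu_count() default
# when --parallel is used without an explicit N, e.g.:
#   DJANGO_TEST_PROCESSES=2 python manage.py test --parallel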
def parallel_type(value):
"""Parse value passed to the --parallel option."""
if value == "auto":
return value
try:
return int(value)
except ValueError:
raise argparse.ArgumentTypeError(
f"{value!r} is not an integer or the string 'auto'"
)
_worker_id = 0
def _init_worker(
counter,
initial_settings=None,
serialized_contents=None,
process_setup=None,
process_setup_args=None,
):
"""
Switch to databases dedicated to this worker.
This helper lives at module-level because of the multiprocessing module's
requirements.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
start_method = multiprocessing.get_start_method()
if start_method == "spawn":
process_setup(*process_setup_args)
setup_test_environment()
for alias in connections:
connection = connections[alias]
if start_method == "spawn":
# Restore initial settings in spawned processes.
connection.settings_dict.update(initial_settings[alias])
if value := serialized_contents.get(alias):
connection._test_serialized_contents = value
connection.creation.setup_worker_connection(_worker_id)
with captured_stdout():
call_command("check", databases=connections)
def _run_subsuite(args):
"""
Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult.
This helper lives at module-level and its arguments are wrapped in a tuple
because of the multiprocessing module's requirements.
"""
runner_class, subsuite_index, subsuite, failfast, buffer = args
runner = runner_class(failfast=failfast, buffer=buffer)
result = runner.run(subsuite)
return subsuite_index, result.events
class ParallelTestSuite(unittest.TestSuite):
"""
Run a series of tests in parallel in several processes.
While the unittest module's documentation implies that orchestrating the
execution of tests is the responsibility of the test runner, in practice,
it appears that TestRunner classes are more concerned with formatting and
displaying test results.
Since there are fewer use cases for customizing TestSuite than TestRunner,
implementing parallelization at the level of the TestSuite improves
interoperability with existing custom test runners. A single instance of a
test runner can still collect results from all tests without being aware
that they have been run in parallel.
"""
# In case someone wants to modify these in a subclass.
init_worker = _init_worker
run_subsuite = _run_subsuite
runner_class = RemoteTestRunner
def __init__(self, subsuites, processes, failfast=False, buffer=False):
self.subsuites = subsuites
self.processes = processes
self.failfast = failfast
self.buffer = buffer
self.initial_settings = None
self.serialized_contents = None
super().__init__()
def run(self, result):
"""
Distribute test cases across workers.
Return an identifier of each test case with its result in order to use
imap_unordered to show results as soon as they're available.
To minimize pickling errors when getting results from workers:
- pass back numeric indexes in self.subsuites instead of tests
- make tracebacks picklable with tblib, if available
Even with tblib, errors may still occur for dynamically created
exception classes which cannot be unpickled.
"""
self.initialize_suite()
counter = multiprocessing.Value(ctypes.c_int, 0)
pool = multiprocessing.Pool(
processes=self.processes,
initializer=self.init_worker,
initargs=[
counter,
self.initial_settings,
self.serialized_contents,
],
)
args = [
(self.runner_class, index, subsuite, self.failfast, self.buffer)
for index, subsuite in enumerate(self.subsuites)
]
test_results = pool.imap_unordered(self.run_subsuite.__func__, args)
while True:
if result.shouldStop:
pool.terminate()
break
try:
subsuite_index, events = test_results.next(timeout=0.1)
except multiprocessing.TimeoutError:
continue
except StopIteration:
pool.close()
break
tests = list(self.subsuites[subsuite_index])
for event in events:
event_name = event[0]
handler = getattr(result, event_name, None)
if handler is None:
continue
test = tests[event[1]]
args = event[2:]
handler(test, *args)
pool.join()
return result
def __iter__(self):
return iter(self.subsuites)
def initialize_suite(self):
if multiprocessing.get_start_method() == "spawn":
self.initial_settings = {
alias: connections[alias].settings_dict for alias in connections
}
self.serialized_contents = {
alias: connections[alias]._test_serialized_contents
for alias in connections
if alias in self.serialized_aliases
}
class Shuffler:
"""
This class implements shuffling with a special consistency property.
Consistency means that, for a given seed and key function, if two sets of
items are shuffled, the resulting order will agree on the intersection of
the two sets. For example, if items are removed from an original set, the
shuffled order for the new set will be the shuffled order of the original
set restricted to the smaller set.
"""
# This doesn't need to be cryptographically strong, so use what's fastest.
hash_algorithm = "md5"
@classmethod
def _hash_text(cls, text):
h = new_hash(cls.hash_algorithm, usedforsecurity=False)
h.update(text.encode("utf-8"))
return h.hexdigest()
def __init__(self, seed=None):
if seed is None:
# Limit seeds to 10 digits for simpler output.
seed = random.randint(0, 10**10 - 1)
seed_source = "generated"
else:
seed_source = "given"
self.seed = seed
self.seed_source = seed_source
@property
def seed_display(self):
return f"{self.seed!r} ({self.seed_source})"
def _hash_item(self, item, key):
text = "{}{}".format(self.seed, key(item))
return self._hash_text(text)
def shuffle(self, items, key):
"""
Return a new list of the items in a shuffled order.
The `key` is a function that accepts an item in `items` and returns
a string unique for that item that can be viewed as a string id. The
order of the return value is deterministic. It depends on the seed
and key function but not on the original order.
"""
hashes = {}
for item in items:
hashed = self._hash_item(item, key)
if hashed in hashes:
msg = "item {!r} has same hash {!r} as item {!r}".format(
item,
hashed,
hashes[hashed],
)
raise RuntimeError(msg)
hashes[hashed] = item
return [hashes[hashed] for hashed in sorted(hashes)]
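# Example (a sketch) of the consistency property: for a fixed seed and key
# function, shuffling a subset preserves the relative order of the full
# shuffle.
#   shuffler = Shuffler(seed=4242)
#   full = shuffler.shuffle(["a", "b", "c", "d"], key=lambda item: item)
#   subset = shuffler.shuffle(["a", "c", "d"], key=lambda item: item)
#   assert subset == [item for item in full if item != "b"]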
class DiscoverRunner:
"""A Django test runner that uses unittest2 test discovery."""
test_suite = unittest.TestSuite
parallel_test_suite = ParallelTestSuite
test_runner = unittest.TextTestRunner
test_loader = unittest.defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
def __init__(
self,
pattern=None,
top_level=None,
verbosity=1,
interactive=True,
failfast=False,
keepdb=False,
reverse=False,
debug_mode=False,
debug_sql=False,
parallel=0,
tags=None,
exclude_tags=None,
test_name_patterns=None,
pdb=False,
buffer=False,
enable_faulthandler=True,
timing=False,
shuffle=False,
logger=None,
**kwargs,
):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self.keepdb = keepdb
self.reverse = reverse
self.debug_mode = debug_mode
self.debug_sql = debug_sql
self.parallel = parallel
self.tags = set(tags or [])
self.exclude_tags = set(exclude_tags or [])
if not faulthandler.is_enabled() and enable_faulthandler:
try:
faulthandler.enable(file=sys.stderr.fileno())
except (AttributeError, io.UnsupportedOperation):
faulthandler.enable(file=sys.__stderr__.fileno())
self.pdb = pdb
if self.pdb and self.parallel > 1:
raise ValueError(
"You cannot use --pdb with parallel tests; pass --parallel=1 to use it."
)
self.buffer = buffer
self.test_name_patterns = None
self.time_keeper = TimeKeeper() if timing else NullTimeKeeper()
if test_name_patterns:
# unittest does not export the _convert_select_pattern function
# that converts command-line arguments to patterns.
self.test_name_patterns = {
pattern if "*" in pattern else "*%s*" % pattern
for pattern in test_name_patterns
}
self.shuffle = shuffle
self._shuffler = None
self.logger = logger
@classmethod
def add_arguments(cls, parser):
parser.add_argument(
"-t",
"--top-level-directory",
dest="top_level",
help="Top level of project for unittest discovery.",
)
parser.add_argument(
"-p",
"--pattern",
default="test*.py",
help="The test matching pattern. Defaults to test*.py.",
)
parser.add_argument(
"--keepdb", action="store_true", help="Preserves the test DB between runs."
)
parser.add_argument(
"--shuffle",
nargs="?",
default=False,
type=int,
metavar="SEED",
help="Shuffles test case order.",
)
parser.add_argument(
"-r",
"--reverse",
action="store_true",
help="Reverses test case order.",
)
parser.add_argument(
"--debug-mode",
action="store_true",
help="Sets settings.DEBUG to True.",
)
parser.add_argument(
"-d",
"--debug-sql",
action="store_true",
help="Prints logged SQL queries on failure.",
)
parser.add_argument(
"--parallel",
nargs="?",
const="auto",
default=0,
type=parallel_type,
metavar="N",
help=(
"Run tests using up to N parallel processes. Use the value "
'"auto" to run one test process for each processor core.'
),
)
parser.add_argument(
"--tag",
action="append",
dest="tags",
help="Run only tests with the specified tag. Can be used multiple times.",
)
parser.add_argument(
"--exclude-tag",
action="append",
dest="exclude_tags",
help="Do not run tests with the specified tag. Can be used multiple times.",
)
parser.add_argument(
"--pdb",
action="store_true",
help="Runs a debugger (pdb, or ipdb if installed) on error or failure.",
)
parser.add_argument(
"-b",
"--buffer",
action="store_true",
help="Discard output from passing tests.",
)
parser.add_argument(
"--no-faulthandler",
action="store_false",
dest="enable_faulthandler",
help="Disables the Python faulthandler module during tests.",
)
parser.add_argument(
"--timing",
action="store_true",
help=("Output timings, including database set up and total run time."),
)
parser.add_argument(
"-k",
action="append",
dest="test_name_patterns",
help=(
"Only run test methods and classes that match the pattern "
"or substring. Can be used multiple times. Same as "
"unittest -k option."
),
)
@property
def shuffle_seed(self):
if self._shuffler is None:
return None
return self._shuffler.seed
def log(self, msg, level=None):
"""
Log the message at the given logging level (the default is INFO).
If a logger isn't set, the message is instead printed to the console,
respecting the configured verbosity. A verbosity of 0 prints no output,
a verbosity of 1 prints INFO and above, and a verbosity of 2 or higher
prints all levels.
"""
if level is None:
level = logging.INFO
if self.logger is None:
if self.verbosity <= 0 or (self.verbosity == 1 and level < logging.INFO):
return
print(msg)
else:
self.logger.log(level, msg)
def setup_test_environment(self, **kwargs):
setup_test_environment(debug=self.debug_mode)
unittest.installHandler()
def setup_shuffler(self):
if self.shuffle is False:
return
shuffler = Shuffler(seed=self.shuffle)
self.log(f"Using shuffle seed: {shuffler.seed_display}")
self._shuffler = shuffler
@contextmanager
def load_with_patterns(self):
original_test_name_patterns = self.test_loader.testNamePatterns
self.test_loader.testNamePatterns = self.test_name_patterns
try:
yield
finally:
# Restore the original patterns.
self.test_loader.testNamePatterns = original_test_name_patterns
def load_tests_for_label(self, label, discover_kwargs):
label_as_path = os.path.abspath(label)
tests = None
# If a module, or "module.ClassName[.method_name]", just run those.
if not os.path.exists(label_as_path):
with self.load_with_patterns():
tests = self.test_loader.loadTestsFromName(label)
if tests.countTestCases():
return tests
# Try discovery if "label" is a package or directory.
is_importable, is_package = try_importing(label)
if is_importable:
if not is_package:
return tests
elif not os.path.isdir(label_as_path):
if os.path.exists(label_as_path):
assert tests is None
raise RuntimeError(
f"One of the test labels is a path to a file: {label!r}, "
f"which is not supported. Use a dotted module name or "
f"path to a directory instead."
)
return tests
kwargs = discover_kwargs.copy()
if os.path.isdir(label_as_path) and not self.top_level:
kwargs["top_level_dir"] = find_top_level(label_as_path)
with self.load_with_patterns():
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this run,
# to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
return tests
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
if extra_tests is not None:
warnings.warn(
"The extra_tests argument is deprecated.",
RemovedInDjango50Warning,
stacklevel=2,
)
test_labels = test_labels or ["."]
extra_tests = extra_tests or []
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs["pattern"] = self.pattern
if self.top_level is not None:
discover_kwargs["top_level_dir"] = self.top_level
self.setup_shuffler()
all_tests = []
for label in test_labels:
tests = self.load_tests_for_label(label, discover_kwargs)
all_tests.extend(iter_test_cases(tests))
all_tests.extend(iter_test_cases(extra_tests))
if self.tags or self.exclude_tags:
if self.tags:
self.log(
"Including test tag(s): %s." % ", ".join(sorted(self.tags)),
level=logging.DEBUG,
)
if self.exclude_tags:
self.log(
"Excluding test tag(s): %s." % ", ".join(sorted(self.exclude_tags)),
level=logging.DEBUG,
)
all_tests = filter_tests_by_tags(all_tests, self.tags, self.exclude_tags)
# Put the failures detected at load time first for quicker feedback.
# _FailedTest objects include things like test modules that couldn't be
# found or that couldn't be loaded due to syntax errors.
test_types = (unittest.loader._FailedTest, *self.reorder_by)
all_tests = list(
reorder_tests(
all_tests,
test_types,
shuffler=self._shuffler,
reverse=self.reverse,
)
)
self.log("Found %d test(s)." % len(all_tests))
suite = self.test_suite(all_tests)
if self.parallel > 1:
subsuites = partition_suite_by_case(suite)
# Since tests are distributed across processes on a per-TestCase
# basis, there's no need for more processes than TestCases.
processes = min(self.parallel, len(subsuites))
# Update also "parallel" because it's used to determine the number
# of test databases.
self.parallel = processes
if processes > 1:
suite = self.parallel_test_suite(
subsuites,
processes,
self.failfast,
self.buffer,
)
return suite
def setup_databases(self, **kwargs):
return _setup_databases(
self.verbosity,
self.interactive,
time_keeper=self.time_keeper,
keepdb=self.keepdb,
debug_sql=self.debug_sql,
parallel=self.parallel,
**kwargs,
)
def get_resultclass(self):
if self.debug_sql:
return DebugSQLTextTestResult
elif self.pdb:
return PDBDebugResult
def get_test_runner_kwargs(self):
return {
"failfast": self.failfast,
"resultclass": self.get_resultclass(),
"verbosity": self.verbosity,
"buffer": self.buffer,
}
def run_checks(self, databases):
# Checks are run after database creation since some checks require
# database access.
call_command("check", verbosity=self.verbosity, databases=databases)
def run_suite(self, suite, **kwargs):
kwargs = self.get_test_runner_kwargs()
runner = self.test_runner(**kwargs)
try:
return runner.run(suite)
finally:
if self._shuffler is not None:
seed_display = self._shuffler.seed_display
self.log(f"Used shuffle seed: {seed_display}")
def teardown_databases(self, old_config, **kwargs):
"""Destroy all the non-mirror databases."""
_teardown_databases(
old_config,
verbosity=self.verbosity,
parallel=self.parallel,
keepdb=self.keepdb,
)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return (
len(result.failures) + len(result.errors) + len(result.unexpectedSuccesses)
)
def _get_databases(self, suite):
databases = {}
for test in iter_test_cases(suite):
test_databases = getattr(test, "databases", None)
if test_databases == "__all__":
test_databases = connections
if test_databases:
serialized_rollback = getattr(test, "serialized_rollback", False)
databases.update(
(alias, serialized_rollback or databases.get(alias, False))
for alias in test_databases
)
return databases
def get_databases(self, suite):
databases = self._get_databases(suite)
unused_databases = [alias for alias in connections if alias not in databases]
if unused_databases:
self.log(
"Skipping setup of unused database(s): %s."
% ", ".join(sorted(unused_databases)),
level=logging.DEBUG,
)
return databases
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
Return the number of tests that failed.
"""
if extra_tests is not None:
warnings.warn(
"The extra_tests argument is deprecated.",
RemovedInDjango50Warning,
stacklevel=2,
)
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
databases = self.get_databases(suite)
suite.serialized_aliases = set(
alias for alias, serialize in databases.items() if serialize
)
with self.time_keeper.timed("Total database setup"):
old_config = self.setup_databases(
aliases=databases,
serialized_aliases=suite.serialized_aliases,
)
run_failed = False
try:
self.run_checks(databases)
result = self.run_suite(suite)
except Exception:
run_failed = True
raise
finally:
try:
with self.time_keeper.timed("Total database teardown"):
self.teardown_databases(old_config)
self.teardown_test_environment()
except Exception:
                # Silence teardown exceptions if an exception was raised
                # during the run, to avoid shadowing it.
if not run_failed:
raise
self.time_keeper.print_results()
return self.suite_result(suite, result)
def try_importing(label):
"""
Try importing a test label, and return (is_importable, is_package).
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
return (False, False)
return (True, hasattr(mod, "__path__"))
def find_top_level(top_level):
# Try to be a bit smarter than unittest about finding the default top-level
# for a given directory path, to avoid breaking relative imports.
# (Unittest's default is to set top-level equal to the path, which means
# relative imports will result in "Attempted relative import in
# non-package.").
# We'd be happy to skip this and require dotted module paths (which don't
# cause this problem) instead of file paths (which do), but in the case of
# a directory in the cwd, which would be equally valid if considered as a
# top-level module or as a directory path, unittest unfortunately prefers
# the latter.
while True:
init_py = os.path.join(top_level, "__init__.py")
if not os.path.exists(init_py):
break
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
return top_level
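# Example (a sketch): with pkg/__init__.py and pkg/sub/__init__.py on disk,
# find_top_level("/src/pkg/sub") walks up past both package directories and
# returns "/src", the first ancestor without an __init__.py.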
def _class_shuffle_key(cls):
return f"{cls.__module__}.{cls.__qualname__}"
def shuffle_tests(tests, shuffler):
"""
Return an iterator over the given tests in a shuffled order, keeping tests
next to other tests of their class.
`tests` should be an iterable of tests.
"""
tests_by_type = {}
for _, class_tests in itertools.groupby(tests, type):
class_tests = list(class_tests)
test_type = type(class_tests[0])
class_tests = shuffler.shuffle(class_tests, key=lambda test: test.id())
tests_by_type[test_type] = class_tests
classes = shuffler.shuffle(tests_by_type, key=_class_shuffle_key)
return itertools.chain(*(tests_by_type[cls] for cls in classes))
def reorder_test_bin(tests, shuffler=None, reverse=False):
"""
Return an iterator that reorders the given tests, keeping tests next to
other tests of their class.
`tests` should be an iterable of tests that supports reversed().
"""
if shuffler is None:
if reverse:
return reversed(tests)
# The function must return an iterator.
return iter(tests)
tests = shuffle_tests(tests, shuffler)
if not reverse:
return tests
# Arguments to reversed() must be reversible.
return reversed(list(tests))
def reorder_tests(tests, classes, reverse=False, shuffler=None):
"""
Reorder an iterable of tests, grouping by the given TestCase classes.
This function also removes any duplicates and reorders so that tests of the
same type are consecutive.
The result is returned as an iterator. `classes` is a sequence of types.
Tests that are instances of `classes[0]` are grouped first, followed by
instances of `classes[1]`, etc. Tests that are not instances of any of the
classes are grouped last.
If `reverse` is True, the tests within each `classes` group are reversed,
but without reversing the order of `classes` itself.
The `shuffler` argument is an optional instance of this module's `Shuffler`
class. If provided, tests will be shuffled within each `classes` group, but
keeping tests with other tests of their TestCase class. Reversing is
applied after shuffling to allow reversing the same random order.
"""
# Each bin maps TestCase class to OrderedSet of tests. This permits tests
# to be grouped by TestCase class even if provided non-consecutively.
bins = [defaultdict(OrderedSet) for i in range(len(classes) + 1)]
*class_bins, last_bin = bins
for test in tests:
for test_bin, test_class in zip(class_bins, classes):
if isinstance(test, test_class):
break
else:
test_bin = last_bin
test_bin[type(test)].add(test)
for test_bin in bins:
# Call list() since reorder_test_bin()'s input must support reversed().
tests = list(itertools.chain.from_iterable(test_bin.values()))
yield from reorder_test_bin(tests, shuffler=shuffler, reverse=reverse)
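# Example (a sketch, with hypothetical tests): given simple_t (a
# SimpleTestCase) and django_t (a TestCase),
#   reorder_tests([simple_t, django_t, simple_t], (TestCase, SimpleTestCase))
# yields django_t first (first bin), then simple_t once; the duplicate is
# dropped by the per-class OrderedSet.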
def partition_suite_by_case(suite):
"""Partition a test suite by test case, preserving the order of tests."""
suite_class = type(suite)
all_tests = iter_test_cases(suite)
return [suite_class(tests) for _, tests in itertools.groupby(all_tests, type)]
def test_match_tags(test, tags, exclude_tags):
if isinstance(test, unittest.loader._FailedTest):
        # Tests that couldn't load always match so that load failures (e.g.
        # syntax errors) aren't silently filtered out.
return True
test_tags = set(getattr(test, "tags", []))
test_fn_name = getattr(test, "_testMethodName", str(test))
if hasattr(test, test_fn_name):
test_fn = getattr(test, test_fn_name)
test_fn_tags = list(getattr(test_fn, "tags", []))
test_tags = test_tags.union(test_fn_tags)
if tags and test_tags.isdisjoint(tags):
return False
return test_tags.isdisjoint(exclude_tags)
def filter_tests_by_tags(tests, tags, exclude_tags):
"""Return the matching tests as an iterator."""
return (test for test in tests if test_match_tags(test, tags, exclude_tags))
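# Example (a sketch): with tags={"slow"} and exclude_tags={"selenium"}, a test
# tagged {"slow", "db"} matches, a test tagged {"slow", "selenium"} does not,
# and an untagged test is filtered out because it shares no tag with `tags`.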
|
76663a2c7dac40c05ab18c7b58112fb611d709e2ee70767268a8963934c9c686 | import asyncio
import difflib
import json
import logging
import posixpath
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy, deepcopy
from difflib import get_close_matches
from functools import wraps
from unittest.suite import _DebugResult
from unittest.util import safe_repr
from urllib.parse import (
parse_qsl,
unquote,
urlencode,
urljoin,
urlparse,
urlsplit,
urlunparse,
)
from urllib.request import url2pathname
from asgiref.sync import async_to_sync
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler
from django.core.signals import setting_changed
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.test.client import AsyncClient, Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import template_rendered
from django.test.utils import (
CaptureQueriesContext,
ContextList,
compare_xml,
modify_settings,
override_settings,
)
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import classproperty
from django.utils.version import PY310
from django.views.static import serve
__all__ = (
"TestCase",
"TransactionTestCase",
"SimpleTestCase",
"skipIfDBFeature",
"skipUnlessDBFeature",
)
def to_list(value):
"""Put value into a list if it's not already one."""
if not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = "%s\n%s" % (msg, e)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super().__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed,
self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s"
% (
executed,
self.num,
"\n".join(
"%d. %s" % (i, query["sql"])
for i, query in enumerate(self.captured_queries, start=1)
),
),
)
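# Example (a sketch): this context backs the assertNumQueries() assertion
# defined later in this module, e.g.:
#   with self.assertNumQueries(1):
#       Author.objects.count()  # Author is a hypothetical model
# A count mismatch fails the test and prints the captured SQL.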
class _AssertTemplateUsedContext:
def __init__(self, test_case, template_name, msg_prefix="", count=None):
self.test_case = test_case
self.template_name = template_name
self.msg_prefix = msg_prefix
self.count = count
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
self.test_case._assert_template_used(
self.template_name,
self.rendered_template_names,
self.msg_prefix,
self.count,
)
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
self.test()
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
self.test_case.assertFalse(
self.template_name in self.rendered_template_names,
f"{self.msg_prefix}Template '{self.template_name}' was used "
f"unexpectedly in rendering the response",
)
class DatabaseOperationForbidden(AssertionError):
pass
class _DatabaseFailure:
def __init__(self, wrapped, message):
self.wrapped = wrapped
self.message = message
def __call__(self):
raise DatabaseOperationForbidden(self.message)
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
async_client_class = AsyncClient
_overridden_settings = None
_modified_settings = None
databases = set()
_disallowed_database_msg = (
"Database %(operation)s to %(alias)r are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to ensure "
"proper test isolation or add %(alias)r to %(test)s.databases to silence "
"this failure."
)
_disallowed_connection_methods = [
("connect", "connections"),
("temporary_connection", "connections"),
("cursor", "queries"),
("chunked_cursor", "queries"),
]
@classmethod
def setUpClass(cls):
super().setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
cls.addClassCleanup(cls._cls_overridden_context.disable)
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
cls.addClassCleanup(cls._cls_modified_context.disable)
cls._add_databases_failures()
cls.addClassCleanup(cls._remove_databases_failures)
@classmethod
def _validate_databases(cls):
if cls.databases == "__all__":
return frozenset(connections)
for alias in cls.databases:
if alias not in connections:
message = (
"%s.%s.databases refers to %r which is not defined in "
"settings.DATABASES."
% (
cls.__module__,
cls.__qualname__,
alias,
)
)
close_matches = get_close_matches(alias, list(connections))
if close_matches:
message += " Did you mean %r?" % close_matches[0]
raise ImproperlyConfigured(message)
return frozenset(cls.databases)
@classmethod
def _add_databases_failures(cls):
cls.databases = cls._validate_databases()
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, operation in cls._disallowed_connection_methods:
message = cls._disallowed_database_msg % {
"test": "%s.%s" % (cls.__module__, cls.__qualname__),
"alias": alias,
"operation": operation,
}
method = getattr(connection, name)
setattr(connection, name, _DatabaseFailure(method, message))
@classmethod
def _remove_databases_failures(cls):
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, _ in cls._disallowed_connection_methods:
method = getattr(connection, name)
setattr(connection, name, method.wrapped)
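    # Example (a sketch): with the _DatabaseFailure wrappers installed by
    # _add_databases_failures(), any query from a SimpleTestCase raises
    # DatabaseOperationForbidden unless the alias is opted into, e.g.:
    #   class MyTests(SimpleTestCase):
    #       databases = {"default"}  # allow queries against "default"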
def __call__(self, result=None):
"""
        Wrapper around default __call__ method to perform common Django test
        setup. This means that user-defined test cases aren't required to
        include a call to super().setUp().
"""
self._setup_and_call(result)
def debug(self):
"""Perform the same as __call__(), without catching the exception."""
debug_result = _DebugResult()
self._setup_and_call(debug_result, debug=True)
def _setup_and_call(self, result, debug=False):
"""
Perform the following in order: pre-setup, run test, post-teardown,
skipping pre/post hooks if test is set to be skipped.
If debug=True, reraise any errors in setup and use super().debug()
instead of __call__() to run the test.
"""
testMethod = getattr(self, self._testMethodName)
skipped = getattr(self.__class__, "__unittest_skip__", False) or getattr(
testMethod, "__unittest_skip__", False
)
# Convert async test methods.
if asyncio.iscoroutinefunction(testMethod):
setattr(self, self._testMethodName, async_to_sync(testMethod))
if not skipped:
try:
self._pre_setup()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
if debug:
super().debug()
else:
super().__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""
Perform pre-test setup:
* Create a test client.
* Clear the mail test outbox.
"""
self.client = self.client_class()
self.async_client = self.async_client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the
original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
        A context manager that temporarily applies changes to a list setting
        and reverts back to the original value when exiting the context.
"""
return modify_settings(**kwargs)
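    # Example (a sketch) of the per-operation syntax modify_settings()
    # accepts:
    #   with self.modify_settings(
    #       INSTALLED_APPS={"append": "myapp", "remove": "django.contrib.gis"}
    #   ):
    #       ...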
def assertRedirects(
self,
response,
expected_url,
status_code=302,
target_status_code=200,
msg_prefix="",
fetch_redirect_response=True,
):
"""
Assert that a response redirected to a specific URL and that the
redirect URL can be loaded.
Won't work for external links since it uses the test client to do a
request (use fetch_redirect_response=False to check such links without
fetching them).
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, "redirect_chain"):
# The request was a followed redirect
self.assertTrue(
response.redirect_chain,
msg_prefix
+ (
"Response didn't redirect as expected: Response code was %d "
"(expected %d)"
)
% (response.status_code, status_code),
)
self.assertEqual(
response.redirect_chain[0][1],
status_code,
msg_prefix
+ (
"Initial response didn't redirect as expected: Response code was "
"%d (expected %d)"
)
% (response.redirect_chain[0][1], status_code),
)
url, status_code = response.redirect_chain[-1]
self.assertEqual(
response.status_code,
target_status_code,
msg_prefix
+ (
"Response didn't redirect as expected: Final Response code was %d "
"(expected %d)"
)
% (response.status_code, target_status_code),
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code,
status_code,
msg_prefix
+ (
"Response didn't redirect as expected: Response code was %d "
"(expected %d)"
)
% (response.status_code, status_code),
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith("/"):
url = urljoin(response.request["PATH_INFO"], url)
path = urljoin(response.request["PATH_INFO"], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
domain, port = split_domain_port(netloc)
if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
raise ValueError(
"The test client is unable to fetch remote URLs (got %s). "
"If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
"Otherwise, use "
"assertRedirects(..., fetch_redirect_response=False)."
% (url, domain)
)
# Get the redirection page, using the same client that was used
# to obtain the original response.
extra = response.client.extra or {}
redirect_response = response.client.get(
path,
QueryDict(query),
secure=(scheme == "https"),
**extra,
)
self.assertEqual(
redirect_response.status_code,
target_status_code,
msg_prefix
+ (
"Couldn't retrieve redirection page '%s': response code was %d "
"(expected %d)"
)
% (path, redirect_response.status_code, target_status_code),
)
self.assertURLEqual(
url,
expected_url,
msg_prefix
+ "Response redirected to '%s', expected '%s'" % (url, expected_url),
)
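    # Illustrative sketch (not part of Django); the paths are hypothetical:
    #
    #     response = self.client.get("/old-path/")
    #     self.assertRedirects(response, "/new-path/", status_code=301)
    #
    # For redirects to hosts the test client cannot fetch, pass
    # fetch_redirect_response=False so only the redirect URL is compared.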
def assertURLEqual(self, url1, url2, msg_prefix=""):
"""
Assert that two URLs are the same, ignoring the order of query string
parameters except for parameters with the same name.
For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but
/path/?a=1&a=2 isn't equal to /path/?a=2&a=1.
"""
def normalize(url):
"""Sort the URL's query string parameters."""
url = str(url) # Coerce reverse_lazy() URLs.
scheme, netloc, path, params, query, fragment = urlparse(url)
query_parts = sorted(parse_qsl(query))
return urlunparse(
(scheme, netloc, path, params, urlencode(query_parts), fragment)
)
self.assertEqual(
normalize(url1),
normalize(url2),
msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2),
)
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (
hasattr(response, "render")
and callable(response.render)
and not response.is_rendered
):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code,
status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code),
)
if response.streaming:
content = b"".join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = str(text)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(
self, content, None, "Response's content is not valid HTML:"
)
text = assert_and_parse_html(
self, text, None, "Second argument is not valid HTML:"
)
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(
self, response, text, count=None, status_code=200, msg_prefix="", html=False
):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html
)
if count is not None:
self.assertEqual(
real_count,
count,
msg_prefix
+ "Found %d instances of %s in response (expected %d)"
% (real_count, text_repr, count),
)
else:
self.assertTrue(
real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr
)
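    # Illustrative sketch (not part of Django); the path and strings are
    # hypothetical:
    #
    #     response = self.client.get("/greeting/")
    #     self.assertContains(response, "hello")           # at least once
    #     self.assertContains(response, "hello", count=2)  # exactly twice
    #     self.assertContains(response, "<p>hello</p>", html=True)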
def assertNotContains(
self, response, text, status_code=200, msg_prefix="", html=False
):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html
)
self.assertEqual(
real_count, 0, msg_prefix + "Response should not contain %s" % text_repr
)
def _check_test_client_response(self, response, attribute, method_name):
"""
Raise a ValueError if the given response doesn't have the required
attribute.
"""
if not hasattr(response, attribute):
raise ValueError(
f"{method_name}() is only usable on responses fetched using "
"the Django test Client."
)
def _assert_form_error(self, form, field, errors, msg_prefix, form_repr):
if not form.is_bound:
self.fail(
f"{msg_prefix}The {form_repr} is not bound, it will never have any "
f"errors."
)
if field is not None and field not in form.fields:
self.fail(
f"{msg_prefix}The {form_repr} does not contain the field {field!r}."
)
if field is None:
field_errors = form.non_field_errors()
failure_message = f"The non-field errors of {form_repr} don't match."
else:
field_errors = form.errors.get(field, [])
failure_message = (
f"The errors of field {field!r} on {form_repr} don't match."
)
self.assertEqual(field_errors, errors, msg_prefix + failure_message)
def assertFormError(self, response, form, field, errors, msg_prefix=""):
"""
Assert that a form used to render the response has a specific field
error.
"""
self._check_test_client_response(response, "context", "assertFormError")
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = [] if response.context is None else to_list(response.context)
if not contexts:
self.fail(
msg_prefix + "Response did not use any contexts to render the response"
)
if errors is None:
warnings.warn(
"Passing errors=None to assertFormError() is deprecated, use "
"errors=[] instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
errors = []
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form in context:
found_form = True
self._assert_form_error(
context[form], field, errors, msg_prefix, "form %r" % context[form]
)
if not found_form:
self.fail(
msg_prefix + "The form '%s' was not used to render the response" % form
)
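    # Illustrative sketch (not part of Django); assumes a view that renders
    # a bound form in a context variable named "form" with an "email" field:
    #
    #     response = self.client.post("/signup/", {"email": "not-an-email"})
    #     self.assertFormError(
    #         response, "form", "email", ["Enter a valid email address."]
    #     )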
def assertFormsetError(
self, response, formset, form_index, field, errors, msg_prefix=""
):
"""
Assert that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
self._check_test_client_response(response, "context", "assertFormsetError")
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = [] if response.context is None else to_list(response.context)
if not contexts:
self.fail(
msg_prefix + "Response did not use any contexts to "
"render the response"
)
if errors is None:
warnings.warn(
"Passing errors=None to assertFormsetError() is deprecated, "
"use errors=[] instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
errors = []
if form_index is None and field is not None:
raise ValueError("You must use field=None with form_index=None.")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context or not hasattr(context[formset], "forms"):
continue
formset_repr = repr(context[formset])
if not context[formset].is_bound:
self.fail(
f"{msg_prefix}The formset {formset_repr} is not bound, it will "
f"never have any errors."
)
found_formset = True
if form_index is not None:
form_count = context[formset].total_form_count()
if form_index >= form_count:
form_or_forms = "forms" if form_count > 1 else "form"
self.fail(
f"{msg_prefix}The formset {formset_repr} only has "
f"{form_count} {form_or_forms}."
)
if form_index is not None:
form_repr = f"form {form_index} of formset {formset_repr}"
self._assert_form_error(
context[formset].forms[form_index],
field,
errors,
msg_prefix,
form_repr,
)
else:
failure_message = (
f"{msg_prefix}The non-form errors of formset {formset_repr} don't "
f"match."
)
self.assertEqual(
context[formset].non_form_errors(), errors, failure_message
)
if not found_formset:
self.fail(
msg_prefix
+ "The formset '%s' was not used to render the response" % formset
)
def _get_template_used(self, response, template_name, msg_prefix, method_name):
if response is None and template_name is None:
raise TypeError("response and/or template_name argument must be provided")
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None:
self._check_test_client_response(response, "templates", method_name)
if not hasattr(response, "templates") or (response is None and template_name):
if response:
template_name = response
response = None
            # Use this template name with the context manager.
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def _assert_template_used(self, template_name, template_names, msg_prefix, count):
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ", ".join(template_names)),
)
if count is not None:
self.assertEqual(
template_names.count(template_name),
count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name)),
)
def assertTemplateUsed(
self, response=None, template_name=None, msg_prefix="", count=None
):
"""
Assert that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._get_template_used(
response,
template_name,
msg_prefix,
"assertTemplateUsed",
)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(
self, context_mgr_template, msg_prefix, count
)
self._assert_template_used(template_name, template_names, msg_prefix, count)
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=""):
"""
Assert that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._get_template_used(
response,
template_name,
msg_prefix,
"assertTemplateNotUsed",
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template, msg_prefix)
self.assertFalse(
template_name in template_names,
msg_prefix
+ "Template '%s' was used unexpectedly in rendering the response"
% template_name,
)
@contextmanager
def _assert_raises_or_warns_cm(
self, func, cm_attr, expected_exception, expected_message
):
with func(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(getattr(cm, cm_attr)))
def _assertFooMessage(
self, func, cm_attr, expected_exception, expected_message, *args, **kwargs
):
callable_obj = None
if args:
callable_obj, *args = args
cm = self._assert_raises_or_warns_cm(
func, cm_attr, expected_exception, expected_message
)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
def assertRaisesMessage(
self, expected_exception, expected_message, *args, **kwargs
):
"""
Assert that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
return self._assertFooMessage(
self.assertRaises,
"exception",
expected_exception,
expected_message,
*args,
**kwargs,
)
def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs):
"""
Same as assertRaisesMessage but for assertWarns() instead of
assertRaises().
"""
return self._assertFooMessage(
self.assertWarns,
"warning",
expected_warning,
expected_message,
*args,
**kwargs,
)
# A similar method is available in Python 3.10+.
if not PY310:
@contextmanager
def assertNoLogs(self, logger, level=None):
"""
Assert no messages are logged on the logger, with at least the
given level.
"""
if isinstance(level, int):
level = logging.getLevelName(level)
elif level is None:
level = "INFO"
try:
with self.assertLogs(logger, level) as cm:
yield
except AssertionError as e:
msg = e.args[0]
expected_msg = (
f"no logs of level {level} or higher triggered on {logger}"
)
if msg != expected_msg:
raise e
else:
self.fail(f"Unexpected logs found: {cm.output!r}")
def assertFieldOutput(
self,
fieldclass,
valid,
invalid,
field_args=None,
field_kwargs=None,
empty_value="",
):
"""
Assert that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **{**field_kwargs, "required": False})
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [required.error_messages["required"]]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({"min_length": 2, "max_length": 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
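    # Illustrative sketch (not part of Django): exercising EmailField with
    # the helper above.
    #
    #     from django.forms import EmailField
    #
    #     self.assertFieldOutput(
    #         EmailField,
    #         valid={"a@a.com": "a@a.com"},
    #         invalid={"aaa": ["Enter a valid email address."]},
    #     )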
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Assert that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(
self, html1, msg, "First argument is not valid HTML:"
)
dom2 = assert_and_parse_html(
self, html2, msg, "Second argument is not valid HTML:"
)
if dom1 != dom2:
standardMsg = "%s != %s" % (safe_repr(dom1, True), safe_repr(dom2, True))
diff = "\n" + "\n".join(
difflib.ndiff(
str(dom1).splitlines(),
str(dom2).splitlines(),
)
)
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Assert that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(
self, html1, msg, "First argument is not valid HTML:"
)
dom2 = assert_and_parse_html(
self, html2, msg, "Second argument is not valid HTML:"
)
if dom1 == dom2:
standardMsg = "%s == %s" % (safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=""):
needle = assert_and_parse_html(
self, needle, None, "First argument is not valid HTML:"
)
haystack = assert_and_parse_html(
self, haystack, None, "Second argument is not valid HTML:"
)
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count,
count,
msg_prefix
+ "Found %d instances of '%s' in response (expected %d)"
% (real_count, needle, count),
)
else:
self.assertTrue(
real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle
)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are equal.
        Usual JSON non-significant whitespace rules apply as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
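    # For example (a sketch, not part of Django), whitespace and key order
    # don't matter because both sides are parsed before comparison:
    #
    #     self.assertJSONEqual('{"a": 1, "b": 2}', {"b": 2, "a": 1})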
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are not equal.
        Usual JSON non-significant whitespace rules apply as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except json.JSONDecodeError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are semantically the same.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = "First or second argument is not valid XML\n%s" % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = "%s != %s" % (
safe_repr(xml1, True),
safe_repr(xml2, True),
)
diff = "\n" + "\n".join(
difflib.ndiff(xml1.splitlines(), xml2.splitlines())
)
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = "First or second argument is not valid XML\n%s" % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = "%s == %s" % (
safe_repr(xml1, True),
safe_repr(xml2, True),
)
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
    # Subclasses can ask for resetting of auto-increment sequences before
    # each test case.
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
databases = {DEFAULT_DB_ALIAS}
_disallowed_database_msg = (
"Database %(operation)s to %(alias)r are not allowed in this test. "
"Add %(alias)r to %(test)s.databases to ensure proper test isolation "
"and silence this failure."
)
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup and flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
def _pre_setup(self):
"""
Perform pre-test setup:
* If the class has an 'available_apps' attribute, restrict the app
registry to these applications, then fire the post_migrate signal --
it must run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, install those fixtures.
"""
super()._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=settings.INSTALLED_APPS,
enter=False,
)
raise
# Clear the queries_log so that it's less likely to overflow (a single
# test probably won't execute 9K queries). If queries_log overflows,
# then assertNumQueries() doesn't work.
for db_name in self._databases_names(include_mirrors=False):
connections[db_name].queries_log.clear()
@classmethod
def _databases_names(cls, include_mirrors=True):
# Only consider allowed database aliases, including mirrors or not.
return [
alias
for alias in connections
if alias in cls.databases
and (
include_mirrors
or not connections[alias].settings_dict["TEST"]["MIRROR"]
)
]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list()
)
if sql_list:
with transaction.atomic(using=db_name):
with conn.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# Provide replica initial data from migrated apps, if needed.
if self.serialized_rollback and hasattr(
connections[db_name], "_test_serialized_contents"
):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command(
"loaddata", *self.fixtures, **{"verbosity": 0, "database": db_name}
)
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""
Perform post-test things:
* Flush the contents of the database to leave a clean slate. If the
class has an 'available_apps' attribute, don't fire post_migrate.
* Force-close the connection so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super()._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all(initialized_only=True):
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=settings.INSTALLED_APPS,
enter=False,
)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None
or ( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback
and hasattr(connections[db_name], "_test_serialized_contents")
)
)
call_command(
"flush",
verbosity=0,
interactive=False,
database=db_name,
reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate,
)
def assertQuerysetEqual(self, qs, values, transform=None, ordered=True, msg=None):
values = list(values)
items = qs
if transform is not None:
items = map(transform, items)
if not ordered:
return self.assertDictEqual(Counter(items), Counter(values), msg=msg)
        # For example, qs.iterator() could be passed as qs, but it does not
        # have an 'ordered' attribute.
if len(values) > 1 and hasattr(qs, "ordered") and not qs.ordered:
raise ValueError(
"Trying to compare non-ordered queryset against more than one "
"ordered value."
)
return self.assertEqual(list(items), values, msg=msg)
def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs):
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
def connections_support_transactions(aliases=None):
"""
Return whether or not all (or specified) connections support
transactions.
"""
conns = (
connections.all()
if aliases is None
else (connections[alias] for alias in aliases)
)
return all(conn.features.supports_transactions for conn in conns)
class TestData:
"""
Descriptor to provide TestCase instance isolation for attributes assigned
during the setUpTestData() phase.
Allow safe alteration of objects assigned in setUpTestData() by test
methods by exposing deep copies instead of the original objects.
Objects are deep copied using a memo kept on the test case instance in
order to maintain their original relationships.
"""
memo_attr = "_testdata_memo"
def __init__(self, name, data):
self.name = name
self.data = data
def get_memo(self, testcase):
try:
memo = getattr(testcase, self.memo_attr)
except AttributeError:
memo = {}
setattr(testcase, self.memo_attr, memo)
return memo
def __get__(self, instance, owner):
if instance is None:
return self.data
memo = self.get_memo(instance)
data = deepcopy(self.data, memo)
setattr(instance, self.name, data)
return data
def __repr__(self):
return "<TestData: name=%r, data=%r>" % (self.name, self.data)
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Open atomic blocks for multiple databases."""
atomics = {}
for db_name in cls._databases_names():
atomic = transaction.atomic(using=db_name)
atomic._from_testcase = True
atomic.__enter__()
atomics[db_name] = atomic
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened by the previous method."""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def _databases_support_transactions(cls):
return connections_support_transactions(cls.databases)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not cls._databases_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command(
"loaddata",
*cls.fixtures,
**{"verbosity": 0, "database": db_name},
)
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
pre_attrs = cls.__dict__.copy()
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
for name, value in cls.__dict__.items():
if value is not pre_attrs.get(name):
setattr(cls, name, TestData(name, value))
@classmethod
def tearDownClass(cls):
if cls._databases_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all(initialized_only=True):
conn.close()
super().tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase."""
pass
def _should_reload_connections(self):
if self._databases_support_transactions():
return False
return super()._should_reload_connections()
def _fixture_setup(self):
if not self._databases_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super()._fixture_setup()
if self.reset_sequences:
raise TypeError("reset_sequences cannot be used on TestCase instances")
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not self._databases_support_transactions():
return super()._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks
and not connection.needs_rollback
and connection.is_usable()
)
@classmethod
@contextmanager
def captureOnCommitCallbacks(cls, *, using=DEFAULT_DB_ALIAS, execute=False):
"""Context manager to capture transaction.on_commit() callbacks."""
callbacks = []
start_count = len(connections[using].run_on_commit)
try:
yield callbacks
finally:
while True:
callback_count = len(connections[using].run_on_commit)
for _, callback in connections[using].run_on_commit[start_count:]:
callbacks.append(callback)
if execute:
callback()
if callback_count == len(connections[using].run_on_commit):
break
start_count = callback_count
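    # Illustrative sketch (not part of Django); the view is hypothetical:
    #
    #     with self.captureOnCommitCallbacks(execute=True) as callbacks:
    #         self.client.post("/contact/", {"message": "hi"})
    #     self.assertEqual(len(callbacks), 1)  # e.g. one deferred email send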
class CheckCondition:
"""Descriptor class for deferred condition checking."""
def __init__(self, *conditions):
self.conditions = conditions
def add_condition(self, condition, reason):
return self.__class__(*self.conditions, (condition, reason))
def __get__(self, instance, cls=None):
# Trigger access for all bases.
if any(getattr(base, "__unittest_skip__", False) for base in cls.__bases__):
return True
for condition, reason in self.conditions:
if condition():
# Override this descriptor's value and set the skip reason.
cls.__unittest_skip__ = True
cls.__unittest_skip_why__ = reason
return True
return False
def _deferredSkip(condition, reason, name):
def decorator(test_func):
nonlocal condition
if not (
isinstance(test_func, type) and issubclass(test_func, unittest.TestCase)
):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if (
args
and isinstance(args[0], unittest.TestCase)
and connection.alias not in getattr(args[0], "databases", {})
):
raise ValueError(
"%s cannot be used on %s as %s doesn't allow queries "
"against the %r database."
% (
name,
args[0],
args[0].__class__.__qualname__,
connection.alias,
)
)
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
databases = getattr(test_item, "databases", None)
if not databases or connection.alias not in databases:
# Defer raising to allow importing test class's module.
def condition():
raise ValueError(
"%s cannot be used on %s as it doesn't allow queries "
"against the '%s' database."
% (
name,
test_item,
connection.alias,
)
)
# Retrieve the possibly existing value from the class's dict to
# avoid triggering the descriptor.
skip = test_func.__dict__.get("__unittest_skip__")
if isinstance(skip, CheckCondition):
test_item.__unittest_skip__ = skip.add_condition(condition, reason)
elif skip is not True:
test_item.__unittest_skip__ = CheckCondition((condition, reason))
return test_item
return decorator
def skipIfDBFeature(*features):
"""Skip a test if a database has at least one of the named features."""
return _deferredSkip(
lambda: any(
getattr(connection.features, feature, False) for feature in features
),
"Database has feature(s) %s" % ", ".join(features),
"skipIfDBFeature",
)
def skipUnlessDBFeature(*features):
"""Skip a test unless a database has all the named features."""
return _deferredSkip(
lambda: not all(
getattr(connection.features, feature, False) for feature in features
),
"Database doesn't support feature(s): %s" % ", ".join(features),
"skipUnlessDBFeature",
)
def skipUnlessAnyDBFeature(*features):
"""Skip a test unless a database has any of the named features."""
return _deferredSkip(
lambda: not any(
getattr(connection.features, feature, False) for feature in features
),
"Database doesn't support any of the feature(s): %s" % ", ".join(features),
"skipUnlessAnyDBFeature",
)
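# Illustrative sketch (not part of Django) of the decorators above; the
# feature flags are real DatabaseFeatures attributes, the test methods are
# hypothetical:
#
#     @skipUnlessDBFeature("supports_transactions")
#     def test_rollback_behavior(self):
#         ...
#
#     @skipIfDBFeature("interprets_empty_strings_as_nulls")
#     def test_empty_string(self):
#         ...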
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
A WSGIRequestHandler that doesn't log to standard output any of the
    requests received, so as not to clutter the test result output.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super().__init__()
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""Return the relative path to the file on disk for the given URL."""
relative_url = url[len(self.base_url[2]) :]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super().get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace("\\", "/").lstrip("/")
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super().__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""Thread for running a live HTTP server while the tests are running."""
server_class = ThreadedWSGIServer
def __init__(self, host, static_handler, connections_override=None, port=0):
self.host = host
self.port = port
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super().__init__()
def run(self):
"""
Set up the live server and databases, and then loop over handling
HTTP requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server()
# If binding to port zero, assign the port allocated by the OS.
if self.port == 0:
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self, connections_override=None):
return self.server_class(
(self.host, self.port),
QuietWSGIRequestHandler,
allow_reuse_address=False,
connections_override=connections_override,
)
def terminate(self):
if hasattr(self, "httpd"):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
class LiveServerTestCase(TransactionTestCase):
"""
Do basically the same as TransactionTestCase but also launch a live HTTP
server in a separate thread so that the tests may use another testing
    framework, such as Selenium, instead of the built-in dummy
client.
It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless using in-memory SQLite)
    and each thread needs to commit all of its transactions so that the other
thread can see the changes.
"""
host = "localhost"
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return "http://%s:%s" % (cls.host, cls.server_thread.port)
@classproperty
def allowed_host(cls):
return cls.host
@classmethod
def _make_connections_override(cls):
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == "sqlite" and conn.is_in_memory_db():
connections_override[conn.alias] = conn
return connections_override
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._live_server_modified_settings = modify_settings(
ALLOWED_HOSTS={"append": cls.allowed_host},
)
cls._live_server_modified_settings.enable()
cls.addClassCleanup(cls._live_server_modified_settings.disable)
cls._start_server_thread()
@classmethod
def _start_server_thread(cls):
connections_override = cls._make_connections_override()
for conn in connections_override.values():
# Explicitly enable thread-shareability for this connection.
conn.inc_thread_sharing()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
cls.addClassCleanup(cls._terminate_thread)
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
port=cls.port,
)
@classmethod
def _terminate_thread(cls):
# Terminate the live server's thread.
cls.server_thread.terminate()
# Restore shared connections' non-shareability.
for conn in cls.server_thread.connections_override.values():
conn.dec_thread_sharing()
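# Illustrative sketch (not part of Django); assumes Selenium is installed
# and the path is hypothetical:
#
#     class FrontendTests(LiveServerTestCase):
#         def test_homepage(self):
#             from selenium import webdriver
#
#             driver = webdriver.Firefox()
#             try:
#                 driver.get(self.live_server_url + "/")
#             finally:
#                 driver.quit()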
class SerializeMixin:
"""
Enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
"""
lockfile = None
def __init_subclass__(cls, /, **kwargs):
super().__init_subclass__(**kwargs)
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__)
)
@classmethod
def setUpClass(cls):
cls._lockfile = open(cls.lockfile)
cls.addClassCleanup(cls._lockfile.close)
locks.lock(cls._lockfile, locks.LOCK_EX)
super().setUpClass()
|
fdfafd458279a52de3787004dc6b7b1b7684ecb7ad883eb7ebdf314160d5dd3c | import os
import time
import warnings
from asgiref.local import Local
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.db import connections, router
from django.db.utils import ConnectionRouter
from django.dispatch import Signal, receiver
from django.utils import timezone
from django.utils.formats import FORMAT_SETTINGS, reset_format_cache
from django.utils.functional import empty
template_rendered = Signal()
# Most setting_changed receivers are supposed to be added below,
# except for cases where the receiver is related to a contrib app.
# Settings that may not work well when using 'override_settings' (#19031)
COMPLEX_OVERRIDE_SETTINGS = {"DATABASES"}
@receiver(setting_changed)
def clear_cache_handlers(*, setting, **kwargs):
if setting == "CACHES":
from django.core.cache import caches, close_caches
close_caches()
caches._settings = caches.settings = caches.configure_settings(None)
caches._connections = Local()
@receiver(setting_changed)
def update_installed_apps(*, setting, **kwargs):
if setting == "INSTALLED_APPS":
# Rebuild any AppDirectoriesFinder instance.
from django.contrib.staticfiles.finders import get_finder
get_finder.cache_clear()
# Rebuild management commands cache
from django.core.management import get_commands
get_commands.cache_clear()
# Rebuild get_app_template_dirs cache.
from django.template.utils import get_app_template_dirs
get_app_template_dirs.cache_clear()
# Rebuild translations cache.
from django.utils.translation import trans_real
trans_real._translations = {}
@receiver(setting_changed)
def update_connections_time_zone(*, setting, **kwargs):
if setting == "TIME_ZONE":
# Reset process time zone
if hasattr(time, "tzset"):
if kwargs["value"]:
os.environ["TZ"] = kwargs["value"]
else:
os.environ.pop("TZ", None)
time.tzset()
# Reset local time zone cache
timezone.get_default_timezone.cache_clear()
# Reset the database connections' time zone
if setting in {"TIME_ZONE", "USE_TZ"}:
for conn in connections.all(initialized_only=True):
try:
del conn.timezone
except AttributeError:
pass
try:
del conn.timezone_name
except AttributeError:
pass
conn.ensure_timezone()
@receiver(setting_changed)
def clear_routers_cache(*, setting, **kwargs):
if setting == "DATABASE_ROUTERS":
router.routers = ConnectionRouter().routers
@receiver(setting_changed)
def reset_template_engines(*, setting, **kwargs):
if setting in {
"TEMPLATES",
"DEBUG",
"INSTALLED_APPS",
}:
from django.template import engines
try:
del engines.templates
except AttributeError:
pass
engines._templates = None
engines._engines = {}
from django.template.engine import Engine
Engine.get_default.cache_clear()
from django.forms.renderers import get_default_renderer
get_default_renderer.cache_clear()
@receiver(setting_changed)
def clear_serializers_cache(*, setting, **kwargs):
if setting == "SERIALIZATION_MODULES":
from django.core import serializers
serializers._serializers = {}
@receiver(setting_changed)
def language_changed(*, setting, **kwargs):
if setting in {"LANGUAGES", "LANGUAGE_CODE", "LOCALE_PATHS"}:
from django.utils.translation import trans_real
trans_real._default = None
trans_real._active = Local()
if setting in {"LANGUAGES", "LOCALE_PATHS"}:
from django.utils.translation import trans_real
trans_real._translations = {}
trans_real.check_for_language.cache_clear()
@receiver(setting_changed)
def localize_settings_changed(*, setting, **kwargs):
if setting in FORMAT_SETTINGS or setting == "USE_THOUSAND_SEPARATOR":
reset_format_cache()
@receiver(setting_changed)
def file_storage_changed(*, setting, **kwargs):
if setting == "DEFAULT_FILE_STORAGE":
from django.core.files.storage import default_storage
default_storage._wrapped = empty
@receiver(setting_changed)
def complex_setting_changed(*, enter, setting, **kwargs):
if enter and setting in COMPLEX_OVERRIDE_SETTINGS:
# Considering the current implementation of the signals framework,
# this stacklevel shows the line containing the override_settings call.
warnings.warn(
f"Overriding setting {setting} can lead to unexpected behavior.",
stacklevel=6,
)
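# For example (a sketch, not part of Django's code), entering an override of
# a COMPLEX_OVERRIDE_SETTINGS member emits the warning above:
#
#     from django.test import override_settings
#
#     with override_settings(DATABASES={}):
#         ...  # a warning is issued on enter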
@receiver(setting_changed)
def root_urlconf_changed(*, setting, **kwargs):
if setting == "ROOT_URLCONF":
from django.urls import clear_url_caches, set_urlconf
clear_url_caches()
set_urlconf(None)
@receiver(setting_changed)
def static_storage_changed(*, setting, **kwargs):
if setting in {
"STATICFILES_STORAGE",
"STATIC_ROOT",
"STATIC_URL",
}:
from django.contrib.staticfiles.storage import staticfiles_storage
staticfiles_storage._wrapped = empty
@receiver(setting_changed)
def static_finders_changed(*, setting, **kwargs):
if setting in {
"STATICFILES_DIRS",
"STATIC_ROOT",
}:
from django.contrib.staticfiles.finders import get_finder
get_finder.cache_clear()
@receiver(setting_changed)
def auth_password_validators_changed(*, setting, **kwargs):
if setting == "AUTH_PASSWORD_VALIDATORS":
from django.contrib.auth.password_validation import (
get_default_password_validators,
)
get_default_password_validators.cache_clear()
@receiver(setting_changed)
def user_model_swapped(*, setting, **kwargs):
if setting == "AUTH_USER_MODEL":
apps.clear_cache()
try:
from django.contrib.auth import get_user_model
UserModel = get_user_model()
except ImproperlyConfigured:
# Some tests set an invalid AUTH_USER_MODEL.
pass
else:
from django.contrib.auth import backends
backends.UserModel = UserModel
from django.contrib.auth import forms
forms.UserModel = UserModel
from django.contrib.auth.handlers import modwsgi
modwsgi.UserModel = UserModel
from django.contrib.auth.management.commands import changepassword
changepassword.UserModel = UserModel
from django.contrib.auth import views
views.UserModel = UserModel
|
53b40b9b35705b7a2263f96ba96fe419c51507664a62ae67ab4223f8ad30f10f | """
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import posixpath
from pathlib import Path
from django.http import FileResponse, Http404, HttpResponse, HttpResponseNotModified
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils._os import safe_join
from django.utils.http import http_date, parse_http_date
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
path('<path:path>', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(path).lstrip("/")
fullpath = Path(safe_join(document_root, path))
if fullpath.is_dir():
if show_indexes:
return directory_index(path, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not fullpath.exists():
raise Http404(_("“%(path)s” does not exist") % {"path": fullpath})
# Respect the If-Modified-Since header.
statobj = fullpath.stat()
if not was_modified_since(
request.META.get("HTTP_IF_MODIFIED_SINCE"), statobj.st_mtime
):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(str(fullpath))
content_type = content_type or "application/octet-stream"
response = FileResponse(fullpath.open("rb"), content_type=content_type)
response.headers["Last-Modified"] = http_date(statobj.st_mtime)
if encoding:
response.headers["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8">
<meta http-equiv="Content-Language" content="en-us">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% blocktranslate %}Index of {{ directory }}{% endblocktranslate %}</title>
</head>
<body>
<h1>{% blocktranslate %}Index of {{ directory }}{% endblocktranslate %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = gettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template(
[
"static/directory_index.html",
"static/directory_index",
]
)
except TemplateDoesNotExist:
t = Engine(libraries={"i18n": "django.templatetags.i18n"}).from_string(
DEFAULT_DIRECTORY_INDEX_TEMPLATE
)
c = Context()
else:
c = {}
files = []
for f in fullpath.iterdir():
if not f.name.startswith("."):
url = str(f.relative_to(fullpath))
if f.is_dir():
url += "/"
files.append(url)
c.update(
{
"directory": path + "/",
"file_list": files,
}
)
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
"""
try:
if header is None:
raise ValueError
header_mtime = parse_http_date(header)
if int(mtime) > header_mtime:
raise ValueError
except (ValueError, OverflowError):
return True
return False
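# For example (a sketch, not part of Django), with mtimes given as epoch
# seconds:
#
#     was_modified_since(None, 1_000_000)                  # True
#     was_modified_since(http_date(2_000_000), 1_000_000)  # False -> 304
#     was_modified_since(http_date(1_000_000), 2_000_000)  # True -> serve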
|
ac082a83a860ebac2c38a902f293e88f965bf06a81cf96bb4a9e6bd6ecb4fd13 | import functools
import itertools
import logging
import os
import signal
import subprocess
import sys
import threading
import time
import traceback
import weakref
from collections import defaultdict
from pathlib import Path
from types import ModuleType
from zipimport import zipimporter
import django
from django.apps import apps
from django.core.signals import request_finished
from django.dispatch import Signal
from django.utils.functional import cached_property
from django.utils.version import get_version_tuple
autoreload_started = Signal()
file_changed = Signal()
DJANGO_AUTORELOAD_ENV = "RUN_MAIN"
logger = logging.getLogger("django.utils.autoreload")
# If an error is raised while importing a file, it's not placed in sys.modules.
# This means that any future modifications aren't caught. Keep a list of these
# file paths to allow watching them in the future.
_error_files = []
_exception = None
try:
import termios
except ImportError:
termios = None
try:
import pywatchman
except ImportError:
pywatchman = None
def is_django_module(module):
"""Return True if the given module is nested under Django."""
return module.__name__.startswith("django.")
def is_django_path(path):
"""Return True if the given file path is nested under Django."""
return Path(django.__file__).parent in Path(path).parents
def check_errors(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
global _exception
try:
fn(*args, **kwargs)
except Exception:
_exception = sys.exc_info()
et, ev, tb = _exception
if getattr(ev, "filename", None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def raise_last_exception():
global _exception
if _exception is not None:
raise _exception[1]
def ensure_echo_on():
"""
    Ensure that echo mode is enabled. Some tools such as PDB disable
    it, which causes usability issues after a reload.
"""
if not termios or not sys.stdin.isatty():
return
attr_list = termios.tcgetattr(sys.stdin)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, "SIGTTOU"):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def iter_all_python_module_files():
# This is a hot path during reloading. Create a stable sorted list of
# modules based on the module name and pass it to iter_modules_and_files().
# This ensures cached results are returned in the usual case that modules
# aren't loaded on the fly.
keys = sorted(sys.modules)
modules = tuple(
m
for m in map(sys.modules.__getitem__, keys)
if not isinstance(m, weakref.ProxyTypes)
)
return iter_modules_and_files(modules, frozenset(_error_files))
@functools.lru_cache(maxsize=1)
def iter_modules_and_files(modules, extra_files):
"""Iterate through all modules needed to be watched."""
sys_file_paths = []
for module in modules:
# During debugging (with PyDev) the 'typing.io' and 'typing.re' objects
        # are added to sys.modules; however, they are types, not modules, and
        # so cause issues here.
if not isinstance(module, ModuleType):
continue
if module.__name__ in ("__main__", "__mp_main__"):
# __main__ (usually manage.py) doesn't always have a __spec__ set.
# Handle this by falling back to using __file__, resolved below.
# See https://docs.python.org/reference/import.html#main-spec
            # __file__ may not exist, e.g. when running the ipdb debugger.
if hasattr(module, "__file__"):
sys_file_paths.append(module.__file__)
continue
if getattr(module, "__spec__", None) is None:
continue
spec = module.__spec__
# Modules could be loaded from places without a concrete location. If
# this is the case, skip them.
if spec.has_location:
origin = (
spec.loader.archive
if isinstance(spec.loader, zipimporter)
else spec.origin
)
sys_file_paths.append(origin)
results = set()
for filename in itertools.chain(sys_file_paths, extra_files):
if not filename:
continue
path = Path(filename)
try:
if not path.exists():
# The module could have been removed, don't fail loudly if this
# is the case.
continue
except ValueError as e:
# Network filesystems may return null bytes in file paths.
logger.debug('"%s" raised when resolving path: "%s"', e, path)
continue
resolved_path = path.resolve().absolute()
results.add(resolved_path)
return frozenset(results)
@functools.lru_cache(maxsize=1)
def common_roots(paths):
"""
Return a tuple of common roots that are shared between the given paths.
File system watchers operate on directories and aren't cheap to create.
Try to find the minimum set of directories to watch that encompass all of
the files that need to be watched.
"""
    # Inspired by Werkzeug:
# https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py
# Create a sorted list of the path components, longest first.
path_parts = sorted([x.parts for x in paths], key=len, reverse=True)
tree = {}
for chunks in path_parts:
node = tree
# Add each part of the path to the tree.
for chunk in chunks:
node = node.setdefault(chunk, {})
# Clear the last leaf in the tree.
node.clear()
# Turn the tree into a list of Path instances.
def _walk(node, path):
for prefix, child in node.items():
yield from _walk(child, path + (prefix,))
if not node:
yield Path(*path)
return tuple(_walk(tree, ()))
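# For example (a sketch, not part of Django), overlapping paths collapse to
# their shared parents; the directories are hypothetical (note the argument
# must be hashable because of the lru_cache, e.g. a tuple or frozenset):
#
#     common_roots((Path("/app/src"), Path("/app/src/pkg"), Path("/lib")))
#     # -> (Path("/app/src"), Path("/lib"))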
def sys_path_directories():
"""
Yield absolute directories from sys.path, ignoring entries that don't
exist.
"""
for path in sys.path:
path = Path(path)
if not path.exists():
continue
resolved_path = path.resolve().absolute()
# If the path is a file (like a zip file), watch the parent directory.
if resolved_path.is_file():
yield resolved_path.parent
else:
yield resolved_path
def get_child_arguments():
"""
Return the executable. This contains a workaround for Windows if the
    executable is reported to not have the .exe extension, which can cause
    bugs on reloading.
"""
import __main__
py_script = Path(sys.argv[0])
args = [sys.executable] + ["-W%s" % o for o in sys.warnoptions]
if sys.implementation.name == "cpython":
args.extend(
f"-X{key}" if value is True else f"-X{key}={value}"
for key, value in sys._xoptions.items()
)
# __spec__ is set when the server was started with the `-m` option,
# see https://docs.python.org/3/reference/import.html#main-spec
# __spec__ may not exist, e.g. when running in a Conda env.
if getattr(__main__, "__spec__", None) is not None:
spec = __main__.__spec__
if (spec.name == "__main__" or spec.name.endswith(".__main__")) and spec.parent:
name = spec.parent
else:
name = spec.name
args += ["-m", name]
args += sys.argv[1:]
elif not py_script.exists():
# sys.argv[0] may not exist for several reasons on Windows.
# It may exist with a .exe extension or have a -script.py suffix.
exe_entrypoint = py_script.with_suffix(".exe")
if exe_entrypoint.exists():
# Should be executed directly, ignoring sys.executable.
return [exe_entrypoint, *sys.argv[1:]]
script_entrypoint = py_script.with_name("%s-script.py" % py_script.name)
if script_entrypoint.exists():
# Should be executed as usual.
return [*args, script_entrypoint, *sys.argv[1:]]
raise RuntimeError("Script %s does not exist." % py_script)
else:
args += sys.argv
return args
def trigger_reload(filename):
logger.info("%s changed, reloading.", filename)
sys.exit(3)
def restart_with_reloader():
new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: "true"}
args = get_child_arguments()
while True:
p = subprocess.run(args, env=new_environ, close_fds=False)
if p.returncode != 3:
return p.returncode
class BaseReloader:
def __init__(self):
self.extra_files = set()
self.directory_globs = defaultdict(set)
self._stop_condition = threading.Event()
def watch_dir(self, path, glob):
path = Path(path)
try:
path = path.absolute()
except FileNotFoundError:
logger.debug(
"Unable to watch directory %s as it cannot be resolved.",
path,
exc_info=True,
)
return
logger.debug("Watching dir %s with glob %s.", path, glob)
self.directory_globs[path].add(glob)
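    # Illustrative sketch (not part of Django): an autoreload_started
    # receiver can register extra directories to watch; the path and glob
    # are hypothetical.
    #
    #     from django.dispatch import receiver
    #
    #     @receiver(autoreload_started)
    #     def watch_extra_templates(sender, **kwargs):
    #         sender.watch_dir(Path("templates"), "**/*.html")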
def watched_files(self, include_globs=True):
"""
Yield all files that need to be watched, including module files and
files within globs.
"""
yield from iter_all_python_module_files()
yield from self.extra_files
if include_globs:
for directory, patterns in self.directory_globs.items():
for pattern in patterns:
yield from directory.glob(pattern)
def wait_for_apps_ready(self, app_reg, django_main_thread):
"""
Wait until Django reports that the apps have been loaded. If the given
thread has terminated before the apps are ready, then a SyntaxError or
other non-recoverable error has been raised. In that case, stop waiting
for the apps_ready event and continue processing.
Return True if the thread is alive and the ready event has been
triggered, or False if the thread is terminated while waiting for the
event.
"""
while django_main_thread.is_alive():
if app_reg.ready_event.wait(timeout=0.1):
return True
else:
logger.debug("Main Django thread has terminated before apps are ready.")
return False
def run(self, django_main_thread):
logger.debug("Waiting for apps ready_event.")
self.wait_for_apps_ready(apps, django_main_thread)
from django.urls import get_resolver
# Prevent a race condition where URL modules aren't loaded when the
# reloader starts by accessing the urlconf_module property.
try:
get_resolver().urlconf_module
except Exception:
# Loading the urlconf can result in errors during development.
# If this occurs then swallow the error and continue.
pass
logger.debug("Apps ready_event triggered. Sending autoreload_started signal.")
autoreload_started.send(sender=self)
self.run_loop()
def run_loop(self):
ticker = self.tick()
while not self.should_stop:
try:
next(ticker)
except StopIteration:
break
self.stop()
def tick(self):
"""
This generator is called in a loop from run_loop. It's important that
the method takes care of pausing or otherwise waiting for a period of
time. This split between run_loop() and tick() is to improve the
testability of the reloader implementations by decoupling the work they
do from the loop.
"""
raise NotImplementedError("subclasses must implement tick().")
@classmethod
def check_availability(cls):
raise NotImplementedError("subclasses must implement check_availability().")
def notify_file_changed(self, path):
results = file_changed.send(sender=self, file_path=path)
logger.debug("%s notified as changed. Signal results: %s.", path, results)
if not any(res[1] for res in results):
trigger_reload(path)
# These are primarily used for testing.
@property
def should_stop(self):
return self._stop_condition.is_set()
def stop(self):
self._stop_condition.set()
class StatReloader(BaseReloader):
SLEEP_TIME = 1 # Check for changes once per second.
def tick(self):
mtimes = {}
while True:
for filepath, mtime in self.snapshot_files():
old_time = mtimes.get(filepath)
mtimes[filepath] = mtime
if old_time is None:
logger.debug("File %s first seen with mtime %s", filepath, mtime)
continue
elif mtime > old_time:
logger.debug(
"File %s previous mtime: %s, current mtime: %s",
filepath,
old_time,
mtime,
)
self.notify_file_changed(filepath)
time.sleep(self.SLEEP_TIME)
yield
def snapshot_files(self):
# watched_files may produce duplicate paths if globs overlap.
seen_files = set()
for file in self.watched_files():
if file in seen_files:
continue
try:
mtime = file.stat().st_mtime
except OSError:
# This is thrown when the file does not exist.
continue
seen_files.add(file)
yield file, mtime
@classmethod
def check_availability(cls):
return True
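# A condensed sketch of the polling approach StatReloader.tick() implements:
# compare each file's current mtime against the previous snapshot and treat
# any increase as a change. ``paths`` and ``snapshot`` are hypothetical
# inputs (an iterable of Path objects and a dict carried between ticks).
def _stat_polling_sketch(paths, snapshot):
    for path in paths:
        try:
            mtime = path.stat().st_mtime
        except OSError:
            # The file may have been deleted between ticks.
            continue
        old_time = snapshot.get(path)
        snapshot[path] = mtime
        if old_time is not None and mtime > old_time:
            yield path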
class WatchmanUnavailable(RuntimeError):
pass
class WatchmanReloader(BaseReloader):
def __init__(self):
self.roots = defaultdict(set)
self.processed_request = threading.Event()
self.client_timeout = int(os.environ.get("DJANGO_WATCHMAN_TIMEOUT", 5))
super().__init__()
@cached_property
def client(self):
return pywatchman.client(timeout=self.client_timeout)
def _watch_root(self, root):
        # In practice this shouldn't occur; however, it's possible that a
        # directory that doesn't exist yet is being watched. If it's outside
        # of sys.path then this will end up as a new root. How to handle this
        # isn't clear: not adding the root will likely break when subscribing
        # to the changes; however, as this is currently an internal API, no
        # files will be watched outside of sys.path. Fixing this by checking
        # inside watch_glob() and watch_dir() is expensive; instead, this
        # could fall back to the StatReloader if this case is detected. For
        # now, watching its parent, if possible, is sufficient.
if not root.exists():
if not root.parent.exists():
logger.warning(
"Unable to watch root dir %s as neither it or its parent exist.",
root,
)
return
root = root.parent
result = self.client.query("watch-project", str(root.absolute()))
if "warning" in result:
logger.warning("Watchman warning: %s", result["warning"])
logger.debug("Watchman watch-project result: %s", result)
return result["watch"], result.get("relative_path")
@functools.lru_cache
def _get_clock(self, root):
return self.client.query("clock", root)["clock"]
def _subscribe(self, directory, name, expression):
root, rel_path = self._watch_root(directory)
# Only receive notifications of files changing, filtering out other types
# like special files: https://facebook.github.io/watchman/docs/type
only_files_expression = [
"allof",
["anyof", ["type", "f"], ["type", "l"]],
expression,
]
query = {
"expression": only_files_expression,
"fields": ["name"],
"since": self._get_clock(root),
"dedup_results": True,
}
if rel_path:
query["relative_root"] = rel_path
logger.debug(
"Issuing watchman subscription %s, for root %s. Query: %s",
name,
root,
query,
)
self.client.query("subscribe", root, name, query)
def _subscribe_dir(self, directory, filenames):
if not directory.exists():
if not directory.parent.exists():
logger.warning(
"Unable to watch directory %s as neither it or its parent exist.",
directory,
)
return
prefix = "files-parent-%s" % directory.name
filenames = ["%s/%s" % (directory.name, filename) for filename in filenames]
directory = directory.parent
expression = ["name", filenames, "wholename"]
else:
prefix = "files"
expression = ["name", filenames]
self._subscribe(directory, "%s:%s" % (prefix, directory), expression)
def _watch_glob(self, directory, patterns):
"""
Watch a directory with a specific glob. If the directory doesn't yet
exist, attempt to watch the parent directory and amend the patterns to
        include this. It's important that this method isn't called more than
        once per directory when updating all subscriptions. Subsequent calls
        will overwrite the named subscription, so it must include all possible
        glob expressions.
"""
prefix = "glob"
if not directory.exists():
if not directory.parent.exists():
logger.warning(
"Unable to watch directory %s as neither it or its parent exist.",
directory,
)
return
prefix = "glob-parent-%s" % directory.name
patterns = ["%s/%s" % (directory.name, pattern) for pattern in patterns]
directory = directory.parent
expression = ["anyof"]
for pattern in patterns:
expression.append(["match", pattern, "wholename"])
self._subscribe(directory, "%s:%s" % (prefix, directory), expression)
def watched_roots(self, watched_files):
extra_directories = self.directory_globs.keys()
watched_file_dirs = [f.parent for f in watched_files]
sys_paths = list(sys_path_directories())
return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))
def _update_watches(self):
watched_files = list(self.watched_files(include_globs=False))
found_roots = common_roots(self.watched_roots(watched_files))
logger.debug("Watching %s files", len(watched_files))
logger.debug("Found common roots: %s", found_roots)
# Setup initial roots for performance, shortest roots first.
for root in sorted(found_roots):
self._watch_root(root)
for directory, patterns in self.directory_globs.items():
self._watch_glob(directory, patterns)
# Group sorted watched_files by their parent directory.
sorted_files = sorted(watched_files, key=lambda p: p.parent)
for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
# These paths need to be relative to the parent directory.
self._subscribe_dir(
directory, [str(p.relative_to(directory)) for p in group]
)
def update_watches(self):
try:
self._update_watches()
except Exception as ex:
# If the service is still available, raise the original exception.
if self.check_server_status(ex):
raise
def _check_subscription(self, sub):
subscription = self.client.getSubscription(sub)
if not subscription:
return
logger.debug("Watchman subscription %s has results.", sub)
for result in subscription:
# When using watch-project, it's not simple to get the relative
# directory without storing some specific state. Store the full
# path to the directory in the subscription name, prefixed by its
# type (glob, files).
root_directory = Path(result["subscription"].split(":", 1)[1])
logger.debug("Found root directory %s", root_directory)
for file in result.get("files", []):
self.notify_file_changed(root_directory / file)
def request_processed(self, **kwargs):
logger.debug("Request processed. Setting update_watches event.")
self.processed_request.set()
def tick(self):
request_finished.connect(self.request_processed)
self.update_watches()
while True:
if self.processed_request.is_set():
self.update_watches()
self.processed_request.clear()
try:
self.client.receive()
except pywatchman.SocketTimeout:
pass
except pywatchman.WatchmanError as ex:
logger.debug("Watchman error: %s, checking server status.", ex)
self.check_server_status(ex)
else:
for sub in list(self.client.subs.keys()):
self._check_subscription(sub)
yield
# Protect against busy loops.
time.sleep(0.1)
def stop(self):
self.client.close()
super().stop()
def check_server_status(self, inner_ex=None):
"""Return True if the server is available."""
try:
self.client.query("version")
except Exception:
raise WatchmanUnavailable(str(inner_ex)) from inner_ex
return True
@classmethod
def check_availability(cls):
if not pywatchman:
raise WatchmanUnavailable("pywatchman not installed.")
client = pywatchman.client(timeout=0.1)
try:
result = client.capabilityCheck()
except Exception:
            # The service may be down.
raise WatchmanUnavailable("Cannot connect to the watchman service.")
version = get_version_tuple(result["version"])
# Watchman 4.9 includes multiple improvements to watching project
        # directories as well as case-insensitive filesystems.
logger.debug("Watchman version %s", version)
if version < (4, 9):
raise WatchmanUnavailable("Watchman 4.9 or later is required.")
def get_reloader():
"""Return the most suitable reloader for this environment."""
try:
WatchmanReloader.check_availability()
except WatchmanUnavailable:
return StatReloader()
return WatchmanReloader()
def start_django(reloader, main_func, *args, **kwargs):
ensure_echo_on()
main_func = check_errors(main_func)
django_main_thread = threading.Thread(
target=main_func, args=args, kwargs=kwargs, name="django-main-thread"
)
django_main_thread.daemon = True
django_main_thread.start()
while not reloader.should_stop:
try:
reloader.run(django_main_thread)
except WatchmanUnavailable as ex:
# It's possible that the watchman service shuts down or otherwise
# becomes unavailable. In that case, use the StatReloader.
reloader = StatReloader()
logger.error("Error connecting to Watchman: %s", ex)
logger.info(
"Watching for file changes with %s", reloader.__class__.__name__
)
def run_with_reloader(main_func, *args, **kwargs):
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get(DJANGO_AUTORELOAD_ENV) == "true":
reloader = get_reloader()
logger.info(
"Watching for file changes with %s", reloader.__class__.__name__
)
start_django(reloader, main_func, *args, **kwargs)
else:
exit_code = restart_with_reloader()
sys.exit(exit_code)
except KeyboardInterrupt:
pass
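# A minimal usage sketch, assuming a long-running callable such as a
# development server loop; serve_forever() here is hypothetical.
def _run_with_reloader_sketch():
    def serve_forever():
        while True:
            time.sleep(1)  # stand-in for the real serving loop

    # Watches Python modules and restarts serve_forever() when they change.
    run_with_reloader(serve_forever)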
|
1b0ab2c7d4b77362175d19fda35de846dbd01d714dc3be30112864cf4cd212f4 | from asgiref.local import Local
from django.conf import settings as django_settings
from django.utils.functional import cached_property
class ConnectionProxy:
"""Proxy for accessing a connection object's attributes."""
def __init__(self, connections, alias):
self.__dict__["_connections"] = connections
self.__dict__["_alias"] = alias
def __getattr__(self, item):
return getattr(self._connections[self._alias], item)
def __setattr__(self, name, value):
return setattr(self._connections[self._alias], name, value)
def __delattr__(self, name):
return delattr(self._connections[self._alias], name)
def __contains__(self, key):
return key in self._connections[self._alias]
def __eq__(self, other):
return self._connections[self._alias] == other
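# A minimal sketch of the forwarding behavior, with a plain object standing
# in for a real connection (everything below is hypothetical):
def _connection_proxy_sketch():
    class _FakeConnection:
        vendor = "sqlite"

    proxy = ConnectionProxy({"default": _FakeConnection()}, "default")
    return proxy.vendor  # forwarded to the underlying connection object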
class ConnectionDoesNotExist(Exception):
pass
class BaseConnectionHandler:
settings_name = None
exception_class = ConnectionDoesNotExist
thread_critical = False
def __init__(self, settings=None):
self._settings = settings
self._connections = Local(self.thread_critical)
@cached_property
def settings(self):
self._settings = self.configure_settings(self._settings)
return self._settings
def configure_settings(self, settings):
if settings is None:
settings = getattr(django_settings, self.settings_name)
return settings
def create_connection(self, alias):
raise NotImplementedError("Subclasses must implement create_connection().")
def __getitem__(self, alias):
try:
return getattr(self._connections, alias)
except AttributeError:
if alias not in self.settings:
raise self.exception_class(f"The connection '{alias}' doesn't exist.")
conn = self.create_connection(alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __delitem__(self, key):
delattr(self._connections, key)
def __iter__(self):
return iter(self.settings)
def all(self, initialized_only=False):
return [
self[alias]
for alias in self
# If initialized_only is True, return only initialized connections.
if not initialized_only or hasattr(self._connections, alias)
]
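# A minimal sketch of a concrete handler, assuming a hypothetical
# EXAMPLE_CONNECTIONS setting; ConnectionHandler in django.db.utils follows
# the same pattern with DATABASES.
class _ExampleConnectionHandler(BaseConnectionHandler):
    settings_name = "EXAMPLE_CONNECTIONS"  # hypothetical settings key

    def create_connection(self, alias):
        # Real subclasses build a backend-specific connection object from
        # self.settings[alias]; a plain dict stands in here.
        return {**self.settings[alias], "alias": alias}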
|
f16f810292a5af860d7da0dd439e381b2a7701861b5f61347720e64138951ebd | from django.core import signals
from django.db.utils import (
DEFAULT_DB_ALIAS,
DJANGO_VERSION_PICKLE_KEY,
ConnectionHandler,
ConnectionRouter,
DatabaseError,
DataError,
Error,
IntegrityError,
InterfaceError,
InternalError,
NotSupportedError,
OperationalError,
ProgrammingError,
)
from django.utils.connection import ConnectionProxy
__all__ = [
"connection",
"connections",
"router",
"DatabaseError",
"IntegrityError",
"InternalError",
"ProgrammingError",
"DataError",
"NotSupportedError",
"Error",
"InterfaceError",
"OperationalError",
"DEFAULT_DB_ALIAS",
"DJANGO_VERSION_PICKLE_KEY",
]
connections = ConnectionHandler()
router = ConnectionRouter()
# For backwards compatibility. Prefer connections['default'] instead.
connection = ConnectionProxy(connections, DEFAULT_DB_ALIAS)
# Register an event to reset saved queries when a Django request is started.
def reset_queries(**kwargs):
for conn in connections.all(initialized_only=True):
conn.queries_log.clear()
signals.request_started.connect(reset_queries)
# Register an event to reset transaction state and close connections past
# their lifetime.
def close_old_connections(**kwargs):
for conn in connections.all(initialized_only=True):
conn.close_if_unusable_or_obsolete()
signals.request_started.connect(close_old_connections)
signals.request_finished.connect(close_old_connections)
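# A minimal usage sketch, assuming configured settings; the "connection"
# proxy above behaves identically to connections[DEFAULT_DB_ALIAS].
def _connections_usage_sketch():
    with connections[DEFAULT_DB_ALIAS].cursor() as cursor:
        cursor.execute("SELECT 1")
        return cursor.fetchone()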
|
b1a6f232b1fe98ce0709b133fd0909c9198d18eda15339ecde92575dbfab9d4a | import pkgutil
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# For backwards compatibility with Django < 3.2
from django.utils.connection import ConnectionDoesNotExist # NOQA: F401
from django.utils.connection import BaseConnectionHandler
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = "default"
DJANGO_VERSION_PICKLE_KEY = "_django_version"
class Error(Exception):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DatabaseErrorWrapper:
"""
Context manager and decorator that reraises backend-specific database
exceptions using Django's common wrappers.
"""
def __init__(self, wrapper):
"""
wrapper is a database wrapper.
It must have a Database attribute defining PEP-249 exceptions.
"""
self.wrapper = wrapper
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
for dj_exc_type in (
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
DatabaseError,
InterfaceError,
Error,
):
db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
if issubclass(exc_type, db_exc_type):
dj_exc_value = dj_exc_type(*exc_value.args)
# Only set the 'errors_occurred' flag for errors that may make
# the connection unusable.
if dj_exc_type not in (DataError, IntegrityError):
self.wrapper.errors_occurred = True
raise dj_exc_value.with_traceback(traceback) from exc_value
def __call__(self, func):
# Note that we are intentionally not using @wraps here for performance
# reasons. Refs #21109.
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
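# A minimal sketch of the wrapping behavior, using stand-in classes for the
# PEP-249 driver module and the database wrapper (everything below is
# hypothetical):
def _database_error_wrapper_sketch():
    class _FakeDatabase:
        Error = Exception
        InterfaceError = type("InterfaceError", (Exception,), {})
        DatabaseError = type("DatabaseError", (Exception,), {})
        DataError = type("DataError", (DatabaseError,), {})
        OperationalError = type("OperationalError", (DatabaseError,), {})
        IntegrityError = type("IntegrityError", (DatabaseError,), {})
        InternalError = type("InternalError", (DatabaseError,), {})
        ProgrammingError = type("ProgrammingError", (DatabaseError,), {})
        NotSupportedError = type("NotSupportedError", (DatabaseError,), {})

    class _FakeWrapper:
        Database = _FakeDatabase
        errors_occurred = False

    try:
        with DatabaseErrorWrapper(_FakeWrapper()):
            raise _FakeDatabase.IntegrityError("duplicate key")
    except IntegrityError as exc:
        # The driver exception is re-raised as Django's IntegrityError.
        return type(exc).__name__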
def load_backend(backend_name):
"""
Return a database backend's "base" module given a fully qualified database
backend name, or raise an error if it doesn't exist.
"""
# This backend was renamed in Django 1.9.
if backend_name == "django.db.backends.postgresql_psycopg2":
backend_name = "django.db.backends.postgresql"
try:
return import_module("%s.base" % backend_name)
except ImportError as e_user:
# The database backend wasn't found. Display a helpful error message
# listing all built-in database backends.
import django.db.backends
builtin_backends = [
name
for _, name, ispkg in pkgutil.iter_modules(django.db.backends.__path__)
if ispkg and name not in {"base", "dummy"}
]
if backend_name not in ["django.db.backends.%s" % b for b in builtin_backends]:
backend_reprs = map(repr, sorted(builtin_backends))
raise ImproperlyConfigured(
"%r isn't an available database backend or couldn't be "
"imported. Check the above exception. To use one of the "
"built-in backends, use 'django.db.backends.XXX', where XXX "
"is one of:\n"
" %s" % (backend_name, ", ".join(backend_reprs))
) from e_user
else:
# If there's some other error, this must be an error in Django
raise
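# A minimal usage sketch: resolving a built-in backend's "base" module.
def _load_backend_sketch():
    base = load_backend("django.db.backends.sqlite3")
    return base.DatabaseWrapper  # the backend's connection wrapper class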
class ConnectionHandler(BaseConnectionHandler):
settings_name = "DATABASES"
    # Connections still need to be an actual thread local, as they're truly
    # thread-critical. Database backends should use @async_unsafe to protect
# their code from async contexts, but this will give those contexts
# separate connections in case it's needed as well. There's no cleanup
# after async contexts, though, so we don't allow that if we can help it.
thread_critical = True
def configure_settings(self, databases):
databases = super().configure_settings(databases)
if databases == {}:
databases[DEFAULT_DB_ALIAS] = {"ENGINE": "django.db.backends.dummy"}
elif DEFAULT_DB_ALIAS not in databases:
raise ImproperlyConfigured(
f"You must define a '{DEFAULT_DB_ALIAS}' database."
)
elif databases[DEFAULT_DB_ALIAS] == {}:
databases[DEFAULT_DB_ALIAS]["ENGINE"] = "django.db.backends.dummy"
# Configure default settings.
for conn in databases.values():
conn.setdefault("ATOMIC_REQUESTS", False)
conn.setdefault("AUTOCOMMIT", True)
conn.setdefault("ENGINE", "django.db.backends.dummy")
if conn["ENGINE"] == "django.db.backends." or not conn["ENGINE"]:
conn["ENGINE"] = "django.db.backends.dummy"
conn.setdefault("CONN_MAX_AGE", 0)
conn.setdefault("CONN_HEALTH_CHECKS", False)
conn.setdefault("OPTIONS", {})
conn.setdefault("TIME_ZONE", None)
for setting in ["NAME", "USER", "PASSWORD", "HOST", "PORT"]:
conn.setdefault(setting, "")
test_settings = conn.setdefault("TEST", {})
default_test_settings = [
("CHARSET", None),
("COLLATION", None),
("MIGRATE", True),
("MIRROR", None),
("NAME", None),
]
for key, value in default_test_settings:
test_settings.setdefault(key, value)
return databases
@property
def databases(self):
return self.settings
def create_connection(self, alias):
db = self.settings[alias]
backend = load_backend(db["ENGINE"])
return backend.DatabaseWrapper(db, alias)
def close_all(self):
for alias in self:
try:
connection = getattr(self._connections, alias)
except AttributeError:
continue
connection.close()
class ConnectionRouter:
def __init__(self, routers=None):
"""
If routers is not specified, default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@cached_property
def routers(self):
if self._routers is None:
self._routers = settings.DATABASE_ROUTERS
routers = []
for r in self._routers:
if isinstance(r, str):
router = import_string(r)()
else:
router = r
routers.append(router)
return routers
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
instance = hints.get("instance")
if instance is not None and instance._state.db:
return instance._state.db
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func("db_for_read")
db_for_write = _router_func("db_for_write")
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_migrate(self, db, app_label, **hints):
for router in self.routers:
try:
method = router.allow_migrate
except AttributeError:
# If the router doesn't have a method, skip to the next one.
continue
allow = method(db, app_label, **hints)
if allow is not None:
return allow
return True
def allow_migrate_model(self, db, model):
return self.allow_migrate(
db,
model._meta.app_label,
model_name=model._meta.model_name,
model=model,
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""Return app models allowed to be migrated on provided db."""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)]
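# A minimal sketch of a router honored by ConnectionRouter: all methods are
# optional, and returning None defers to the next router in the chain. The
# "replica" alias and "analytics" app label are hypothetical.
class _ReadReplicaRouterSketch:
    def db_for_read(self, model, **hints):
        return "replica" if model._meta.app_label == "analytics" else None

    def db_for_write(self, model, **hints):
        return None  # defer to the next router, then DEFAULT_DB_ALIAS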
|
6af731c65693bc8546567e826a953f0f16d8c99cc5ced7db91994dfd5d54f9da | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS,
FieldError,
ImproperlyConfigured,
ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput,
MultipleHiddenInput,
RadioSelect,
SelectMultiple,
)
from django.utils.text import capfirst, get_text_list
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
__all__ = (
"ModelForm",
"BaseModelForm",
"model_to_dict",
"fields_for_model",
"ModelChoiceField",
"ModelMultipleChoiceField",
"ALL_FIELDS",
"BaseModelFormSet",
"modelformset_factory",
"BaseInlineFormSet",
"inlineformset_factory",
"modelform_factory",
)
ALL_FIELDS = "__all__"
def construct_instance(form, instance, fields=None, exclude=None):
"""
Construct and return a model instance from the bound ``form``'s
``cleaned_data``, but do not save the returned instance to the database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if (
not f.editable
or isinstance(f, models.AutoField)
or f.name not in cleaned_data
):
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Leave defaults for fields that aren't in POST data, except for
# checkbox inputs because they don't appear in POST data if not checked.
if (
f.has_default()
and form[f.name].field.widget.value_omitted_from_data(
form.data, form.files, form.add_prefix(f.name)
)
and cleaned_data.get(f.name) in form[f.name].field.empty_values
):
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Return a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
    ``fields`` is an optional list of field names. If provided, return only the
    named fields.
    ``exclude`` is an optional list of field names. If provided, exclude the
    named fields from the returned dict, even if they are listed in the
    ``fields`` argument.
"""
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many):
if not getattr(f, "editable", False):
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
data[f.name] = f.value_from_object(instance)
return data
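# A minimal usage sketch (``author`` and ``AuthorForm`` are hypothetical):
#
#     initial = model_to_dict(author, fields=["name", "email"])
#     form = AuthorForm(initial=initial)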
def apply_limit_choices_to_to_formfield(formfield):
"""Apply limit_choices_to to the formfield's queryset if needed."""
from django.db.models import Exists, OuterRef, Q
if hasattr(formfield, "queryset") and hasattr(formfield, "get_limit_choices_to"):
limit_choices_to = formfield.get_limit_choices_to()
if limit_choices_to:
complex_filter = limit_choices_to
if not isinstance(complex_filter, Q):
complex_filter = Q(**limit_choices_to)
complex_filter &= Q(pk=OuterRef("pk"))
# Use Exists() to avoid potential duplicates.
formfield.queryset = formfield.queryset.filter(
Exists(formfield.queryset.model._base_manager.filter(complex_filter)),
)
def fields_for_model(
model,
fields=None,
exclude=None,
widgets=None,
formfield_callback=None,
localized_fields=None,
labels=None,
help_texts=None,
error_messages=None,
field_classes=None,
*,
apply_limit_choices_to=True,
):
"""
Return a dictionary containing form fields for the given model.
``fields`` is an optional list of field names. If provided, return only the
named fields.
``exclude`` is an optional list of field names. If provided, exclude the
named fields from the returned fields, even if they are listed in the
``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
``apply_limit_choices_to`` is a boolean indicating if limit_choices_to
should be applied to a field's queryset.
"""
field_dict = {}
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models import Field as ModelField
sortable_private_fields = [
f for f in opts.private_fields if isinstance(f, ModelField)
]
for f in sorted(
chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many)
):
if not getattr(f, "editable", False):
if (
fields is not None
and f.name in fields
and (exclude is None or f.name not in exclude)
):
raise FieldError(
"'%s' cannot be specified for %s model form as it is a "
"non-editable field" % (f.name, model.__name__)
)
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs["widget"] = widgets[f.name]
if localized_fields == ALL_FIELDS or (
localized_fields and f.name in localized_fields
):
kwargs["localize"] = True
if labels and f.name in labels:
kwargs["label"] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs["help_text"] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs["error_messages"] = error_messages[f.name]
if field_classes and f.name in field_classes:
kwargs["form_class"] = field_classes[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError("formfield_callback must be a function or callable")
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
if apply_limit_choices_to:
apply_limit_choices_to_to_formfield(formfield)
field_dict[f.name] = formfield
else:
ignored.append(f.name)
if fields:
field_dict = {
f: field_dict.get(f)
for f in fields
if (not exclude or f not in exclude) and f not in ignored
}
return field_dict
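# A minimal usage sketch (``Author`` is a hypothetical model):
#
#     fields = fields_for_model(
#         Author, fields=["name"], labels={"name": "Full name"}
#     )
#     fields["name"].label  # "Full name"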
class ModelFormOptions:
def __init__(self, options=None):
self.model = getattr(options, "model", None)
self.fields = getattr(options, "fields", None)
self.exclude = getattr(options, "exclude", None)
self.widgets = getattr(options, "widgets", None)
self.localized_fields = getattr(options, "localized_fields", None)
self.labels = getattr(options, "labels", None)
self.help_texts = getattr(options, "help_texts", None)
self.error_messages = getattr(options, "error_messages", None)
self.field_classes = getattr(options, "field_classes", None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
base_formfield_callback = None
for b in bases:
if hasattr(b, "Meta") and hasattr(b.Meta, "formfield_callback"):
base_formfield_callback = b.Meta.formfield_callback
break
formfield_callback = attrs.pop("formfield_callback", base_formfield_callback)
new_class = super().__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, "Meta", None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ["fields", "exclude", "localized_fields"]:
value = getattr(opts, opt)
if isinstance(value, str) and value != ALL_FIELDS:
msg = (
"%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?"
% {
"model": new_class.__name__,
"opt": opt,
"value": value,
}
)
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(
opts.model,
opts.fields,
opts.exclude,
opts.widgets,
formfield_callback,
opts.localized_fields,
opts.labels,
opts.help_texts,
opts.error_messages,
opts.field_classes,
# limit_choices_to will be applied during ModelForm.__init__().
apply_limit_choices_to=False,
)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = {k for k, v in fields.items() if not v}
missing_fields = none_model_fields.difference(new_class.declared_fields)
if missing_fields:
message = "Unknown field(s) (%s) specified for %s"
message = message % (", ".join(missing_fields), opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(
self,
data=None,
files=None,
auto_id="id_%s",
prefix=None,
initial=None,
error_class=ErrorList,
label_suffix=None,
empty_permitted=False,
instance=None,
use_required_attribute=None,
renderer=None,
):
opts = self._meta
if opts.model is None:
raise ValueError("ModelForm has no model class specified.")
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super().__init__(
data,
files,
auto_id,
prefix,
object_data,
error_class,
label_suffix,
empty_permitted,
use_required_attribute=use_required_attribute,
renderer=renderer,
)
for formfield in self.fields.values():
apply_limit_choices_to_to_formfield(formfield)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, exclude several types of fields from model
validation. See tickets #12507, #12521, #12553.
"""
exclude = set()
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.add(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.add(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.add(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors:
exclude.add(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field)
if (
not f.blank
and not form_field.required
and field_value in form_field.empty_values
):
exclude.add(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
# Allow the model generated by construct_instance() to raise
# ValidationError and have them handled in the same way as others.
if hasattr(errors, "error_dict"):
error_dict = errors.error_dict
else:
error_dict = {NON_FIELD_ERRORS: errors}
for field, messages in error_dict.items():
if (
field == NON_FIELD_ERRORS
and opts.error_messages
and NON_FIELD_ERRORS in opts.error_messages
):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (
isinstance(message, ValidationError)
and message.code in error_messages
):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.add(name)
try:
self.instance = construct_instance(
self, self.instance, opts.fields, opts.exclude
)
except ValidationError as e:
self._update_errors(e)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Call the instance's validate_unique() method and update the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def _save_m2m(self):
"""
Save the many-to-many fields and generic relations for this form.
"""
cleaned_data = self.cleaned_data
exclude = self._meta.exclude
fields = self._meta.fields
opts = self.instance._meta
# Note that for historical reasons we want to include also
# private_fields here. (GenericRelation was previously a fake
# m2m field).
for f in chain(opts.many_to_many, opts.private_fields):
if not hasattr(f, "save_form_data"):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(self.instance, cleaned_data[f.name])
def save(self, commit=True):
"""
Save this form's self.instance object if commit=True. Otherwise, add
a save_m2m() method to the form which can be called after the instance
is saved manually at a later time. Return the model instance.
"""
if self.errors:
raise ValueError(
"The %s could not be %s because the data didn't validate."
% (
self.instance._meta.object_name,
"created" if self.instance._state.adding else "changed",
)
)
if commit:
# If committing, save the instance and the m2m data immediately.
self.instance.save()
self._save_m2m()
else:
# If not committing, add a method to the form to allow deferred
# saving of m2m data.
self.save_m2m = self._save_m2m
return self.instance
save.alters_data = True
class ModelForm(BaseModelForm, metaclass=ModelFormMetaclass):
pass
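# A typical usage sketch of the commit=False flow described in
# BaseModelForm.save() (``ArticleForm`` and ``request`` are hypothetical):
#
#     form = ArticleForm(request.POST)
#     if form.is_valid():
#         article = form.save(commit=False)
#         article.owner = request.user  # fill in fields not on the form
#         article.save()
#         form.save_m2m()  # only present after save(commit=False)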
def modelform_factory(
model,
form=ModelForm,
fields=None,
exclude=None,
formfield_callback=None,
widgets=None,
localized_fields=None,
labels=None,
help_texts=None,
error_messages=None,
field_classes=None,
):
"""
Return a ModelForm containing form fields for the given model. You can
optionally pass a `form` argument to use as a starting point for
constructing the ModelForm.
``fields`` is an optional list of field names. If provided, include only
the named fields in the returned fields. If omitted or '__all__', use all
fields.
``exclude`` is an optional list of field names. If provided, exclude the
named fields from the returned fields, even if they are listed in the
``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {"model": model}
if fields is not None:
attrs["fields"] = fields
if exclude is not None:
attrs["exclude"] = exclude
if widgets is not None:
attrs["widgets"] = widgets
if localized_fields is not None:
attrs["localized_fields"] = localized_fields
if labels is not None:
attrs["labels"] = labels
if help_texts is not None:
attrs["help_texts"] = help_texts
if error_messages is not None:
attrs["error_messages"] = error_messages
if field_classes is not None:
attrs["field_classes"] = field_classes
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
bases = (form.Meta,) if hasattr(form, "Meta") else ()
Meta = type("Meta", bases, attrs)
if formfield_callback:
Meta.formfield_callback = staticmethod(formfield_callback)
# Give this new form class a reasonable name.
class_name = model.__name__ + "Form"
# Class attributes for the new form class.
form_class_attrs = {"Meta": Meta, "formfield_callback": formfield_callback}
if getattr(Meta, "fields", None) is None and getattr(Meta, "exclude", None) is None:
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
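# A minimal usage sketch (``Author`` is a hypothetical model):
#
#     AuthorForm = modelform_factory(Author, fields=["name", "birth_date"])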
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
# Set of fields that must be unique among forms of this set.
unique_fields = set()
def __init__(
self,
data=None,
files=None,
auto_id="id_%s",
prefix=None,
queryset=None,
*,
initial=None,
**kwargs,
):
self.queryset = queryset
self.initial_extra = initial
super().__init__(
**{
"data": data,
"files": files,
"auto_id": auto_id,
"prefix": prefix,
**kwargs,
}
)
def initial_form_count(self):
"""Return the number of forms that are required in this FormSet."""
if not self.is_bound:
return len(self.get_queryset())
return super().initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, "_object_dict"):
self._object_dict = {o.pk: o for o in self.get_queryset()}
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) to_python.
"""
while field.remote_field is not None:
field = field.remote_field.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
pk_required = i < self.initial_form_count()
if pk_required:
if self.is_bound:
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
try:
pk = self.data[pk_key]
except KeyError:
# The primary key is missing. The user may have tampered
# with POST data.
pass
else:
to_python = self._get_to_python(self.model._meta.pk)
try:
pk = to_python(pk)
except ValidationError:
# The primary key exists but is an invalid value. The
# user may have tampered with POST data.
pass
else:
kwargs["instance"] = self._existing_object(pk)
else:
kwargs["instance"] = self.get_queryset()[i]
elif self.initial_extra:
# Set initial values for extra forms
try:
kwargs["initial"] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
form = super()._construct_form(i, **kwargs)
if pk_required:
form.fields[self.model._meta.pk.name].required = True
return form
def get_queryset(self):
if not hasattr(self, "_queryset"):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Save and return a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Save and return an existing model instance for the given form."""
return form.save(commit=commit)
def delete_existing(self, obj, commit=True):
"""Deletes an existing model instance."""
if commit:
obj.delete()
def save(self, commit=True):
"""
Save model instances for every form, adding and changing instances
as necessary, and return the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
if self.edit_only:
return self.save_existing_objects(commit)
else:
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [
form
for form in self.forms
if form.is_valid() and form not in forms_to_delete
]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(
exclude=exclude
)
all_unique_checks.update(unique_checks)
all_date_checks.update(date_checks)
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# Get the data for the set of fields that must be unique among
# the forms.
row_data = (
field if field in self.unique_fields else form.cleaned_data[field]
for field in unique_check
if field in form.cleaned_data
)
# Reduce Model instances to their primary key values
row_data = tuple(
d._get_pk_val() if hasattr(d, "_get_pk_val")
# Prevent "unhashable type: list" errors later on.
else tuple(d) if isinstance(d, list) else d
for d in row_data
)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class(
[self.get_form_error()],
renderer=self.renderer,
)
# Remove the data from the cleaned_data dict since it
# was invalid.
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (
form.cleaned_data
and form.cleaned_data[field] is not None
and form.cleaned_data[unique_for] is not None
):
# if it's a date lookup we need to get the data for all the fields
if lookup == "date":
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class(
[self.get_form_error()],
renderer=self.renderer,
)
# Remove the data from the cleaned_data dict since it
# was invalid.
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return gettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return gettext(
"Please correct the duplicate data for %(field)s, which must be unique."
) % {
"field": get_text_list(unique_check, _("and")),
}
def get_date_error_message(self, date_check):
return gettext(
"Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s."
) % {
"field_name": date_check[2],
"date_field": date_check[3],
"lookup": str(date_check[1]),
}
def get_form_error(self):
return gettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
# If the pk is None, it means either:
# 1. The object is an unexpected empty model, created by invalid
# POST data such as an object outside the formset's queryset.
# 2. The object was already deleted from the database.
if obj.pk is None:
continue
if form in forms_to_delete:
self.deleted_objects.append(obj)
self.delete_existing(obj, commit=commit)
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, ForeignKey, OneToOneField
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
        # data back. Generally, pk.editable should be False, but for some
        # reason auto_created pk fields and AutoFields have editable=True, so
        # check for that as well.
def pk_is_not_editable(pk):
return (
(not pk.editable)
or (pk.auto_created or isinstance(pk, AutoField))
or (
pk.remote_field
and pk.remote_field.parent_link
and pk_is_not_editable(pk.remote_field.model._meta.pk)
)
)
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
# If we're adding the related instance, ignore its primary key
# as it could be an auto-generated default which isn't actually
# in the database.
pk_value = None if form.instance._state.adding else form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, (ForeignKey, OneToOneField)):
qs = pk.remote_field.model._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(
qs, initial=pk_value, required=False, widget=widget
)
super().add_fields(form, index)
def modelformset_factory(
model,
form=ModelForm,
formfield_callback=None,
formset=BaseModelFormSet,
extra=1,
can_delete=False,
can_order=False,
max_num=None,
fields=None,
exclude=None,
widgets=None,
validate_max=False,
localized_fields=None,
labels=None,
help_texts=None,
error_messages=None,
min_num=None,
validate_min=False,
field_classes=None,
absolute_max=None,
can_delete_extra=True,
renderer=None,
edit_only=False,
):
"""Return a FormSet class for the given Django model class."""
meta = getattr(form, "Meta", None)
if (
getattr(meta, "fields", fields) is None
and getattr(meta, "exclude", exclude) is None
):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(
model,
form=form,
fields=fields,
exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets,
localized_fields=localized_fields,
labels=labels,
help_texts=help_texts,
error_messages=error_messages,
field_classes=field_classes,
)
FormSet = formset_factory(
form,
formset,
extra=extra,
min_num=min_num,
max_num=max_num,
can_order=can_order,
can_delete=can_delete,
validate_min=validate_min,
validate_max=validate_max,
absolute_max=absolute_max,
can_delete_extra=can_delete_extra,
renderer=renderer,
)
FormSet.model = model
FormSet.edit_only = edit_only
return FormSet
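# A minimal usage sketch (``Author`` is a hypothetical model):
#
#     AuthorFormSet = modelformset_factory(Author, fields=["name"], extra=2)
#     formset = AuthorFormSet(queryset=Author.objects.all())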
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(
self,
data=None,
files=None,
instance=None,
save_as_new=False,
prefix=None,
queryset=None,
**kwargs,
):
if instance is None:
self.instance = self.fk.remote_field.model()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
self.unique_fields = {self.fk.name}
super().__init__(data, files, prefix=prefix, queryset=qs, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if self.form._meta.fields and self.fk.name not in self.form._meta.fields:
if isinstance(self.form._meta.fields, tuple):
self.form._meta.fields = list(self.form._meta.fields)
self.form._meta.fields.append(self.fk.name)
def initial_form_count(self):
if self.save_as_new:
return 0
return super().initial_form_count()
def _construct_form(self, i, **kwargs):
form = super()._construct_form(i, **kwargs)
if self.save_as_new:
mutable = getattr(form.data, "_mutable", None)
# Allow modifying an immutable QueryDict.
if mutable is not None:
form.data._mutable = True
            # Remove the primary key from the form's data; we are only
            # creating new instances.
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
if mutable is not None:
form.data._mutable = mutable
# Set the fk value here so that the form can do its validation.
fk_value = self.instance.pk
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
fk_value = getattr(self.instance, self.fk.remote_field.field_name)
fk_value = getattr(fk_value, "pk", fk_value)
setattr(form.instance, self.fk.get_attname(), fk_value)
return form
@classmethod
def get_default_prefix(cls):
return cls.fk.remote_field.get_accessor_name(model=cls.model).replace("+", "")
def save_new(self, form, commit=True):
# Ensure the latest copy of the related instance is present on each
# form (it may have been saved after the formset was originally
# instantiated).
setattr(form.instance, self.fk.name, self.instance)
return super().save_new(form, commit=commit)
def add_fields(self, form, index):
super().add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {"pk_field": True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
"label": getattr(
form.fields.get(name), "label", capfirst(self.fk.verbose_name)
)
}
# The InlineForeignKeyField assumes that the foreign key relation is
# based on the parent model's pk. If this isn't the case, set to_field
# to correctly resolve the initial form value.
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
kwargs["to_field"] = self.fk.remote_field.field_name
# If we're adding a new object, ignore a parent's auto-generated key
# as it will be regenerated on the save request.
if self.instance._state.adding:
if kwargs.get("to_field") is not None:
to_field = self.instance._meta.get_field(kwargs["to_field"])
else:
to_field = self.instance._meta.pk
if to_field.has_default():
setattr(self.instance, to_field.attname, None)
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super().get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Find and return the ForeignKey from model to parent if there is one
(return None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, raise an exception if there isn't a ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
parent_list = parent_model._meta.get_parent_list()
if (
not isinstance(fk, ForeignKey)
or (
# ForeignKey to proxy models.
fk.remote_field.model._meta.proxy
and fk.remote_field.model._meta.proxy_for_model not in parent_list
)
or (
# ForeignKey to concrete models.
not fk.remote_field.model._meta.proxy
and fk.remote_field.model != parent_model
and fk.remote_field.model not in parent_list
)
):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s'."
% (fk_name, parent_model._meta.label)
)
elif not fks_to_parent:
raise ValueError(
"'%s' has no field named '%s'." % (model._meta.label, fk_name)
)
else:
# Try to discover what the ForeignKey from model to parent_model is
parent_list = parent_model._meta.get_parent_list()
fks_to_parent = [
f
for f in opts.fields
if isinstance(f, ForeignKey)
and (
f.remote_field.model == parent_model
or f.remote_field.model in parent_list
or (
f.remote_field.model._meta.proxy
and f.remote_field.model._meta.proxy_for_model in parent_list
)
)
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif not fks_to_parent:
if can_fail:
return
raise ValueError(
"'%s' has no ForeignKey to '%s'."
% (
model._meta.label,
parent_model._meta.label,
)
)
else:
raise ValueError(
"'%s' has more than one ForeignKey to '%s'. You must specify "
"a 'fk_name' attribute."
% (
model._meta.label,
parent_model._meta.label,
)
)
return fk
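# Illustrative sketch (hypothetical models) of when fk_name is required: a
# model with two ForeignKeys to the same parent is ambiguous, so
# _get_foreign_key() raises ValueError unless fk_name disambiguates:
#
#     class Match(models.Model):
#         home_team = models.ForeignKey(Team, models.CASCADE, related_name="+")
#         away_team = models.ForeignKey(Team, models.CASCADE, related_name="+")
#
#     _get_foreign_key(Team, Match)                       # raises ValueError
#     _get_foreign_key(Team, Match, fk_name="home_team")  # returns the field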
def inlineformset_factory(
parent_model,
model,
form=ModelForm,
formset=BaseInlineFormSet,
fk_name=None,
fields=None,
exclude=None,
extra=3,
can_order=False,
can_delete=True,
max_num=None,
formfield_callback=None,
widgets=None,
validate_max=False,
localized_fields=None,
labels=None,
help_texts=None,
error_messages=None,
min_num=None,
validate_min=False,
field_classes=None,
absolute_max=None,
can_delete_extra=True,
renderer=None,
edit_only=False,
):
"""
Return an ``InlineFormSet`` for the given kwargs.
``fk_name`` must be provided if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
"form": form,
"formfield_callback": formfield_callback,
"formset": formset,
"extra": extra,
"can_delete": can_delete,
"can_order": can_order,
"fields": fields,
"exclude": exclude,
"min_num": min_num,
"max_num": max_num,
"widgets": widgets,
"validate_min": validate_min,
"validate_max": validate_max,
"localized_fields": localized_fields,
"labels": labels,
"help_texts": help_texts,
"error_messages": error_messages,
"field_classes": field_classes,
"absolute_max": absolute_max,
"can_delete_extra": can_delete_extra,
"renderer": renderer,
"edit_only": edit_only,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
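# Usage sketch (illustrative; Author and Book are hypothetical models where
# Book has a single ForeignKey to Author):
#
#     BookFormSet = inlineformset_factory(Author, Book, fields=["title"], extra=1)
#     author = Author.objects.get(pk=1)
#     formset = BookFormSet(request.POST or None, instance=author)
#     if formset.is_valid():
#         formset.save()  # each new Book is saved with book.author = author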
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
    A basic integer field that validates that the given value matches the
    given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
"invalid_choice": _("The inline value did not match the parent instance."),
}
def __init__(self, parent_instance, *args, pk_field=False, to_field=None, **kwargs):
self.parent_instance = parent_instance
self.pk_field = pk_field
self.to_field = to_field
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super().__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
            # If there is no value, fall back to the parent instance.
return self.parent_instance
        # Ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if str(value) != str(orig):
raise ValidationError(
self.error_messages["invalid_choice"], code="invalid_choice"
)
return self.parent_instance
def has_changed(self, initial, data):
return False
class ModelChoiceIteratorValue:
def __init__(self, value, instance):
self.value = value
self.instance = instance
def __str__(self):
return str(self.value)
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if isinstance(other, ModelChoiceIteratorValue):
other = other.value
return self.value == other
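# Illustrative note: accepting plain values in __eq__ (and delegating __hash__
# to the wrapped value) keeps comparisons against raw choice values working,
# e.g. during widget rendering:
#
#     ModelChoiceIteratorValue(1, obj) == 1        # True
#     ModelChoiceIteratorValue(1, obj) in {1, 2}   # True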
class ModelChoiceIterator:
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
queryset = self.queryset
# Can't use iterator() when queryset uses prefetch_related()
if not queryset._prefetch_related_lookups:
queryset = queryset.iterator()
for obj in queryset:
yield self.choice(obj)
def __len__(self):
# count() adds a query but uses less memory since the QuerySet results
# won't be cached. In most cases, the choices will only be iterated on,
# and __len__() won't be called.
return self.queryset.count() + (1 if self.field.empty_label is not None else 0)
def __bool__(self):
return self.field.empty_label is not None or self.queryset.exists()
def choice(self, obj):
return (
ModelChoiceIteratorValue(self.field.prepare_value(obj), obj),
self.field.label_from_instance(obj),
)
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
"invalid_choice": _(
"Select a valid choice. That choice is not one of the available choices."
),
}
iterator = ModelChoiceIterator
def __init__(
self,
queryset,
*,
empty_label="---------",
required=True,
widget=None,
label=None,
initial=None,
help_text="",
to_field_name=None,
limit_choices_to=None,
blank=False,
**kwargs,
):
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(
self,
required=required,
widget=widget,
label=label,
initial=initial,
help_text=help_text,
**kwargs,
)
if (required and initial is not None) or (
isinstance(self.widget, RadioSelect) and not blank
):
self.empty_label = None
else:
self.empty_label = empty_label
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.to_field_name = to_field_name
def get_limit_choices_to(self):
"""
Return ``limit_choices_to`` for this form field.
If it is a callable, invoke it and return the result.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
if self.queryset is not None:
result.queryset = self.queryset.all()
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = None if queryset is None else queryset.all()
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
    # This method is used by ModelChoiceIterator to create object labels.
    # Override label_from_instance() to customize the label.
def label_from_instance(self, obj):
"""
Convert objects into strings and generate the labels for the choices
presented by this object. Subclasses can override this method to
customize the display of the choices.
"""
return str(obj)
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, "_choices"):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return self.iterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, "_meta"):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super().prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or "pk"
if isinstance(value, self.queryset.model):
value = getattr(value, key)
value = self.queryset.get(**{key: value})
except (ValueError, TypeError, self.queryset.model.DoesNotExist):
raise ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": value},
)
return value
def validate(self, value):
return Field.validate(self, value)
def has_changed(self, initial, data):
if self.disabled:
return False
initial_value = initial if initial is not None else ""
data_value = data if data is not None else ""
return str(self.prepare_value(initial_value)) != str(data_value)
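# Usage sketch (illustrative; Author is a hypothetical model):
#
#     class BookForm(forms.Form):
#         author = ModelChoiceField(queryset=Author.objects.all())
#
# label_from_instance() is the supported hook for customizing choice labels:
#
#     class AuthorChoiceField(ModelChoiceField):
#         def label_from_instance(self, obj):
#             return obj.get_full_name()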
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
"invalid_list": _("Enter a list of values."),
"invalid_choice": _(
"Select a valid choice. %(value)s is not one of the available choices."
),
"invalid_pk_value": _("“%(pk)s” is not a valid value."),
}
def __init__(self, queryset, **kwargs):
super().__init__(queryset, empty_label=None, **kwargs)
def to_python(self, value):
if not value:
return []
return list(self._check_values(value))
def clean(self, value):
value = self.prepare_value(value)
if self.required and not value:
raise ValidationError(self.error_messages["required"], code="required")
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(
self.error_messages["invalid_list"],
code="invalid_list",
)
qs = self._check_values(value)
        # Since this overrides the inherited ModelChoiceField.clean(),
        # run custom validators here.
self.run_validators(value)
return qs
def _check_values(self, value):
"""
Given a list of possible PK values, return a QuerySet of the
corresponding objects. Raise a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or "pk"
        # Deduplicate the given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages["invalid_list"],
code="invalid_list",
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages["invalid_pk_value"],
code="invalid_pk_value",
params={"pk": pk},
)
qs = self.queryset.filter(**{"%s__in" % key: value})
pks = {str(getattr(o, key)) for o in qs}
for val in value:
if str(val) not in pks:
raise ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": val},
)
return qs
def prepare_value(self, value):
if (
hasattr(value, "__iter__")
and not isinstance(value, str)
and not hasattr(value, "_meta")
):
prepare_value = super().prepare_value
return [prepare_value(v) for v in value]
return super().prepare_value(value)
def has_changed(self, initial, data):
if self.disabled:
return False
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = {str(value) for value in self.prepare_value(initial)}
data_set = {str(value) for value in data}
return data_set != initial_set
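# Usage sketch (illustrative; Tag is a hypothetical model and "active" a
# hypothetical field). clean() returns a QuerySet, so the cleaned data can be
# filtered further:
#
#     tags = ModelMultipleChoiceField(queryset=Tag.objects.all(), required=False)
#     ...
#     selected = form.cleaned_data["tags"].filter(active=True)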
def modelform_defines_fields(form_class):
return hasattr(form_class, "_meta") and (
form_class._meta.fields is not None or form_class._meta.exclude is not None
)
import copy
import inspect
import warnings
from functools import partialmethod
from itertools import chain
import django
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS,
FieldDoesNotExist,
FieldError,
MultipleObjectsReturned,
ObjectDoesNotExist,
ValidationError,
)
from django.db import (
DJANGO_VERSION_PICKLE_KEY,
DatabaseError,
connection,
connections,
router,
transaction,
)
from django.db.models import NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value
from django.db.models.constants import LOOKUP_SEP
from django.db.models.constraints import CheckConstraint, UniqueConstraint
from django.db.models.deletion import CASCADE, Collector
from django.db.models.fields.related import (
ForeignObjectRel,
OneToOneField,
lazy_related_operation,
resolve_relation,
)
from django.db.models.functions import Coalesce
from django.db.models.manager import Manager
from django.db.models.options import Options
from django.db.models.query import F, Q
from django.db.models.signals import (
class_prepared,
post_init,
post_save,
pre_init,
pre_save,
)
from django.db.models.utils import make_model_tuple
from django.utils.encoding import force_str
from django.utils.hashable import make_hashable
from django.utils.text import capfirst, get_text_list
from django.utils.translation import gettext_lazy as _
class Deferred:
def __repr__(self):
return "<Deferred field>"
def __str__(self):
return "<Deferred field>"
DEFERRED = Deferred()
def subclass_exception(name, bases, module, attached_to):
"""
Create exception subclass. Used by ModelBase below.
The exception is created in a way that allows it to be pickled, assuming
that the returned exception class will be added as an attribute to the
'attached_to' class.
"""
return type(
name,
bases,
{
"__module__": module,
"__qualname__": "%s.%s" % (attached_to.__qualname__, name),
},
)
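# Illustrative effect (hypothetical model): Author.DoesNotExist is built by
# subclass_exception() with __qualname__ "Author.DoesNotExist", so instances
# survive a pickle round-trip once the class is attached to the model:
#
#     pickle.loads(pickle.dumps(Author.DoesNotExist("missing")))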
def _has_contribute_to_class(value):
# Only call contribute_to_class() if it's bound.
return not inspect.isclass(value) and hasattr(value, "contribute_to_class")
class ModelBase(type):
"""Metaclass for all models."""
def __new__(cls, name, bases, attrs, **kwargs):
super_new = super().__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop("__module__")
new_attrs = {"__module__": module}
classcell = attrs.pop("__classcell__", None)
if classcell is not None:
new_attrs["__classcell__"] = classcell
attr_meta = attrs.pop("Meta", None)
# Pass all attrs without a (Django-specific) contribute_to_class()
# method to type.__new__() so that they're properly initialized
# (i.e. __set_name__()).
contributable_attrs = {}
for obj_name, obj in attrs.items():
if _has_contribute_to_class(obj):
contributable_attrs[obj_name] = obj
else:
new_attrs[obj_name] = obj
new_class = super_new(cls, name, bases, new_attrs, **kwargs)
abstract = getattr(attr_meta, "abstract", False)
meta = attr_meta or getattr(new_class, "Meta", None)
base_meta = getattr(new_class, "_meta", None)
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, "app_label", None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and isn't in an application in "
"INSTALLED_APPS." % (module, name)
)
else:
app_label = app_config.label
new_class.add_to_class("_meta", Options(meta, app_label))
if not abstract:
new_class.add_to_class(
"DoesNotExist",
subclass_exception(
"DoesNotExist",
tuple(
x.DoesNotExist
for x in parents
if hasattr(x, "_meta") and not x._meta.abstract
)
or (ObjectDoesNotExist,),
module,
attached_to=new_class,
),
)
new_class.add_to_class(
"MultipleObjectsReturned",
subclass_exception(
"MultipleObjectsReturned",
tuple(
x.MultipleObjectsReturned
for x in parents
if hasattr(x, "_meta") and not x._meta.abstract
)
or (MultipleObjectsReturned,),
module,
attached_to=new_class,
),
)
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, "ordering"):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, "get_latest_by"):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError(
"%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)
)
# Add remaining attributes (those with a contribute_to_class() method)
# to the class.
for obj_name, obj in contributable_attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = chain(
new_class._meta.local_fields,
new_class._meta.local_many_to_many,
new_class._meta.private_fields,
)
field_names = {f.name for f in new_fields}
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, "_meta")]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy model '%s'." % name
)
else:
continue
if base is None:
base = parent
elif parent._meta.concrete_model is not base._meta.concrete_model:
raise TypeError(
"Proxy model '%s' has more than one non-abstract model base "
"class." % name
)
if base is None:
raise TypeError(
"Proxy model '%s' has no non-abstract model base class." % name
)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, "_meta"):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField) and field.remote_field.parent_link:
related = resolve_relation(new_class, field.remote_field.model)
parent_links[make_model_tuple(related)] = field
# Track fields inherited from base models.
inherited_attributes = set()
# Do the appropriate setup for any model parents.
for base in new_class.mro():
if base not in parents or not hasattr(base, "_meta"):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
inherited_attributes.update(base.__dict__)
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
if not base._meta.abstract:
# Check for clashes between locally declared fields and those
# on the base classes.
for field in parent_fields:
if field.name in field_names:
raise FieldError(
"Local field %r in class %r clashes with field of "
"the same name from base class %r."
% (
field.name,
name,
base.__name__,
)
)
else:
inherited_attributes.add(field.name)
# Concrete classes...
base = base._meta.concrete_model
base_key = make_model_tuple(base)
if base_key in parent_links:
field = parent_links[base_key]
elif not is_proxy:
attr_name = "%s_ptr" % base._meta.model_name
field = OneToOneField(
base,
on_delete=CASCADE,
name=attr_name,
auto_created=True,
parent_link=True,
)
if attr_name in field_names:
raise FieldError(
"Auto-generated field '%s' in class %r for "
"parent_link to base class %r clashes with "
"declared field of the same name."
% (
attr_name,
name,
base.__name__,
)
)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
base_parents = base._meta.parents.copy()
# Add fields from abstract base class if it wasn't overridden.
for field in parent_fields:
if (
field.name not in field_names
and field.name not in new_class.__dict__
and field.name not in inherited_attributes
):
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# Replace parent links defined on this base by the new
# field. It will be appropriately resolved if required.
if field.one_to_one:
for parent, parent_link in base_parents.items():
if field == parent_link:
base_parents[parent] = new_field
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base_parents)
# Inherit private fields (like GenericForeignKey) from the parent
# class
for field in base._meta.private_fields:
if field.name in field_names:
if not base._meta.abstract:
raise FieldError(
"Local field %r in class %r clashes with field of "
"the same name from base class %r."
% (
field.name,
name,
base.__name__,
)
)
else:
field = copy.deepcopy(field)
if not base._meta.abstract:
field.mti_inherited = True
new_class.add_to_class(field.name, field)
# Copy indexes so that index names are unique when models extend an
# abstract model.
new_class._meta.indexes = [
copy.deepcopy(idx) for idx in new_class._meta.indexes
]
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
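    # Illustrative summary (hypothetical model): defining a model runs the
    # machinery above, so the finished class carries generated attributes:
    #
    #     class Author(models.Model):
    #         name = models.CharField(max_length=100)
    #
    #     Author._meta          # Options built from the inner Meta
    #     Author.DoesNotExist   # subclass of ObjectDoesNotExist
    #     Author.objects        # auto-created default Manager (see _prepare())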
def add_to_class(cls, name, value):
if _has_contribute_to_class(value):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""Create some methods once self._meta has been populated."""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = partialmethod(
cls._get_next_or_previous_in_order, is_next=True
)
cls.get_previous_in_order = partialmethod(
cls._get_next_or_previous_in_order, is_next=False
)
# Defer creating accessors on the foreign class until it has been
# created and registered. If remote_field is None, we're ordering
# with respect to a GenericForeignKey and don't know what the
# foreign class is - we'll add those accessors later in
# contribute_to_class().
if opts.order_with_respect_to.remote_field:
wrt = opts.order_with_respect_to
remote = wrt.remote_field.model
lazy_related_operation(make_foreign_order_accessors, cls, remote)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (
cls.__name__,
", ".join(f.name for f in opts.fields),
)
get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(
opts.label_lower
)
if get_absolute_url_override:
setattr(cls, "get_absolute_url", get_absolute_url_override)
if not opts.managers:
if any(f.name == "objects" for f in opts.fields):
raise ValueError(
"Model %s must specify a custom Manager, because it has a "
"field named 'objects'." % cls.__name__
)
manager = Manager()
manager.auto_created = True
cls.add_to_class("objects", manager)
# Set the name of _meta.indexes. This can't be done in
# Options.contribute_to_class() because fields haven't been added to
# the model at that point.
for index in cls._meta.indexes:
if not index.name:
index.set_name_with_model(cls)
class_prepared.send(sender=cls)
@property
def _base_manager(cls):
return cls._meta.base_manager
@property
def _default_manager(cls):
return cls._meta.default_manager
class ModelStateCacheDescriptor:
"""
Upon first access, replace itself with an empty dictionary on the instance.
"""
def __set_name__(self, owner, name):
self.attribute_name = name
def __get__(self, instance, cls=None):
if instance is None:
return self
res = instance.__dict__[self.attribute_name] = {}
return res
class ModelState:
"""Store model instance state."""
db = None
# If true, uniqueness validation checks will consider this a new, unsaved
# object. Necessary for correct validation of new instances of objects with
# explicit (non-auto) PKs. This impacts validation only; it has no effect
# on the actual save.
adding = True
fields_cache = ModelStateCacheDescriptor()
related_managers_cache = ModelStateCacheDescriptor()
def __getstate__(self):
state = self.__dict__.copy()
if "fields_cache" in state:
state["fields_cache"] = self.fields_cache.copy()
# Manager instances stored in related_managers_cache won't necessarily
# be deserializable if they were dynamically created via an inner
# scope, e.g. create_forward_many_to_many_manager() and
# create_generic_related_manager().
if "related_managers_cache" in state:
state["related_managers_cache"] = {}
return state
class Model(metaclass=ModelBase):
def __init__(self, *args, **kwargs):
# Alias some things as locals to avoid repeat global lookups
cls = self.__class__
opts = self._meta
_setattr = setattr
_DEFERRED = DEFERRED
if opts.abstract:
raise TypeError("Abstract models cannot be instantiated.")
pre_init.send(sender=cls, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
        # There is a rather weird disparity here; if kwargs, it's set, then
        # args overrides it. It should be one or the other; don't duplicate
        # the work. The reason for the kwargs check is that the standard
        # iterator passes in by args, and instantiation for iteration is 33%
        # faster.
if len(args) > len(opts.concrete_fields):
            # Daft, but matches the old exception, sans the error message.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(opts.concrete_fields)
            # The ordering of the zip calls matters - zip throws
            # StopIteration when an iter throws it. So if the first iter
            # throws it, the second is *not* consumed. We rely on this, so
            # don't change the order without changing the logic.
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(opts.fields)
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
if kwargs.pop(field.name, NOT_PROVIDED) is not NOT_PROVIDED:
raise TypeError(
f"{cls.__qualname__}() got both positional and "
f"keyword arguments for field '{field.name}'."
)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# Virtual field
if field.attname not in kwargs and field.column is None:
continue
if kwargs:
if isinstance(field.remote_field, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
if rel_obj is not _DEFERRED:
_setattr(self, field.name, rel_obj)
else:
if val is not _DEFERRED:
_setattr(self, field.attname, val)
if kwargs:
property_names = opts._property_names
unexpected = ()
for prop, value in kwargs.items():
# Any remaining kwargs must correspond to properties or virtual
# fields.
if prop in property_names:
if value is not _DEFERRED:
_setattr(self, prop, value)
else:
try:
opts.get_field(prop)
except FieldDoesNotExist:
unexpected += (prop,)
else:
if value is not _DEFERRED:
_setattr(self, prop, value)
if unexpected:
unexpected_names = ", ".join(repr(n) for n in unexpected)
raise TypeError(
f"{cls.__name__}() got unexpected keyword arguments: "
f"{unexpected_names}"
)
super().__init__()
post_init.send(sender=cls, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if len(values) != len(cls._meta.concrete_fields):
values_iter = iter(values)
values = [
next(values_iter) if f.attname in field_names else DEFERRED
for f in cls._meta.concrete_fields
]
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def __str__(self):
return "%s object (%s)" % (self.__class__.__name__, self.pk)
def __eq__(self, other):
if not isinstance(other, Model):
return NotImplemented
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self.pk
if my_pk is None:
return self is other
return my_pk == other.pk
def __hash__(self):
if self.pk is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self.pk)
def __reduce__(self):
data = self.__getstate__()
data[DJANGO_VERSION_PICKLE_KEY] = django.__version__
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id,), data
def __getstate__(self):
"""Hook to allow choosing the attributes to pickle."""
state = self.__dict__.copy()
state["_state"] = copy.copy(state["_state"])
# memoryview cannot be pickled, so cast it to bytes and store
# separately.
_memoryview_attrs = []
for attr, value in state.items():
if isinstance(value, memoryview):
_memoryview_attrs.append((attr, bytes(value)))
if _memoryview_attrs:
state["_memoryview_attrs"] = _memoryview_attrs
for attr, value in _memoryview_attrs:
state.pop(attr)
return state
def __setstate__(self, state):
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
if pickled_version != django.__version__:
warnings.warn(
"Pickled model instance's Django version %s does not "
"match the current version %s."
% (pickled_version, django.__version__),
RuntimeWarning,
stacklevel=2,
)
else:
warnings.warn(
"Pickled model instance's Django version is not specified.",
RuntimeWarning,
stacklevel=2,
)
if "_memoryview_attrs" in state:
for attr, value in state.pop("_memoryview_attrs"):
state[attr] = memoryview(value)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
meta = meta or self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
for parent_link in self._meta.parents.values():
if parent_link and parent_link != self._meta.pk:
setattr(self, parent_link.target_field.attname, value)
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
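    # Illustrative note (hypothetical model with a custom primary key): the
    # property above makes pk an alias for the concrete primary key field:
    #
    #     class Book(models.Model):
    #         isbn = models.CharField(primary_key=True, max_length=13)
    #
    #     book.pk  # reads and writes book.isbn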
def get_deferred_fields(self):
"""
Return a set containing names of deferred fields for this instance.
"""
return {
f.attname
for f in self._meta.concrete_fields
if f.attname not in self.__dict__
}
def refresh_from_db(self, using=None, fields=None):
"""
Reload field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
any database. The using parameter will override the default.
Fields can be used to specify which fields to reload. The fields
should be an iterable of field attnames. If fields is None, then
all non-deferred fields are reloaded.
When accessing deferred fields of an instance, the deferred loading
of the field will call this method.
"""
if fields is None:
self._prefetched_objects_cache = {}
else:
prefetched_objects_cache = getattr(self, "_prefetched_objects_cache", ())
for field in fields:
if field in prefetched_objects_cache:
del prefetched_objects_cache[field]
fields.remove(field)
if not fields:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
"are not allowed in fields." % LOOKUP_SEP
)
hints = {"instance": self}
db_instance_qs = self.__class__._base_manager.db_manager(
using, hints=hints
).filter(pk=self.pk)
# Use provided fields, if not set then reload all non-deferred fields.
deferred_fields = self.get_deferred_fields()
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif deferred_fields:
fields = [
f.attname
for f in self._meta.concrete_fields
if f.attname not in deferred_fields
]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
# Clear cached foreign keys.
if field.is_relation and field.is_cached(self):
field.delete_cached_value(self)
# Clear cached relations.
for field in self._meta.related_objects:
if field.is_cached(self):
field.delete_cached_value(self)
self._state.db = db_instance._state.db
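    # Usage sketch (illustrative):
    #
    #     obj.refresh_from_db()                  # reload all non-deferred fields
    #     obj.refresh_from_db(fields=["title"])  # reload only the given attnames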
def serializable_value(self, field_name):
"""
Return the value of the field name for this instance. If the field is
a foreign key, return the id value instead of the object. If there's
no Field object with this name on the model, return the model
attribute's value.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(
self, force_insert=False, force_update=False, using=None, update_fields=None
):
"""
Save the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
self._prepare_related_fields_for_save(operation_name="save")
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
deferred_fields = self.get_deferred_fields()
if update_fields is not None:
            # If update_fields is empty, skip the save. We also check for
            # no-op saves later on for inheritance cases. This bailout is
            # still needed for skipping signal sending.
if not update_fields:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError(
"The following fields do not exist in this model, are m2m "
"fields, or are non-concrete fields: %s"
% ", ".join(non_model_fields)
)
# If saving to the same database, and this model is deferred, then
# automatically do an "update_fields" save on the loaded fields.
elif not force_insert and deferred_fields and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, "through"):
field_names.add(field.attname)
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(
using=using,
force_insert=force_insert,
force_update=force_update,
update_fields=update_fields,
)
save.alters_data = True
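    # Usage sketch (illustrative):
    #
    #     obj.title = "New title"
    #     obj.save(update_fields=["title"])  # UPDATE touching only that column
    #     obj.save(update_fields=[])         # no-op: returns before signals fire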
def save_base(
self,
raw=False,
force_insert=False,
force_update=False,
using=None,
update_fields=None,
):
"""
Handle the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
        The 'raw' argument tells save_base() not to save any parent models and
        not to make any changes to the values before saving. This is used by
        fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or update_fields
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
pre_save.send(
sender=origin,
instance=self,
raw=raw,
using=using,
update_fields=update_fields,
)
# A transaction isn't needed if one query is issued.
if meta.parents:
context_manager = transaction.atomic(using=using, savepoint=False)
else:
context_manager = transaction.mark_for_rollback_on_error(using=using)
with context_manager:
parent_inserted = False
if not raw:
parent_inserted = self._save_parents(cls, using, update_fields)
updated = self._save_table(
raw,
cls,
force_insert or parent_inserted,
force_update,
using,
update_fields,
)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
post_save.send(
sender=origin,
instance=self,
created=(not updated),
update_fields=update_fields,
raw=raw,
using=using,
)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""Save all the parents of cls using values from self."""
meta = cls._meta
inserted = False
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (
field
and getattr(self, parent._meta.pk.attname) is None
and getattr(self, field.attname) is not None
):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
parent_inserted = self._save_parents(
cls=parent, using=using, update_fields=update_fields
)
updated = self._save_table(
cls=parent,
using=using,
update_fields=update_fields,
force_insert=parent_inserted,
)
if not updated:
inserted = True
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
                # Since we didn't have an instance of the parent handy, set
                # attname directly, bypassing the descriptor. Invalidate the
                # related object cache, in case it's been accidentally
                # populated. A fresh instance will be re-built from the
                # database if necessary.
if field.is_cached(self):
field.delete_cached_value(self)
return inserted
def _save_table(
self,
raw=False,
cls=None,
force_insert=False,
force_update=False,
using=None,
update_fields=None,
):
"""
Do the heavy-lifting involved in saving. Update or insert the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [
f
for f in non_pks
if f.name in update_fields or f.attname in update_fields
]
pk_val = self._get_pk_val(meta)
if pk_val is None:
pk_val = meta.pk.get_pk_value_on_save(self)
setattr(self, meta.pk.attname, pk_val)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# Skip an UPDATE when adding an instance and primary key has a default.
if (
not raw
and not force_insert
and self._state.adding
and meta.pk.default
and meta.pk.default is not NOT_PROVIDED
):
force_insert = True
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [
(
f,
None,
(getattr(self, f.attname) if raw else f.pre_save(self, False)),
)
for f in non_pks
]
forced_update = update_fields or force_update
updated = self._do_update(
base_qs, using, pk_val, values, update_fields, forced_update
)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
                # If this is a model with an order_with_respect_to option,
                # auto-populate the _order field.
field = meta.order_with_respect_to
filter_args = field.get_filter_kwargs_for_object(self)
self._order = (
cls._base_manager.using(using)
.filter(**filter_args)
.aggregate(
_order__max=Coalesce(
ExpressionWrapper(
Max("_order") + Value(1), output_field=IntegerField()
),
Value(0),
),
)["_order__max"]
)
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if f is not meta.auto_field]
returning_fields = meta.db_returning_fields
results = self._do_insert(
cls._base_manager, using, fields, returning_fields, raw
)
if results:
for value, field in zip(results[0], returning_fields):
setattr(self, field.attname, value)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
Try to update the model. Return True if the model was updated (if an
update query was done and a matching row was found in the DB).
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
return (
filtered.exists()
and
# It may happen that the object is deleted from the DB right after
# this check, causing the subsequent UPDATE to return zero matching
# rows. The same result can occur in some rare cases when the
# database returns zero despite the UPDATE being executed
# successfully (a row is matched and updated). In order to
# distinguish these two cases, the object's existence in the
# database is again checked for if the UPDATE query returns 0.
(filtered._update(values) > 0 or filtered.exists())
)
return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, returning_fields, raw):
"""
Do an INSERT. If returning_fields is defined then this method should
return the newly created data for the model.
"""
return manager._insert(
[self],
fields=fields,
returning_fields=returning_fields,
using=using,
raw=raw,
)
def _prepare_related_fields_for_save(self, operation_name, fields=None):
# Ensure that a model instance without a PK hasn't been assigned to
# a ForeignKey or OneToOneField on this model. If the field is
# nullable, allowing the save would result in silent data loss.
for field in self._meta.concrete_fields:
if fields and field not in fields:
continue
# If the related field isn't cached, then an instance hasn't been
# assigned and there's no need to worry about this check.
if field.is_relation and field.is_cached(self):
obj = getattr(self, field.name, None)
if not obj:
continue
# A pk may have been assigned manually to a model instance not
# saved to the database (or auto-generated in a case like
# UUIDField), but we allow the save to proceed and rely on the
# database to raise an IntegrityError if applicable. If
# constraints aren't supported by the database, there's the
# unavoidable risk of data corruption.
if obj.pk is None:
# Remove the object from a related instance cache.
if not field.remote_field.multiple:
field.remote_field.delete_cached_value(obj)
raise ValueError(
"%s() prohibited to prevent data loss due to unsaved "
"related object '%s'." % (operation_name, field.name)
)
elif getattr(self, field.attname) in field.empty_values:
# Use pk from related object if it has been saved after
# an assignment.
setattr(self, field.attname, obj.pk)
# If the relationship's pk/to_field was changed, clear the
# cached relationship.
if getattr(obj, field.target_field.attname) != getattr(
self, field.attname
):
field.delete_cached_value(self)
def delete(self, using=None, keep_parents=False):
if self.pk is None:
raise ValueError(
"%s object can't be deleted because its %s attribute is set "
"to None." % (self._meta.object_name, self._meta.pk.attname)
)
using = using or router.db_for_write(self.__class__, instance=self)
collector = Collector(using=using, origin=self)
collector.collect([self], keep_parents=keep_parents)
return collector.delete()
delete.alters_data = True
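    # Usage sketch (illustrative; labels are hypothetical): delete() returns
    # the total number of rows removed and a per-model breakdown:
    #
    #     obj.delete()  # -> (3, {"app.Book": 1, "app.Review": 2})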
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
choices_dict = dict(make_hashable(field.flatchoices))
# force_str() to coerce lazy strings.
return force_str(
choices_dict.get(make_hashable(value), value), strings_only=True
)
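    # Usage sketch (illustrative; hypothetical choices field). The generated
    # get_FOO_display() methods are bound to the implementation above:
    #
    #     class Book(models.Model):
    #         status = models.CharField(
    #             max_length=1, choices=[("d", "Draft"), ("p", "Published")]
    #         )
    #
    #     book.get_status_display()  # "Published" when book.status == "p"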
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = "gt" if is_next else "lt"
order = "" if is_next else "-"
param = getattr(self, field.attname)
q = Q((field.name, param), (f"pk__{op}", self.pk), _connector=Q.AND)
q = Q(q, (f"{field.name}__{op}", param), _connector=Q.OR)
qs = (
self.__class__._default_manager.using(self._state.db)
.filter(**kwargs)
.filter(q)
.order_by("%s%s" % (order, field.name), "%spk" % order)
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist(
"%s matching query does not exist." % self.__class__._meta.object_name
)
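    # Usage sketch (illustrative; assumes a hypothetical non-null
    # DateTimeField named "created"):
    #
    #     obj.get_next_by_created()      # next instance by (created, pk)
    #     obj.get_previous_by_created()  # previous; raises DoesNotExist if none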
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = "gt" if is_next else "lt"
order = "_order" if is_next else "-_order"
order_field = self._meta.order_with_respect_to
filter_args = order_field.get_filter_kwargs_for_object(self)
obj = (
self.__class__._default_manager.filter(**filter_args)
.filter(
**{
"_order__%s"
% op: self.__class__._default_manager.values("_order").filter(
**{self._meta.pk.name: self.pk}
)
}
)
.order_by(order)[:1]
.get()
)
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError(
"Unsaved model instance %r cannot be used in an ORM query." % self
)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Check unique constraints on the model and raise ValidationError if any
failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Return a list of checks to perform. Since validate_unique() could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check. Fields that did not validate should also be excluded,
but they need to be passed in via the exclude argument.
"""
if exclude is None:
exclude = set()
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
constraints = [(self.__class__, self._meta.total_unique_constraints)]
for parent_class in self._meta.get_parent_list():
if parent_class._meta.unique_together:
unique_togethers.append(
(parent_class, parent_class._meta.unique_together)
)
if parent_class._meta.total_unique_constraints:
constraints.append(
(parent_class, parent_class._meta.total_unique_constraints)
)
for model_class, unique_together in unique_togethers:
for check in unique_together:
if not any(name in exclude for name in check):
# Add the check if the field isn't excluded.
unique_checks.append((model_class, tuple(check)))
for model_class, model_constraints in constraints:
for constraint in model_constraints:
if not any(name in exclude for name in constraint.fields):
unique_checks.append((model_class, constraint.fields))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.get_parent_list():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, "date", name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, "year", name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, "month", name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique fields.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
# TODO: Handle multiple backends with different feature flags.
if lookup_value is None or (
lookup_value == ""
and connection.features.interprets_empty_strings_as_nulls
):
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(
self.unique_error_message(model_class, unique_check)
)
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
            # There's a ticket to add a date lookup; we can remove this
            # special case if that makes its way in.
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == "date":
lookup_kwargs["%s__day" % unique_for] = date.day
lookup_kwargs["%s__month" % unique_for] = date.month
lookup_kwargs["%s__year" % unique_for] = date.year
else:
lookup_kwargs["%s__%s" % (unique_for, lookup_type)] = getattr(
date, lookup_type
)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages["unique_for_date"],
code="unique_for_date",
params={
"model": self,
"model_name": capfirst(opts.verbose_name),
"lookup_type": lookup_type,
"field": field_name,
"field_label": capfirst(field.verbose_name),
"date_field": unique_for,
"date_field_label": capfirst(opts.get_field(unique_for).verbose_name),
},
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
"model": self,
"model_class": model_class,
"model_name": capfirst(opts.verbose_name),
"unique_check": unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params["field_label"] = capfirst(field.verbose_name)
return ValidationError(
message=field.error_messages["unique"],
code="unique",
params=params,
)
# unique_together
else:
field_labels = [
capfirst(opts.get_field(f).verbose_name) for f in unique_check
]
params["field_labels"] = get_text_list(field_labels, _("and"))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code="unique_together",
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Call clean_fields(), clean(), and validate_unique() on the model.
Raise a ValidationError for any errors that occur.
"""
errors = {}
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors:
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.add(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
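    # Usage sketch (illustrative): full_clean() is not called automatically by
    # save(), so validate explicitly before saving:
    #
    #     try:
    #         obj.full_clean(exclude={"slug"})
    #     except ValidationError as e:
    #         errors = e.message_dict  # {field_name: [messages], ...}
    #     else:
    #         obj.save()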
def clean_fields(self, exclude=None):
"""
Clean all fields and raise a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = set()
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = [
*cls._check_swappable(),
*cls._check_model(),
*cls._check_managers(**kwargs),
]
if not cls._meta.swapped:
databases = kwargs.get("databases") or []
errors += [
*cls._check_fields(**kwargs),
*cls._check_m2m_through_same_relationship(),
*cls._check_long_column_names(databases),
]
clash_errors = (
*cls._check_id_field(),
*cls._check_field_name_clashes(),
*cls._check_model_name_db_lookup_clashes(),
*cls._check_property_name_related_field_accessor_clashes(),
*cls._check_single_primary_key(),
)
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors += [
*cls._check_index_together(),
*cls._check_unique_together(),
*cls._check_indexes(databases),
*cls._check_ordering(),
*cls._check_constraints(databases),
*cls._check_default_pk(),
]
return errors
@classmethod
def _check_default_pk(cls):
if (
not cls._meta.abstract
and cls._meta.pk.auto_created
and
            # Inherited PKs are checked in parent models.
not (
isinstance(cls._meta.pk, OneToOneField)
and cls._meta.pk.remote_field.parent_link
)
and not settings.is_overridden("DEFAULT_AUTO_FIELD")
and cls._meta.app_config
and not cls._meta.app_config._is_default_auto_field_overridden
):
return [
checks.Warning(
f"Auto-created primary key used when not defining a "
f"primary key type, by default "
f"'{settings.DEFAULT_AUTO_FIELD}'.",
hint=(
f"Configure the DEFAULT_AUTO_FIELD setting or the "
f"{cls._meta.app_config.__class__.__qualname__}."
f"default_auto_field attribute to point to a subclass "
f"of AutoField, e.g. 'django.db.models.BigAutoField'."
),
obj=cls,
id="models.W042",
),
]
return []
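    # Illustrative fix for models.W042: point the project or app at an
    # explicit AutoField subclass:
    #
    #     # settings.py
    #     DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
    #
    #     # or on an AppConfig subclass:
    #     default_auto_field = "django.db.models.BigAutoField"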
@classmethod
def _check_swappable(cls):
"""Check if the swapped model exists."""
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'."
% cls._meta.swappable,
id="models.E001",
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split(".")
errors.append(
checks.Error(
"'%s' references '%s.%s', which has not been "
"installed, or is abstract."
% (cls._meta.swappable, app_label, model_name),
id="models.E002",
)
)
return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
id="models.E017",
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
"""Perform all manager checks."""
errors = []
for manager in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
"""Perform all field checks."""
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
"""Check if no relationship model is used by more than one m2m field."""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
for f in fields:
signature = (
f.remote_field.model,
cls,
f.remote_field.through,
f.remote_field.through_fields,
)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two identical many-to-many relations "
"through the intermediate model '%s'."
% f.remote_field.through._meta.label,
obj=cls,
id="models.E003",
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
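    # A rough sketch of models that would trigger models.E003 (hypothetical
    # models, not defined here): two M2M fields sharing the same intermediate
    # model and through_fields produce identical signatures.
    #
    #     class Person(models.Model):
    #         friends = models.ManyToManyField("self", through="Membership")
    #         colleagues = models.ManyToManyField("self", through="Membership")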
@classmethod
def _check_id_field(cls):
"""Check if `id` field is a primary key."""
fields = [
f for f in cls._meta.local_fields if f.name == "id" and f != cls._meta.pk
]
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == "id":
return [
checks.Error(
"'id' can only be used as a field name if the field also "
"sets 'primary_key=True'.",
obj=cls,
id="models.E004",
)
]
else:
return []
@classmethod
def _check_field_name_clashes(cls):
"""Forbid field shadowing in multi-table inheritance."""
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.get_parent_list():
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
"The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'."
% (clash.name, clash.model._meta, f.name, f.model._meta),
obj=cls,
id="models.E005",
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents, including auto-generated fields like multi-table inheritance
# child accessors.
for parent in cls._meta.get_parent_list():
for f in parent._meta.get_fields():
if f not in used_fields:
used_fields[f.name] = f
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
# Note that we may detect clash between user-defined non-unique
# field "id" and automatically added unique field "id", both
# defined at the same model. This special case is considered in
# _check_id_field and here we ignore it.
id_conflict = (
f.name == "id" and clash and clash.name == "id" and clash.model == cls
)
if clash and not id_conflict:
errors.append(
checks.Error(
"The field '%s' clashes with the field '%s' "
"from model '%s'." % (f.name, clash.name, clash.model._meta),
obj=f,
id="models.E006",
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id="models.E007",
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_model_name_db_lookup_clashes(cls):
errors = []
model_name = cls.__name__
if model_name.startswith("_") or model_name.endswith("_"):
errors.append(
checks.Error(
"The model name '%s' cannot start or end with an underscore "
"as it collides with the query lookup syntax." % model_name,
obj=cls,
id="models.E023",
)
)
elif LOOKUP_SEP in model_name:
errors.append(
checks.Error(
"The model name '%s' cannot contain double underscores as "
"it collides with the query lookup syntax." % model_name,
obj=cls,
id="models.E024",
)
)
return errors
@classmethod
def _check_property_name_related_field_accessor_clashes(cls):
errors = []
property_names = cls._meta._property_names
related_field_accessors = (
f.get_attname()
for f in cls._meta._get_fields(reverse=False)
if f.is_relation and f.related_model is not None
)
for accessor in related_field_accessors:
if accessor in property_names:
errors.append(
checks.Error(
"The property '%s' clashes with a related field "
"accessor." % accessor,
obj=cls,
id="models.E025",
)
)
return errors
@classmethod
def _check_single_primary_key(cls):
errors = []
if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1:
errors.append(
checks.Error(
"The model cannot have more than one field with "
"'primary_key=True'.",
obj=cls,
id="models.E026",
)
)
return errors
@classmethod
def _check_index_together(cls):
"""Check the value of "index_together" option."""
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
obj=cls,
id="models.E008",
)
]
elif any(
not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together
):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
obj=cls,
id="models.E009",
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
"""Check the value of "unique_together" option."""
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
obj=cls,
id="models.E010",
)
]
elif any(
not isinstance(fields, (tuple, list))
for fields in cls._meta.unique_together
):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
obj=cls,
id="models.E011",
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_indexes(cls, databases):
"""Check fields, names, and conditions of indexes."""
errors = []
references = set()
for index in cls._meta.indexes:
# Index name can't start with an underscore or a number, restricted
# for cross-database compatibility with Oracle.
if index.name[0] == "_" or index.name[0].isdigit():
errors.append(
checks.Error(
"The index name '%s' cannot start with an underscore "
"or a number." % index.name,
obj=cls,
id="models.E033",
),
)
if len(index.name) > index.max_name_length:
errors.append(
checks.Error(
"The index name '%s' cannot be longer than %d "
"characters." % (index.name, index.max_name_length),
obj=cls,
id="models.E034",
),
)
if index.contains_expressions:
for expression in index.expressions:
references.update(
ref[0] for ref in cls._get_expr_references(expression)
)
for db in databases:
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
if not (
connection.features.supports_partial_indexes
or "supports_partial_indexes" in cls._meta.required_db_features
) and any(index.condition is not None for index in cls._meta.indexes):
errors.append(
checks.Warning(
"%s does not support indexes with conditions."
% connection.display_name,
hint=(
"Conditions will be ignored. Silence this warning "
"if you don't care about it."
),
obj=cls,
id="models.W037",
)
)
if not (
connection.features.supports_covering_indexes
or "supports_covering_indexes" in cls._meta.required_db_features
) and any(index.include for index in cls._meta.indexes):
errors.append(
checks.Warning(
"%s does not support indexes with non-key columns."
% connection.display_name,
hint=(
"Non-key columns will be ignored. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W040",
)
)
if not (
connection.features.supports_expression_indexes
or "supports_expression_indexes" in cls._meta.required_db_features
) and any(index.contains_expressions for index in cls._meta.indexes):
errors.append(
checks.Warning(
"%s does not support indexes on expressions."
% connection.display_name,
hint=(
"An index won't be created. Silence this warning "
"if you don't care about it."
),
obj=cls,
id="models.W043",
)
)
fields = [
field for index in cls._meta.indexes for field, _ in index.fields_orders
]
fields += [include for index in cls._meta.indexes for include in index.include]
fields += references
errors.extend(cls._check_local_fields(fields, "indexes"))
return errors
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
        # In order to avoid hitting the relation tree prematurely, we use our
        # own forward_fields_map instead of using get_field().
forward_fields_map = {}
for field in cls._meta._get_fields(reverse=False):
forward_fields_map[field.name] = field
if hasattr(field, "attname"):
forward_fields_map[field.attname] = field
errors = []
for field_name in fields:
try:
field = forward_fields_map[field_name]
except KeyError:
errors.append(
checks.Error(
"'%s' refers to the nonexistent field '%s'."
% (
option,
field_name,
),
obj=cls,
id="models.E012",
)
)
else:
if isinstance(field.remote_field, models.ManyToManyRel):
errors.append(
checks.Error(
"'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'."
% (
option,
field_name,
option,
),
obj=cls,
id="models.E013",
)
)
elif field not in cls._meta.local_fields:
errors.append(
checks.Error(
"'%s' refers to field '%s' which is not local to model "
"'%s'." % (option, field_name, cls._meta.object_name),
hint="This issue may be caused by multi-table inheritance.",
obj=cls,
id="models.E016",
)
)
return errors
@classmethod
def _check_ordering(cls):
"""
Check "ordering" option -- is it a list of strings and do all fields
exist?
"""
if cls._meta._ordering_clash:
return [
checks.Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=cls,
id="models.E021",
),
]
if cls._meta.order_with_respect_to or not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
"'ordering' must be a tuple or list (even if you want to order by "
"only one field).",
obj=cls,
id="models.E014",
)
]
errors = []
fields = cls._meta.ordering
# Skip expressions and '?' fields.
fields = (f for f in fields if isinstance(f, str) and f != "?")
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith("-") else f) for f in fields)
# Separate related fields and non-related fields.
_fields = []
related_fields = []
for f in fields:
if LOOKUP_SEP in f:
related_fields.append(f)
else:
_fields.append(f)
fields = _fields
# Check related fields.
for field in related_fields:
_cls = cls
fld = None
for part in field.split(LOOKUP_SEP):
try:
# pk is an alias that won't be found by opts.get_field.
if part == "pk":
fld = _cls._meta.pk
else:
fld = _cls._meta.get_field(part)
if fld.is_relation:
_cls = fld.path_infos[-1].to_opts.model
else:
_cls = None
except (FieldDoesNotExist, AttributeError):
if fld is None or (
fld.get_transform(part) is None and fld.get_lookup(part) is None
):
errors.append(
checks.Error(
"'ordering' refers to the nonexistent field, "
"related field, or lookup '%s'." % field,
obj=cls,
id="models.E015",
)
)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = {f for f in fields if f != "pk"}
# Check for invalid or nonexistent fields in ordering.
invalid_fields = []
        # Any field name that is not present in valid_fields does not exist.
# Also, ordering by m2m fields is not allowed.
opts = cls._meta
valid_fields = set(
chain.from_iterable(
(f.name, f.attname)
if not (f.auto_created and not f.concrete)
else (f.field.related_query_name(),)
for f in chain(opts.fields, opts.related_objects)
)
)
invalid_fields.extend(fields - valid_fields)
for invalid_field in invalid_fields:
errors.append(
checks.Error(
"'ordering' refers to the nonexistent field, related "
"field, or lookup '%s'." % invalid_field,
obj=cls,
id="models.E015",
)
)
return errors
@classmethod
def _check_long_column_names(cls, databases):
"""
Check that any auto-generated column names are shorter than the limits
for each database in which the model will be created.
"""
if not databases:
return []
errors = []
allowed_len = None
db_alias = None
# Find the minimum max allowed length among all specified db_aliases.
for db in databases:
            # Skip databases where the model won't be created.
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
max_name_length = connection.ops.max_name_length()
if max_name_length is None or connection.features.truncates_names:
continue
else:
if allowed_len is None:
allowed_len = max_name_length
db_alias = db
elif max_name_length < allowed_len:
allowed_len = max_name_length
db_alias = db
if allowed_len is None:
return errors
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Check if auto-generated name for the field is too long
# for the database.
if (
f.db_column is None
and column_name is not None
and len(column_name) > allowed_len
):
errors.append(
checks.Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (column_name, allowed_len, db_alias),
hint="Set the column name manually using 'db_column'.",
obj=cls,
id="models.E018",
)
)
for f in cls._meta.local_many_to_many:
# Skip nonexistent models.
if isinstance(f.remote_field.through, str):
continue
# Check if auto-generated name for the M2M field is too long
# for the database.
for m2m in f.remote_field.through._meta.local_fields:
_, rel_name = m2m.get_attname_column()
if (
m2m.db_column is None
and rel_name is not None
and len(rel_name) > allowed_len
):
errors.append(
checks.Error(
"Autogenerated column name too long for M2M field "
'"%s". Maximum length is "%s" for database "%s".'
% (rel_name, allowed_len, db_alias),
hint=(
"Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'."
),
obj=cls,
id="models.E019",
)
)
return errors
@classmethod
def _get_expr_references(cls, expr):
if isinstance(expr, Q):
for child in expr.children:
if isinstance(child, tuple):
lookup, value = child
yield tuple(lookup.split(LOOKUP_SEP))
yield from cls._get_expr_references(value)
else:
yield from cls._get_expr_references(child)
elif isinstance(expr, F):
yield tuple(expr.name.split(LOOKUP_SEP))
elif hasattr(expr, "get_source_expressions"):
for src_expr in expr.get_source_expressions():
yield from cls._get_expr_references(src_expr)
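    # A rough illustration of the lookup paths this generator yields, assuming
    # a hypothetical SomeModel (each tuple is a reference split on LOOKUP_SEP):
    #
    #     list(SomeModel._get_expr_references(Q(price__gt=F("discount"))))
    #     # -> [("price", "gt"), ("discount",)]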
@classmethod
def _check_constraints(cls, databases):
errors = []
for db in databases:
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
if not (
connection.features.supports_table_check_constraints
or "supports_table_check_constraints" in cls._meta.required_db_features
) and any(
isinstance(constraint, CheckConstraint)
for constraint in cls._meta.constraints
):
errors.append(
checks.Warning(
"%s does not support check constraints."
% connection.display_name,
hint=(
"A constraint won't be created. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W027",
)
)
if not (
connection.features.supports_partial_indexes
or "supports_partial_indexes" in cls._meta.required_db_features
) and any(
isinstance(constraint, UniqueConstraint)
and constraint.condition is not None
for constraint in cls._meta.constraints
):
errors.append(
checks.Warning(
"%s does not support unique constraints with "
"conditions." % connection.display_name,
hint=(
"A constraint won't be created. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W036",
)
)
if not (
connection.features.supports_deferrable_unique_constraints
or "supports_deferrable_unique_constraints"
in cls._meta.required_db_features
) and any(
isinstance(constraint, UniqueConstraint)
and constraint.deferrable is not None
for constraint in cls._meta.constraints
):
errors.append(
checks.Warning(
"%s does not support deferrable unique constraints."
% connection.display_name,
hint=(
"A constraint won't be created. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W038",
)
)
if not (
connection.features.supports_covering_indexes
or "supports_covering_indexes" in cls._meta.required_db_features
) and any(
isinstance(constraint, UniqueConstraint) and constraint.include
for constraint in cls._meta.constraints
):
errors.append(
checks.Warning(
"%s does not support unique constraints with non-key "
"columns." % connection.display_name,
hint=(
"A constraint won't be created. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W039",
)
)
if not (
connection.features.supports_expression_indexes
or "supports_expression_indexes" in cls._meta.required_db_features
) and any(
isinstance(constraint, UniqueConstraint)
and constraint.contains_expressions
for constraint in cls._meta.constraints
):
errors.append(
checks.Warning(
"%s does not support unique constraints on "
"expressions." % connection.display_name,
hint=(
"A constraint won't be created. Silence this "
"warning if you don't care about it."
),
obj=cls,
id="models.W044",
)
)
fields = set(
chain.from_iterable(
(*constraint.fields, *constraint.include)
for constraint in cls._meta.constraints
if isinstance(constraint, UniqueConstraint)
)
)
references = set()
for constraint in cls._meta.constraints:
if isinstance(constraint, UniqueConstraint):
if (
connection.features.supports_partial_indexes
or "supports_partial_indexes"
not in cls._meta.required_db_features
) and isinstance(constraint.condition, Q):
references.update(
cls._get_expr_references(constraint.condition)
)
if (
connection.features.supports_expression_indexes
or "supports_expression_indexes"
not in cls._meta.required_db_features
) and constraint.contains_expressions:
for expression in constraint.expressions:
references.update(cls._get_expr_references(expression))
elif isinstance(constraint, CheckConstraint):
if (
connection.features.supports_table_check_constraints
or "supports_table_check_constraints"
not in cls._meta.required_db_features
) and isinstance(constraint.check, Q):
references.update(cls._get_expr_references(constraint.check))
for field_name, *lookups in references:
# pk is an alias that won't be found by opts.get_field.
if field_name != "pk":
fields.add(field_name)
if not lookups:
# If it has no lookups it cannot result in a JOIN.
continue
try:
if field_name == "pk":
field = cls._meta.pk
else:
field = cls._meta.get_field(field_name)
if not field.is_relation or field.many_to_many or field.one_to_many:
continue
except FieldDoesNotExist:
continue
# JOIN must happen at the first lookup.
first_lookup = lookups[0]
if (
hasattr(field, "get_transform")
and hasattr(field, "get_lookup")
and field.get_transform(first_lookup) is None
and field.get_lookup(first_lookup) is None
):
errors.append(
checks.Error(
"'constraints' refers to the joined field '%s'."
% LOOKUP_SEP.join([field_name] + lookups),
obj=cls,
id="models.E041",
)
)
errors.extend(cls._check_local_fields(fields, "constraints"))
return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(self, ordered_obj, id_list, using=None):
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update(
[ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list)],
["_order"],
)
def method_get_order(self, ordered_obj):
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
pk_name = ordered_obj._meta.pk.name
return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True)
def make_foreign_order_accessors(model, related_model):
setattr(
related_model,
"get_%s_order" % model.__name__.lower(),
partialmethod(method_get_order, model),
)
setattr(
related_model,
"set_%s_order" % model.__name__.lower(),
partialmethod(method_set_order, model),
)
########
# MISC #
########
def model_unpickle(model_id):
"""Used to unpickle Model subclasses with deferred fields."""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
return model.__new__(model)
model_unpickle.__safe_for_unpickle__ = True
|
cc7ec5137b292a5fcc399e24e423fd80f1c90408baaa00346e2c40969a33efba | import copy
import datetime
import functools
import inspect
from decimal import Decimal
from uuid import UUID
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q
from django.utils.deconstruct import deconstructible
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class SQLiteNumericMixin:
"""
Some expressions with output_field=DecimalField() must be cast to
numeric to be properly filtered.
"""
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
try:
if self.output_field.get_internal_type() == "DecimalField":
sql = "CAST(%s AS NUMERIC)" % sql
except FieldError:
pass
return sql, params
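# A schematic illustration of the cast (compiler and connection come from the
# ORM internals; the inner SQL is whatever as_sql() produced):
#
#     sql, params = decimal_expr.as_sqlite(compiler, connection)
#     # sql == "CAST(<inner sql> AS NUMERIC)"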
class Combinable:
"""
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = "+"
SUB = "-"
MUL = "*"
DIV = "/"
POW = "^"
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = "%%"
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = "&"
BITOR = "|"
BITLEFTSHIFT = "<<"
BITRIGHTSHIFT = ">>"
BITXOR = "#"
def _combine(self, other, connector, reversed):
if not hasattr(other, "resolve_expression"):
# everything must be resolvable to an expression
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __neg__(self):
return self._combine(-1, self.MUL, False)
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) & Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __xor__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) ^ Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitxor(self, other):
return self._combine(other, self.BITXOR, False)
def __or__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) | Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __rxor__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
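# A minimal usage sketch (hypothetical ``Book`` model): arithmetic connectors
# map onto Python operators, while bitwise combinations need the explicit
# methods because ``&``, ``|``, and ``^`` are reserved for combining
# conditional expressions.
#
#     Book.objects.annotate(total=F("price") * F("quantity"))
#     Book.objects.annotate(low_flags=F("flags").bitand(0x0F))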
class BaseExpression:
"""Base class for all query expressions."""
empty_result_set_value = NotImplemented
# aggregate specific fields
is_summary = False
_output_field_resolved_to_none = False
# Can the expression be used in a WHERE clause?
filterable = True
    # Can the expression be used as a source expression in Window?
window_compatible = False
def __init__(self, output_field=None):
if output_field is not None:
self.output_field = output_field
def __getstate__(self):
state = self.__dict__.copy()
state.pop("convert_value", None)
return state
def get_db_converters(self, connection):
return (
[]
if self.convert_value is self._convert_value_noop
else [self.convert_value]
) + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert not exprs
def _parse_expressions(self, *expressions):
return [
arg
if hasattr(arg, "resolve_expression")
else (F(arg) if isinstance(arg, str) else Value(arg))
for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Return: (sql, params)
        Where `sql` is a string containing ordered SQL placeholders to be
        replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
return any(
expr and expr.contains_aggregate for expr in self.get_source_expressions()
)
@cached_property
def contains_over_clause(self):
return any(
expr and expr.contains_over_clause for expr in self.get_source_expressions()
)
@cached_property
def contains_column_references(self):
return any(
expr and expr.contains_column_references
for expr in self.get_source_expressions()
)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
"""
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
        * for_save: whether this expression is about to be used in a save or update
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions(
[
expr.resolve_expression(query, allow_joins, reuse, summarize)
if expr
else None
for expr in c.get_source_expressions()
]
)
return c
@property
def conditional(self):
return isinstance(self.output_field, fields.BooleanField)
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""Return the output type of this expressions."""
output_field = self._resolve_output_field()
if output_field is None:
self._output_field_resolved_to_none = True
raise FieldError("Cannot resolve expression type, unknown output_field")
return output_field
@cached_property
def _output_field_or_none(self):
"""
Return the output field of this expression, or None if
_resolve_output_field() didn't return an output type.
"""
try:
return self.output_field
except FieldError:
if not self._output_field_resolved_to_none:
raise
def _resolve_output_field(self):
"""
Attempt to infer the output type of the expression. If the output
        fields of all source fields match, then simply infer the same type
here. This isn't always correct, but it makes sense most of the time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field with more complex computations.
If a source's output field resolves to None, exclude it from this check.
If all sources are None, then an error is raised higher up the stack in
the output_field property.
"""
sources_iter = (
source for source in self.get_source_fields() if source is not None
)
for output_field in sources_iter:
for source in sources_iter:
if not isinstance(output_field, source.__class__):
raise FieldError(
"Expression contains mixed types: %s, %s. You must "
"set output_field."
% (
output_field.__class__.__name__,
source.__class__.__name__,
)
)
return output_field
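    # A rough sketch of the failure mode (hypothetical fields): mixing, say, a
    # DecimalField with a FloatField raises FieldError, and the caller must
    # supply output_field explicitly, e.g. via ExpressionWrapper (defined
    # below):
    #
    #     ExpressionWrapper(F("price") * F("weight"), output_field=DecimalField())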
@staticmethod
def _convert_value_noop(value, expression, connection):
return value
@cached_property
def convert_value(self):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if internal_type == "FloatField":
return (
lambda value, expression, connection: None
if value is None
else float(value)
)
elif internal_type.endswith("IntegerField"):
return (
lambda value, expression, connection: None
if value is None
else int(value)
)
elif internal_type == "DecimalField":
return (
lambda value, expression, connection: None
if value is None
else Decimal(value)
)
return self._convert_value_noop
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[
e.relabeled_clone(change_map) if e is not None else None
for e in self.get_source_expressions()
]
)
return clone
def copy(self):
return copy.copy(self)
def get_group_by_cols(self, alias=None):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
if hasattr(expr, "flatten"):
yield from expr.flatten()
else:
yield expr
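    # A rough illustration (hypothetical expression):
    #
    #     list((F("a") + F("b")).flatten())
    #     # -> [<CombinedExpression: F(a) + F(b)>, F(a), F(b)]
    #
    # F() has no flatten() of its own, so the leaves are yielded directly.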
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, EXISTS expressions need
to be wrapped in CASE WHEN on Oracle.
"""
if hasattr(self.output_field, "select_format"):
return self.output_field.select_format(compiler, sql, params)
return sql, params
@deconstructible
class Expression(BaseExpression, Combinable):
"""An expression that can be combined with other expressions."""
@cached_property
def identity(self):
constructor_signature = inspect.signature(self.__init__)
args, kwargs = self._constructor_args
signature = constructor_signature.bind_partial(*args, **kwargs)
signature.apply_defaults()
arguments = signature.arguments.items()
identity = [self.__class__]
for arg, value in arguments:
if isinstance(value, fields.Field):
if value.name and value.model:
value = (value.model._meta.label, value.name)
else:
value = type(value)
else:
value = make_hashable(value)
identity.append((arg, value))
return tuple(identity)
def __eq__(self, other):
if not isinstance(other, Expression):
return NotImplemented
return other.identity == self.identity
def __hash__(self):
return hash(self.identity)
_connector_combinators = {
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
(fields.IntegerField, fields.DecimalField, fields.DecimalField),
(fields.DecimalField, fields.IntegerField, fields.DecimalField),
(fields.IntegerField, fields.FloatField, fields.FloatField),
(fields.FloatField, fields.IntegerField, fields.FloatField),
]
for connector in (Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV)
}
@functools.lru_cache(maxsize=128)
def _resolve_combined_type(connector, lhs_type, rhs_type):
combinators = _connector_combinators.get(connector, ())
for combinator_lhs_type, combinator_rhs_type, combined_type in combinators:
if issubclass(lhs_type, combinator_lhs_type) and issubclass(
rhs_type, combinator_rhs_type
):
return combined_type
class CombinedExpression(SQLiteNumericMixin, Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def _resolve_output_field(self):
try:
return super()._resolve_output_field()
except FieldError:
combined_type = _resolve_combined_type(
self.connector,
type(self.lhs.output_field),
type(self.rhs.output_field),
)
if combined_type is None:
raise
return combined_type()
def as_sql(self, compiler, connection):
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
lhs = self.lhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
rhs = self.rhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
if not isinstance(self, (DurationExpression, TemporalSubtraction)):
try:
lhs_type = lhs.output_field.get_internal_type()
except (AttributeError, FieldError):
lhs_type = None
try:
rhs_type = rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
rhs_type = None
if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type:
return DurationExpression(
self.lhs, self.connector, self.rhs
).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
datetime_fields = {"DateField", "DateTimeField", "TimeField"}
if (
self.connector == self.SUB
and lhs_type in datetime_fields
and lhs_type == rhs_type
):
return TemporalSubtraction(self.lhs, self.rhs).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
c = self.copy()
c.is_summary = summarize
c.lhs = lhs
c.rhs = rhs
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == "DurationField":
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
if self.connector in {Combinable.MUL, Combinable.DIV}:
try:
lhs_type = self.lhs.output_field.get_internal_type()
rhs_type = self.rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
pass
else:
allowed_fields = {
"DecimalField",
"DurationField",
"FloatField",
"IntegerField",
}
if lhs_type not in allowed_fields or rhs_type not in allowed_fields:
raise DatabaseError(
f"Invalid arguments for operator {self.connector}."
)
return sql, params
class TemporalSubtraction(CombinedExpression):
output_field = fields.DurationField()
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs)
rhs = compiler.compile(self.rhs)
return connection.ops.subtract_temporals(
self.lhs.output_field.get_internal_type(), lhs, rhs
)
@deconstructible(path="django.db.models.F")
class F(Combinable):
"""An object capable of resolving references to existing query objects."""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
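# A minimal usage sketch (hypothetical ``Entry`` model): the reference is
# resolved against the query's own columns, so the increment happens entirely
# in the database.
#
#     Entry.objects.filter(pk=entry_pk).update(views=F("views") + 1)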
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
contains_aggregate = False
def as_sql(self, *args, **kwargs):
raise ValueError(
"This queryset contains a reference to an outer query and may "
"only be used in a subquery."
)
def resolve_expression(self, *args, **kwargs):
col = super().resolve_expression(*args, **kwargs)
# FIXME: Rename possibly_multivalued to multivalued and fix detection
# for non-multivalued JOINs (e.g. foreign key fields). This should take
# into account only many-to-many and one-to-many relationships.
col.possibly_multivalued = LOOKUP_SEP in self.name
return col
def relabeled_clone(self, relabels):
return self
def get_group_by_cols(self, alias=None):
return []
class OuterRef(F):
contains_aggregate = False
def resolve_expression(self, *args, **kwargs):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def relabeled_clone(self, relabels):
return self
@deconstructible(path="django.db.models.Func")
class Func(SQLiteNumericMixin, Expression):
"""An SQL function call."""
function = None
template = "%(function)s(%(expressions)s)"
arg_joiner = ", "
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)"
% (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = {**self.extra, **self._get_repr_options()}
if extra:
extra = ", ".join(
str(key) + "=" + str(val) for key, val in sorted(extra.items())
)
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def _get_repr_options(self):
"""Return a dict of extra __init__() options to include in the repr."""
return {}
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(
self,
compiler,
connection,
function=None,
template=None,
arg_joiner=None,
**extra_context,
):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
try:
arg_sql, arg_params = compiler.compile(arg)
except EmptyResultSet:
empty_result_set_value = getattr(
arg, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
raise
arg_sql, arg_params = compiler.compile(Value(empty_result_set_value))
sql_parts.append(arg_sql)
params.extend(arg_params)
data = {**self.extra, **extra_context}
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data["function"] = function
else:
data.setdefault("function", self.function)
template = template or data.get("template", self.template)
arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner)
data["expressions"] = data["field"] = arg_joiner.join(sql_parts)
return template % data, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
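# A minimal sketch of a Func subclass (SQL's REVERSE; Django ships a similar
# function in django.db.models.functions). The hypothetical ``Author`` model
# is for illustration only; string arguments are parsed into F() references.
#
#     class Reverse(Func):
#         function = "REVERSE"
#         arity = 1
#
#     Author.objects.annotate(backwards=Reverse("name"))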
@deconstructible(path="django.db.models.Value")
class Value(SQLiteNumericMixin, Expression):
"""Represent a wrapped value as a node within an expression."""
# Provide a default value for `for_save` in order to allow unresolved
# instances to be compiled until a decision is taken in #25425.
for_save = False
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return f"{self.__class__.__name__}({self.value!r})"
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
output_field = self._output_field_or_none
if output_field is not None:
if self.for_save:
val = output_field.get_db_prep_save(val, connection=connection)
else:
val = output_field.get_db_prep_value(val, connection=connection)
if hasattr(output_field, "get_placeholder"):
return output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return "NULL", []
return "%s", [val]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self, alias=None):
return []
def _resolve_output_field(self):
if isinstance(self.value, str):
return fields.CharField()
if isinstance(self.value, bool):
return fields.BooleanField()
if isinstance(self.value, int):
return fields.IntegerField()
if isinstance(self.value, float):
return fields.FloatField()
if isinstance(self.value, datetime.datetime):
return fields.DateTimeField()
if isinstance(self.value, datetime.date):
return fields.DateField()
if isinstance(self.value, datetime.time):
return fields.TimeField()
if isinstance(self.value, datetime.timedelta):
return fields.DurationField()
if isinstance(self.value, Decimal):
return fields.DecimalField()
if isinstance(self.value, bytes):
return fields.BinaryField()
if isinstance(self.value, UUID):
return fields.UUIDField()
@property
def empty_result_set_value(self):
return self.value
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return "(%s)" % self.sql, self.params
def get_group_by_cols(self, alias=None):
return [self]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# Resolve parents fields used in raw SQL.
if query.model:
for parent in query.model._meta.get_parent_list():
for parent_field in parent._meta.local_fields:
_, column_name = parent_field.get_attname_column()
if column_name.lower() in self.sql.lower():
query.resolve_ref(
parent_field.name, allow_joins, reuse, summarize
)
break
return super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return "*", []
class Col(Expression):
contains_column_references = True
possibly_multivalued = False
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
alias, target = self.alias, self.target
identifiers = (alias, str(target)) if alias else (str(target),)
return "{}({})".format(self.__class__.__name__, ", ".join(identifiers))
def as_sql(self, compiler, connection):
alias, column = self.alias, self.target.column
identifiers = (alias, column) if alias else (column,)
sql = ".".join(map(compiler.quote_name_unless_alias, identifiers))
return sql, []
def relabeled_clone(self, relabels):
if self.alias is None:
return self
return self.__class__(
relabels.get(self.alias, self.alias), self.target, self.output_field
)
def get_group_by_cols(self, alias=None):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return self.output_field.get_db_converters(
connection
) + self.target.get_db_converters(connection)
class Ref(Expression):
"""
Reference to column alias of the query. For example, Ref('sum_cost') in
qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
(self.source,) = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return connection.ops.quote_name(self.refs), []
def get_group_by_cols(self, alias=None):
return [self]
class ExpressionList(Func):
"""
An expression containing multiple expressions. Can be used to provide a
list of expressions as an argument to another expression, like a partition
clause.
"""
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if not expressions:
raise ValueError(
"%s requires at least one expression." % self.__class__.__name__
)
super().__init__(*expressions, **extra)
def __str__(self):
return self.arg_joiner.join(str(arg) for arg in self.source_expressions)
def as_sqlite(self, compiler, connection, **extra_context):
# Casting to numeric is unnecessary.
return self.as_sql(compiler, connection, **extra_context)
class OrderByList(Func):
template = "ORDER BY %(expressions)s"
def __init__(self, *expressions, **extra):
expressions = (
(
OrderBy(F(expr[1:]), descending=True)
if isinstance(expr, str) and expr[0] == "-"
else expr
)
for expr in expressions
)
super().__init__(*expressions, **extra)
def as_sql(self, *args, **kwargs):
if not self.source_expressions:
return "", ()
return super().as_sql(*args, **kwargs)
@deconstructible(path="django.db.models.ExpressionWrapper")
class ExpressionWrapper(SQLiteNumericMixin, Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def get_group_by_cols(self, alias=None):
if isinstance(self.expression, Expression):
expression = self.expression.copy()
expression.output_field = self.output_field
return expression.get_group_by_cols(alias=alias)
        # For non-expressions, e.g. an SQL WHERE clause, the entire
# `expression` must be included in the GROUP BY clause.
return super().get_group_by_cols()
def as_sql(self, compiler, connection):
return compiler.compile(self.expression)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
@deconstructible(path="django.db.models.When")
class When(Expression):
template = "WHEN %(condition)s THEN %(result)s"
    # This isn't a complete conditional expression; it must be used in Case().
conditional = False
def __init__(self, condition=None, then=None, **lookups):
if lookups:
if condition is None:
condition, lookups = Q(**lookups), None
elif getattr(condition, "conditional", False):
condition, lookups = Q(condition, **lookups), None
if condition is None or not getattr(condition, "conditional", False) or lookups:
raise TypeError(
"When() supports a Q object, a boolean expression, or lookups "
"as a condition."
)
if isinstance(condition, Q) and not condition:
raise ValueError("An empty Q() can't be used as a When() condition.")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, "resolve_expression"):
c.condition = c.condition.resolve_expression(
query, allow_joins, reuse, summarize, False
)
c.result = c.result.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params["condition"] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params["result"] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self, alias=None):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
@deconstructible(path="django.db.models.Case")
class Case(SQLiteNumericMixin, Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = "CASE %(cases)s ELSE %(default)s END"
case_joiner = " "
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (
", ".join(str(c) for c in self.cases),
self.default,
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
*self.cases, self.default = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
c.default = c.default.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(
self, compiler, connection, template=None, case_joiner=None, **extra_context
):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = {**self.extra, **extra_context}
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params["cases"] = case_joiner.join(case_parts)
template_params["default"] = default_sql
sql_params.extend(default_params)
template = template or template_params.get("template", self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def get_group_by_cols(self, alias=None):
if not self.cases:
return self.default.get_group_by_cols(alias)
return super().get_group_by_cols(alias)
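# A minimal usage sketch for Case()/When() (hypothetical ``Client`` model):
#
#     Client.objects.annotate(
#         discount=Case(
#             When(account_type="gold", then=Value(Decimal("0.10"))),
#             default=Value(Decimal("0.00")),
#             output_field=DecimalField(),
#         )
#     )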
class Subquery(BaseExpression, Combinable):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = "(%(subquery)s)"
contains_aggregate = False
empty_result_set_value = None
def __init__(self, queryset, output_field=None, **extra):
# Allow the usage of both QuerySet and sql.Query objects.
self.query = getattr(queryset, "query", queryset).clone()
self.query.subquery = True
self.extra = extra
super().__init__(output_field)
def get_source_expressions(self):
return [self.query]
def set_source_expressions(self, exprs):
self.query = exprs[0]
def _resolve_output_field(self):
return self.query.output_field
def copy(self):
clone = super().copy()
clone.query = clone.query.clone()
return clone
@property
def external_aliases(self):
return self.query.external_aliases
def get_external_cols(self):
return self.query.get_external_cols()
def as_sql(self, compiler, connection, template=None, query=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
query = query or self.query
subquery_sql, sql_params = query.as_sql(compiler, connection)
template_params["subquery"] = subquery_sql[1:-1]
template = template or template_params.get("template", self.template)
sql = template % template_params
return sql, sql_params
def get_group_by_cols(self, alias=None):
# If this expression is referenced by an alias for an explicit GROUP BY
# through values() a reference to this expression and not the
# underlying .query must be returned to ensure external column
# references are not grouped against as well.
if alias:
return [Ref(alias, self)]
return self.query.get_group_by_cols()
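# A minimal usage sketch combining Subquery and OuterRef (hypothetical
# ``Post``/``Comment`` models):
#
#     newest = Comment.objects.filter(post=OuterRef("pk")).order_by("-created")
#     Post.objects.annotate(newest_body=Subquery(newest.values("body")[:1]))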
class Exists(Subquery):
template = "EXISTS(%(subquery)s)"
output_field = fields.BooleanField()
def __init__(self, queryset, negated=False, **kwargs):
self.negated = negated
super().__init__(queryset, **kwargs)
def __invert__(self):
clone = self.copy()
clone.negated = not self.negated
return clone
def as_sql(self, compiler, connection, template=None, **extra_context):
query = self.query.exists(using=connection.alias)
try:
sql, params = super().as_sql(
compiler,
connection,
template=template,
query=query,
**extra_context,
)
except EmptyResultSet:
if self.negated:
features = compiler.connection.features
if not features.supports_boolean_expr_in_select_clause:
return "1=1", ()
return compiler.compile(Value(True))
raise
if self.negated:
sql = "NOT {}".format(sql)
return sql, params
def select_format(self, compiler, sql, params):
# Wrap EXISTS() with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql)
return sql, params
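# Usage sketch (illustrative comment). Exists() renders as an EXISTS(...)
# clause and can be negated with ~, which flips self.negated in __invert__
# above instead of wrapping the SQL in an outer NOT. Comment and Post are
# hypothetical models:
#
#   from django.db.models import Exists, OuterRef
#   recent = Comment.objects.filter(post=OuterRef("pk"), created__year=2022)
#   Post.objects.filter(Exists(recent))    # EXISTS(...)
#   Post.objects.filter(~Exists(recent))   # NOT EXISTS(...)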
@deconstructible(path="django.db.models.OrderBy")
class OrderBy(Expression):
template = "%(expression)s %(ordering)s"
conditional = False
def __init__(
self, expression, descending=False, nulls_first=False, nulls_last=False
):
if nulls_first and nulls_last:
raise ValueError("nulls_first and nulls_last are mutually exclusive")
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, "resolve_expression"):
raise ValueError("expression must be an expression type")
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending
)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
template = template or self.template
if connection.features.supports_order_by_nulls_modifier:
if self.nulls_last:
template = "%s NULLS LAST" % template
elif self.nulls_first:
template = "%s NULLS FIRST" % template
else:
if self.nulls_last and not (
self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NULL, %s" % template
elif self.nulls_first and not (
not self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NOT NULL, %s" % template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
"expression": expression_sql,
"ordering": "DESC" if self.descending else "ASC",
**extra_context,
}
params *= template.count("%(expression)s")
return (template % placeholders).rstrip(), params
def as_oracle(self, compiler, connection):
# Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped
# in a CASE WHEN.
if connection.ops.conditional_expression_supported_in_where_clause(
self.expression
):
copy = self.copy()
copy.expression = Case(
When(self.expression, then=True),
default=False,
)
return copy.as_sql(compiler, connection)
return self.as_sql(compiler, connection)
def get_group_by_cols(self, alias=None):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
if self.nulls_first or self.nulls_last:
self.nulls_first = not self.nulls_first
self.nulls_last = not self.nulls_last
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
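# Usage sketch (illustrative comment). OrderBy instances are usually built
# from F() expressions rather than constructed directly; nulls_first and
# nulls_last map onto the NULLS FIRST/NULLS LAST handling in as_sql() above.
# Post is a hypothetical model:
#
#   from django.db.models import F
#   Post.objects.order_by(F("published").desc(nulls_last=True))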
class Window(SQLiteNumericMixin, Expression):
template = "%(expression)s OVER (%(window)s)"
# Although the main expression may either be an aggregate or an
# expression with an aggregate function, the GROUP BY that will
# be introduced in the query as a result is not desired.
contains_aggregate = False
contains_over_clause = True
filterable = False
def __init__(
self,
expression,
partition_by=None,
order_by=None,
frame=None,
output_field=None,
):
self.partition_by = partition_by
self.order_by = order_by
self.frame = frame
if not getattr(expression, "window_compatible", False):
raise ValueError(
"Expression '%s' isn't compatible with OVER clauses."
% expression.__class__.__name__
)
if self.partition_by is not None:
if not isinstance(self.partition_by, (tuple, list)):
self.partition_by = (self.partition_by,)
self.partition_by = ExpressionList(*self.partition_by)
if self.order_by is not None:
if isinstance(self.order_by, (list, tuple)):
self.order_by = OrderByList(*self.order_by)
elif isinstance(self.order_by, (BaseExpression, str)):
self.order_by = OrderByList(self.order_by)
else:
raise ValueError(
"Window.order_by must be either a string reference to a "
"field, an expression, or a list or tuple of them."
)
super().__init__(output_field=output_field)
self.source_expression = self._parse_expressions(expression)[0]
def _resolve_output_field(self):
return self.source_expression.output_field
def get_source_expressions(self):
return [self.source_expression, self.partition_by, self.order_by, self.frame]
def set_source_expressions(self, exprs):
self.source_expression, self.partition_by, self.order_by, self.frame = exprs
def as_sql(self, compiler, connection, template=None):
connection.ops.check_expression_support(self)
if not connection.features.supports_over_clause:
raise NotSupportedError("This backend does not support window expressions.")
expr_sql, params = compiler.compile(self.source_expression)
window_sql, window_params = [], []
if self.partition_by is not None:
sql_expr, sql_params = self.partition_by.as_sql(
compiler=compiler,
connection=connection,
template="PARTITION BY %(expressions)s",
)
window_sql.append(sql_expr)
window_params.extend(sql_params)
if self.order_by is not None:
order_sql, order_params = compiler.compile(self.order_by)
window_sql.append(order_sql)
window_params.extend(order_params)
if self.frame:
frame_sql, frame_params = compiler.compile(self.frame)
window_sql.append(frame_sql)
window_params.extend(frame_params)
params.extend(window_params)
template = template or self.template
return (
template % {"expression": expr_sql, "window": " ".join(window_sql).strip()},
params,
)
def as_sqlite(self, compiler, connection):
if isinstance(self.output_field, fields.DecimalField):
# Casting to numeric must be outside of the window expression.
copy = self.copy()
source_expressions = copy.get_source_expressions()
source_expressions[0].output_field = fields.FloatField()
copy.set_source_expressions(source_expressions)
return super(Window, copy).as_sqlite(compiler, connection)
return self.as_sql(compiler, connection)
def __str__(self):
return "{} OVER ({}{}{})".format(
str(self.source_expression),
"PARTITION BY " + str(self.partition_by) if self.partition_by else "",
str(self.order_by or ""),
str(self.frame or ""),
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
return []
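# Usage sketch (illustrative comment). A Window expression pairs a
# window-compatible function with optional PARTITION BY / ORDER BY / frame
# clauses. Employee is a hypothetical model:
#
#   from django.db.models import F, Window
#   from django.db.models.functions import Rank
#   Employee.objects.annotate(
#       pay_rank=Window(
#           expression=Rank(),
#           partition_by=[F("department")],
#           order_by=F("salary").desc(),
#       )
#   )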
class WindowFrame(Expression):
"""
    Model the frame clause in window expressions. There are two types of
    frame clauses, implemented as subclasses; however, all processing and
    validation (by no means intended to be complete) is done here. Providing
    an end for a frame is optional (the default is UNBOUNDED FOLLOWING,
    which is the last row in the frame).
"""
template = "%(frame_type)s BETWEEN %(start)s AND %(end)s"
def __init__(self, start=None, end=None):
self.start = Value(start)
self.end = Value(end)
def set_source_expressions(self, exprs):
self.start, self.end = exprs
def get_source_expressions(self):
return [self.start, self.end]
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
start, end = self.window_frame_start_end(
connection, self.start.value, self.end.value
)
return (
self.template
% {
"frame_type": self.frame_type,
"start": start,
"end": end,
},
[],
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
return []
def __str__(self):
if self.start.value is not None and self.start.value < 0:
start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING)
elif self.start.value is not None and self.start.value == 0:
start = connection.ops.CURRENT_ROW
else:
start = connection.ops.UNBOUNDED_PRECEDING
if self.end.value is not None and self.end.value > 0:
end = "%d %s" % (self.end.value, connection.ops.FOLLOWING)
elif self.end.value is not None and self.end.value == 0:
end = connection.ops.CURRENT_ROW
else:
end = connection.ops.UNBOUNDED_FOLLOWING
return self.template % {
"frame_type": self.frame_type,
"start": start,
"end": end,
}
def window_frame_start_end(self, connection, start, end):
raise NotImplementedError("Subclasses must implement window_frame_start_end().")
class RowRange(WindowFrame):
frame_type = "ROWS"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_rows_start_end(start, end)
class ValueRange(WindowFrame):
frame_type = "RANGE"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_range_start_end(start, end)
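# Usage sketch (illustrative comment). Frames restrict which rows a window
# function sees; start/end follow the encoding used by WindowFrame.__str__
# above (negative means PRECEDING, 0 means CURRENT ROW, None means
# UNBOUNDED). Employee is a hypothetical model:
#
#   from django.db.models import Avg, F, RowRange, Window
#   Employee.objects.annotate(
#       moving_avg=Window(
#           expression=Avg("salary"),
#           order_by=F("hire_date").asc(),
#           frame=RowRange(start=-2, end=0),  # two preceding rows + current
#       )
#   )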
from enum import Enum
from django.db.models.expressions import ExpressionList, F
from django.db.models.indexes import IndexExpression
from django.db.models.query_utils import Q
from django.db.models.sql.query import Query
__all__ = ["BaseConstraint", "CheckConstraint", "Deferrable", "UniqueConstraint"]
class BaseConstraint:
def __init__(self, name):
self.name = name
@property
def contains_expressions(self):
return False
def constraint_sql(self, model, schema_editor):
raise NotImplementedError("This method must be implemented by a subclass.")
def create_sql(self, model, schema_editor):
raise NotImplementedError("This method must be implemented by a subclass.")
def remove_sql(self, model, schema_editor):
raise NotImplementedError("This method must be implemented by a subclass.")
def deconstruct(self):
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
path = path.replace("django.db.models.constraints", "django.db.models")
return (path, (), {"name": self.name})
def clone(self):
_, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
class CheckConstraint(BaseConstraint):
def __init__(self, *, check, name):
self.check = check
if not getattr(check, "conditional", False):
raise TypeError(
"CheckConstraint.check must be a Q instance or boolean expression."
)
super().__init__(name)
def _get_check_sql(self, model, schema_editor):
query = Query(model=model, alias_cols=False)
where = query.build_where(self.check)
compiler = query.get_compiler(connection=schema_editor.connection)
sql, params = where.as_sql(compiler, schema_editor.connection)
return sql % tuple(schema_editor.quote_value(p) for p in params)
def constraint_sql(self, model, schema_editor):
check = self._get_check_sql(model, schema_editor)
return schema_editor._check_sql(self.name, check)
def create_sql(self, model, schema_editor):
check = self._get_check_sql(model, schema_editor)
return schema_editor._create_check_sql(model, self.name, check)
def remove_sql(self, model, schema_editor):
return schema_editor._delete_check_sql(model, self.name)
def __repr__(self):
return "<%s: check=%s name=%s>" % (
self.__class__.__qualname__,
self.check,
repr(self.name),
)
def __eq__(self, other):
if isinstance(other, CheckConstraint):
return self.name == other.name and self.check == other.check
return super().__eq__(other)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
kwargs["check"] = self.check
return path, args, kwargs
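# Usage sketch (illustrative comment). A CheckConstraint is declared in a
# model's Meta.constraints; _get_check_sql() above compiles its Q or boolean
# expression against a throwaway Query for that model. Ticket is a
# hypothetical model:
#
#   from django.db import models
#   from django.db.models import CheckConstraint, Q
#
#   class Ticket(models.Model):
#       age = models.IntegerField()
#
#       class Meta:
#           constraints = [
#               CheckConstraint(check=Q(age__gte=18), name="age_gte_18"),
#           ]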
class Deferrable(Enum):
DEFERRED = "deferred"
IMMEDIATE = "immediate"
# A similar format was proposed for Python 3.10.
def __repr__(self):
return f"{self.__class__.__qualname__}.{self._name_}"
class UniqueConstraint(BaseConstraint):
def __init__(
self,
*expressions,
fields=(),
name=None,
condition=None,
deferrable=None,
include=None,
opclasses=(),
):
if not name:
raise ValueError("A unique constraint must be named.")
if not expressions and not fields:
raise ValueError(
"At least one field or expression is required to define a "
"unique constraint."
)
if expressions and fields:
raise ValueError(
"UniqueConstraint.fields and expressions are mutually exclusive."
)
if not isinstance(condition, (type(None), Q)):
raise ValueError("UniqueConstraint.condition must be a Q instance.")
if condition and deferrable:
raise ValueError("UniqueConstraint with conditions cannot be deferred.")
if include and deferrable:
raise ValueError("UniqueConstraint with include fields cannot be deferred.")
if opclasses and deferrable:
raise ValueError("UniqueConstraint with opclasses cannot be deferred.")
if expressions and deferrable:
raise ValueError("UniqueConstraint with expressions cannot be deferred.")
if expressions and opclasses:
raise ValueError(
"UniqueConstraint.opclasses cannot be used with expressions. "
"Use django.contrib.postgres.indexes.OpClass() instead."
)
if not isinstance(deferrable, (type(None), Deferrable)):
raise ValueError(
"UniqueConstraint.deferrable must be a Deferrable instance."
)
if not isinstance(include, (type(None), list, tuple)):
raise ValueError("UniqueConstraint.include must be a list or tuple.")
if not isinstance(opclasses, (list, tuple)):
raise ValueError("UniqueConstraint.opclasses must be a list or tuple.")
if opclasses and len(fields) != len(opclasses):
raise ValueError(
"UniqueConstraint.fields and UniqueConstraint.opclasses must "
"have the same number of elements."
)
self.fields = tuple(fields)
self.condition = condition
self.deferrable = deferrable
self.include = tuple(include) if include else ()
self.opclasses = opclasses
self.expressions = tuple(
F(expression) if isinstance(expression, str) else expression
for expression in expressions
)
super().__init__(name)
@property
def contains_expressions(self):
return bool(self.expressions)
def _get_condition_sql(self, model, schema_editor):
if self.condition is None:
return None
query = Query(model=model, alias_cols=False)
where = query.build_where(self.condition)
compiler = query.get_compiler(connection=schema_editor.connection)
sql, params = where.as_sql(compiler, schema_editor.connection)
return sql % tuple(schema_editor.quote_value(p) for p in params)
def _get_index_expressions(self, model, schema_editor):
if not self.expressions:
return None
index_expressions = []
for expression in self.expressions:
index_expression = IndexExpression(expression)
index_expression.set_wrapper_classes(schema_editor.connection)
index_expressions.append(index_expression)
return ExpressionList(*index_expressions).resolve_expression(
Query(model, alias_cols=False),
)
def constraint_sql(self, model, schema_editor):
fields = [model._meta.get_field(field_name) for field_name in self.fields]
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
condition = self._get_condition_sql(model, schema_editor)
expressions = self._get_index_expressions(model, schema_editor)
return schema_editor._unique_sql(
model,
fields,
self.name,
condition=condition,
deferrable=self.deferrable,
include=include,
opclasses=self.opclasses,
expressions=expressions,
)
def create_sql(self, model, schema_editor):
fields = [model._meta.get_field(field_name) for field_name in self.fields]
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
condition = self._get_condition_sql(model, schema_editor)
expressions = self._get_index_expressions(model, schema_editor)
return schema_editor._create_unique_sql(
model,
fields,
self.name,
condition=condition,
deferrable=self.deferrable,
include=include,
opclasses=self.opclasses,
expressions=expressions,
)
def remove_sql(self, model, schema_editor):
condition = self._get_condition_sql(model, schema_editor)
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
expressions = self._get_index_expressions(model, schema_editor)
return schema_editor._delete_unique_sql(
model,
self.name,
condition=condition,
deferrable=self.deferrable,
include=include,
opclasses=self.opclasses,
expressions=expressions,
)
def __repr__(self):
return "<%s:%s%s%s%s%s%s%s>" % (
self.__class__.__qualname__,
"" if not self.fields else " fields=%s" % repr(self.fields),
"" if not self.expressions else " expressions=%s" % repr(self.expressions),
" name=%s" % repr(self.name),
"" if self.condition is None else " condition=%s" % self.condition,
"" if self.deferrable is None else " deferrable=%r" % self.deferrable,
"" if not self.include else " include=%s" % repr(self.include),
"" if not self.opclasses else " opclasses=%s" % repr(self.opclasses),
)
def __eq__(self, other):
if isinstance(other, UniqueConstraint):
return (
self.name == other.name
and self.fields == other.fields
and self.condition == other.condition
and self.deferrable == other.deferrable
and self.include == other.include
and self.opclasses == other.opclasses
and self.expressions == other.expressions
)
return super().__eq__(other)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.fields:
kwargs["fields"] = self.fields
if self.condition:
kwargs["condition"] = self.condition
if self.deferrable:
kwargs["deferrable"] = self.deferrable
if self.include:
kwargs["include"] = self.include
if self.opclasses:
kwargs["opclasses"] = self.opclasses
return path, self.expressions, kwargs
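# Usage sketch (illustrative comment). UniqueConstraint takes either
# fields=... or positional expressions (never both, per __init__ above);
# adding condition makes it a partial unique constraint on backends that
# support one. The Meta fragment below belongs to a hypothetical model:
#
#   from django.db.models import Q, UniqueConstraint
#
#   class Meta:
#       constraints = [
#           UniqueConstraint(
#               fields=["user", "slug"],
#               condition=Q(is_active=True),
#               name="unique_active_slug_per_user",
#           ),
#       ]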
import json
from django import forms
from django.core import checks, exceptions
from django.db import NotSupportedError, connections, router
from django.db.models import lookups
from django.db.models.lookups import PostgresOperatorLookup, Transform
from django.utils.translation import gettext_lazy as _
from . import Field
from .mixins import CheckFieldDefaultMixin
__all__ = ["JSONField"]
class JSONField(CheckFieldDefaultMixin, Field):
empty_strings_allowed = False
description = _("A JSON object")
default_error_messages = {
"invalid": _("Value must be valid JSON."),
}
_default_hint = ("dict", "{}")
def __init__(
self,
verbose_name=None,
name=None,
encoder=None,
decoder=None,
**kwargs,
):
if encoder and not callable(encoder):
raise ValueError("The encoder parameter must be a callable object.")
if decoder and not callable(decoder):
raise ValueError("The decoder parameter must be a callable object.")
self.encoder = encoder
self.decoder = decoder
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
databases = kwargs.get("databases") or []
errors.extend(self._check_supported(databases))
return errors
def _check_supported(self, databases):
errors = []
for db in databases:
if not router.allow_migrate_model(db, self.model):
continue
connection = connections[db]
if (
self.model._meta.required_db_vendor
and self.model._meta.required_db_vendor != connection.vendor
):
continue
if not (
"supports_json_field" in self.model._meta.required_db_features
or connection.features.supports_json_field
):
errors.append(
checks.Error(
"%s does not support JSONFields." % connection.display_name,
obj=self.model,
id="fields.E180",
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.encoder is not None:
kwargs["encoder"] = self.encoder
if self.decoder is not None:
kwargs["decoder"] = self.decoder
return name, path, args, kwargs
def from_db_value(self, value, expression, connection):
if value is None:
return value
# Some backends (SQLite at least) extract non-string values in their
# SQL datatypes.
if isinstance(expression, KeyTransform) and not isinstance(value, str):
return value
try:
return json.loads(value, cls=self.decoder)
except json.JSONDecodeError:
return value
def get_internal_type(self):
return "JSONField"
def get_prep_value(self, value):
if value is None:
return value
return json.dumps(value, cls=self.encoder)
def get_transform(self, name):
transform = super().get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def validate(self, value, model_instance):
super().validate(value, model_instance)
try:
json.dumps(value, cls=self.encoder)
except TypeError:
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def value_to_string(self, obj):
return self.value_from_object(obj)
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.JSONField,
"encoder": self.encoder,
"decoder": self.decoder,
**kwargs,
}
)
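# Usage sketch (illustrative comment). JSONField serializes Python values
# with json.dumps in get_prep_value() and decodes them in from_db_value();
# a custom encoder/decoder pair can round-trip extra types. Event is a
# hypothetical model:
#
#   from django.core.serializers.json import DjangoJSONEncoder
#   from django.db import models
#
#   class Event(models.Model):
#       data = models.JSONField(encoder=DjangoJSONEncoder, default=dict)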
def compile_json_path(key_transforms, include_root=True):
path = ["$"] if include_root else []
for key_transform in key_transforms:
try:
num = int(key_transform)
except ValueError: # non-integer
path.append(".")
path.append(json.dumps(key_transform))
else:
path.append("[%s]" % num)
return "".join(path)
class DataContains(PostgresOperatorLookup):
lookup_name = "contains"
postgres_operator = "@>"
def as_sql(self, compiler, connection):
if not connection.features.supports_json_field_contains:
raise NotSupportedError(
"contains lookup is not supported on this database backend."
)
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = tuple(lhs_params) + tuple(rhs_params)
return "JSON_CONTAINS(%s, %s)" % (lhs, rhs), params
class ContainedBy(PostgresOperatorLookup):
lookup_name = "contained_by"
postgres_operator = "<@"
def as_sql(self, compiler, connection):
if not connection.features.supports_json_field_contains:
raise NotSupportedError(
"contained_by lookup is not supported on this database backend."
)
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = tuple(rhs_params) + tuple(lhs_params)
return "JSON_CONTAINS(%s, %s)" % (rhs, lhs), params
class HasKeyLookup(PostgresOperatorLookup):
logical_operator = None
def compile_json_path_final_key(self, key_transform):
# Compile the final key without interpreting ints as array elements.
return ".%s" % json.dumps(key_transform)
def as_sql(self, compiler, connection, template=None):
# Process JSON path from the left-hand side.
if isinstance(self.lhs, KeyTransform):
lhs, lhs_params, lhs_key_transforms = self.lhs.preprocess_lhs(
compiler, connection
)
lhs_json_path = compile_json_path(lhs_key_transforms)
else:
lhs, lhs_params = self.process_lhs(compiler, connection)
lhs_json_path = "$"
sql = template % lhs
# Process JSON path from the right-hand side.
rhs = self.rhs
rhs_params = []
if not isinstance(rhs, (list, tuple)):
rhs = [rhs]
for key in rhs:
if isinstance(key, KeyTransform):
*_, rhs_key_transforms = key.preprocess_lhs(compiler, connection)
else:
rhs_key_transforms = [key]
*rhs_key_transforms, final_key = rhs_key_transforms
rhs_json_path = compile_json_path(rhs_key_transforms, include_root=False)
rhs_json_path += self.compile_json_path_final_key(final_key)
rhs_params.append(lhs_json_path + rhs_json_path)
# Add condition for each key.
if self.logical_operator:
sql = "(%s)" % self.logical_operator.join([sql] * len(rhs_params))
return sql, tuple(lhs_params) + tuple(rhs_params)
def as_mysql(self, compiler, connection):
return self.as_sql(
compiler, connection, template="JSON_CONTAINS_PATH(%s, 'one', %%s)"
)
def as_oracle(self, compiler, connection):
sql, params = self.as_sql(
compiler, connection, template="JSON_EXISTS(%s, '%%s')"
)
# Add paths directly into SQL because path expressions cannot be passed
# as bind variables on Oracle.
return sql % tuple(params), []
def as_postgresql(self, compiler, connection):
if isinstance(self.rhs, KeyTransform):
*_, rhs_key_transforms = self.rhs.preprocess_lhs(compiler, connection)
for key in rhs_key_transforms[:-1]:
self.lhs = KeyTransform(key, self.lhs)
self.rhs = rhs_key_transforms[-1]
return super().as_postgresql(compiler, connection)
def as_sqlite(self, compiler, connection):
return self.as_sql(
compiler, connection, template="JSON_TYPE(%s, %%s) IS NOT NULL"
)
class HasKey(HasKeyLookup):
lookup_name = "has_key"
postgres_operator = "?"
prepare_rhs = False
class HasKeys(HasKeyLookup):
lookup_name = "has_keys"
postgres_operator = "?&"
logical_operator = " AND "
def get_prep_lookup(self):
return [str(item) for item in self.rhs]
class HasAnyKeys(HasKeys):
lookup_name = "has_any_keys"
postgres_operator = "?|"
logical_operator = " OR "
class HasKeyOrArrayIndex(HasKey):
def compile_json_path_final_key(self, key_transform):
return compile_json_path([key_transform], include_root=False)
class CaseInsensitiveMixin:
"""
Mixin to allow case-insensitive comparison of JSON values on MySQL.
MySQL handles strings used in JSON context using the utf8mb4_bin collation.
Because utf8mb4_bin is a binary collation, comparison of JSON values is
case-sensitive.
"""
def process_lhs(self, compiler, connection):
lhs, lhs_params = super().process_lhs(compiler, connection)
if connection.vendor == "mysql":
return "LOWER(%s)" % lhs, lhs_params
return lhs, lhs_params
def process_rhs(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
if connection.vendor == "mysql":
return "LOWER(%s)" % rhs, rhs_params
return rhs, rhs_params
class JSONExact(lookups.Exact):
can_use_none_as_rhs = True
def process_rhs(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
# Treat None lookup values as null.
if rhs == "%s" and rhs_params == [None]:
rhs_params = ["null"]
if connection.vendor == "mysql":
func = ["JSON_EXTRACT(%s, '$')"] * len(rhs_params)
rhs = rhs % tuple(func)
return rhs, rhs_params
class JSONIContains(CaseInsensitiveMixin, lookups.IContains):
pass
JSONField.register_lookup(DataContains)
JSONField.register_lookup(ContainedBy)
JSONField.register_lookup(HasKey)
JSONField.register_lookup(HasKeys)
JSONField.register_lookup(HasAnyKeys)
JSONField.register_lookup(JSONExact)
JSONField.register_lookup(JSONIContains)
class KeyTransform(Transform):
postgres_operator = "->"
postgres_nested_operator = "#>"
def __init__(self, key_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.key_name = str(key_name)
def preprocess_lhs(self, compiler, connection):
key_transforms = [self.key_name]
previous = self.lhs
while isinstance(previous, KeyTransform):
key_transforms.insert(0, previous.key_name)
previous = previous.lhs
lhs, params = compiler.compile(previous)
if connection.vendor == "oracle":
# Escape string-formatting.
key_transforms = [key.replace("%", "%%") for key in key_transforms]
return lhs, params, key_transforms
def as_mysql(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = compile_json_path(key_transforms)
return "JSON_EXTRACT(%s, %%s)" % lhs, tuple(params) + (json_path,)
def as_oracle(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = compile_json_path(key_transforms)
return (
"COALESCE(JSON_QUERY(%s, '%s'), JSON_VALUE(%s, '%s'))"
% ((lhs, json_path) * 2)
), tuple(params) * 2
def as_postgresql(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
if len(key_transforms) > 1:
sql = "(%s %s %%s)" % (lhs, self.postgres_nested_operator)
return sql, tuple(params) + (key_transforms,)
try:
lookup = int(self.key_name)
except ValueError:
lookup = self.key_name
return "(%s %s %%s)" % (lhs, self.postgres_operator), tuple(params) + (lookup,)
def as_sqlite(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = compile_json_path(key_transforms)
datatype_values = ",".join(
[repr(datatype) for datatype in connection.ops.jsonfield_datatype_values]
)
return (
"(CASE WHEN JSON_TYPE(%s, %%s) IN (%s) "
"THEN JSON_TYPE(%s, %%s) ELSE JSON_EXTRACT(%s, %%s) END)"
) % (lhs, datatype_values, lhs, lhs), (tuple(params) + (json_path,)) * 3
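# Usage sketch (illustrative comment). KeyTransform is what chained key
# lookups resolve to; each __ segment nests another transform, and
# preprocess_lhs() above flattens the chain back into one JSON path. Event
# is a hypothetical model with a JSONField named "data":
#
#   Event.objects.filter(data__owner__name="Bob")   # path $."owner"."name"
#   Event.objects.filter(data__items__0="first")    # path $."items"[0]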
class KeyTextTransform(KeyTransform):
postgres_operator = "->>"
postgres_nested_operator = "#>>"
class KeyTransformTextLookupMixin:
"""
Mixin for combining with a lookup expecting a text lhs from a JSONField
key lookup. On PostgreSQL, make use of the ->> operator instead of casting
key values to text and performing the lookup on the resulting
representation.
"""
def __init__(self, key_transform, *args, **kwargs):
if not isinstance(key_transform, KeyTransform):
raise TypeError(
"Transform should be an instance of KeyTransform in order to "
"use this lookup."
)
key_text_transform = KeyTextTransform(
key_transform.key_name,
*key_transform.source_expressions,
**key_transform.extra,
)
super().__init__(key_text_transform, *args, **kwargs)
class KeyTransformIsNull(lookups.IsNull):
# key__isnull=False is the same as has_key='key'
def as_oracle(self, compiler, connection):
sql, params = HasKeyOrArrayIndex(
self.lhs.lhs,
self.lhs.key_name,
).as_oracle(compiler, connection)
if not self.rhs:
return sql, params
# Column doesn't have a key or IS NULL.
lhs, lhs_params, _ = self.lhs.preprocess_lhs(compiler, connection)
return "(NOT %s OR %s IS NULL)" % (sql, lhs), tuple(params) + tuple(lhs_params)
def as_sqlite(self, compiler, connection):
template = "JSON_TYPE(%s, %%s) IS NULL"
if not self.rhs:
template = "JSON_TYPE(%s, %%s) IS NOT NULL"
return HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name).as_sql(
compiler,
connection,
template=template,
)
class KeyTransformIn(lookups.In):
def resolve_expression_parameter(self, compiler, connection, sql, param):
sql, params = super().resolve_expression_parameter(
compiler,
connection,
sql,
param,
)
if (
not hasattr(param, "as_sql")
and not connection.features.has_native_json_field
):
if connection.vendor == "oracle":
value = json.loads(param)
sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')"
if isinstance(value, (list, dict)):
sql = sql % "JSON_QUERY"
else:
sql = sql % "JSON_VALUE"
elif connection.vendor == "mysql" or (
connection.vendor == "sqlite"
and params[0] not in connection.ops.jsonfield_datatype_values
):
sql = "JSON_EXTRACT(%s, '$')"
if connection.vendor == "mysql" and connection.mysql_is_mariadb:
sql = "JSON_UNQUOTE(%s)" % sql
return sql, params
class KeyTransformExact(JSONExact):
def process_rhs(self, compiler, connection):
if isinstance(self.rhs, KeyTransform):
return super(lookups.Exact, self).process_rhs(compiler, connection)
rhs, rhs_params = super().process_rhs(compiler, connection)
if connection.vendor == "oracle":
func = []
sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')"
for value in rhs_params:
value = json.loads(value)
if isinstance(value, (list, dict)):
func.append(sql % "JSON_QUERY")
else:
func.append(sql % "JSON_VALUE")
rhs = rhs % tuple(func)
elif connection.vendor == "sqlite":
func = []
for value in rhs_params:
if value in connection.ops.jsonfield_datatype_values:
func.append("%s")
else:
func.append("JSON_EXTRACT(%s, '$')")
rhs = rhs % tuple(func)
return rhs, rhs_params
def as_oracle(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
if rhs_params == ["null"]:
# Field has key and it's NULL.
has_key_expr = HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name)
has_key_sql, has_key_params = has_key_expr.as_oracle(compiler, connection)
is_null_expr = self.lhs.get_lookup("isnull")(self.lhs, True)
is_null_sql, is_null_params = is_null_expr.as_sql(compiler, connection)
return (
"%s AND %s" % (has_key_sql, is_null_sql),
tuple(has_key_params) + tuple(is_null_params),
)
return super().as_sql(compiler, connection)
class KeyTransformIExact(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IExact
):
pass
class KeyTransformIContains(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IContains
):
pass
class KeyTransformStartsWith(KeyTransformTextLookupMixin, lookups.StartsWith):
pass
class KeyTransformIStartsWith(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IStartsWith
):
pass
class KeyTransformEndsWith(KeyTransformTextLookupMixin, lookups.EndsWith):
pass
class KeyTransformIEndsWith(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IEndsWith
):
pass
class KeyTransformRegex(KeyTransformTextLookupMixin, lookups.Regex):
pass
class KeyTransformIRegex(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IRegex
):
pass
class KeyTransformNumericLookupMixin:
def process_rhs(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
if not connection.features.has_native_json_field:
rhs_params = [json.loads(value) for value in rhs_params]
return rhs, rhs_params
class KeyTransformLt(KeyTransformNumericLookupMixin, lookups.LessThan):
pass
class KeyTransformLte(KeyTransformNumericLookupMixin, lookups.LessThanOrEqual):
pass
class KeyTransformGt(KeyTransformNumericLookupMixin, lookups.GreaterThan):
pass
class KeyTransformGte(KeyTransformNumericLookupMixin, lookups.GreaterThanOrEqual):
pass
KeyTransform.register_lookup(KeyTransformIn)
KeyTransform.register_lookup(KeyTransformExact)
KeyTransform.register_lookup(KeyTransformIExact)
KeyTransform.register_lookup(KeyTransformIsNull)
KeyTransform.register_lookup(KeyTransformIContains)
KeyTransform.register_lookup(KeyTransformStartsWith)
KeyTransform.register_lookup(KeyTransformIStartsWith)
KeyTransform.register_lookup(KeyTransformEndsWith)
KeyTransform.register_lookup(KeyTransformIEndsWith)
KeyTransform.register_lookup(KeyTransformRegex)
KeyTransform.register_lookup(KeyTransformIRegex)
KeyTransform.register_lookup(KeyTransformLt)
KeyTransform.register_lookup(KeyTransformLte)
KeyTransform.register_lookup(KeyTransformGt)
KeyTransform.register_lookup(KeyTransformGte)
class KeyTransformFactory:
def __init__(self, key_name):
self.key_name = key_name
def __call__(self, *args, **kwargs):
return KeyTransform(self.key_name, *args, **kwargs)
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import difflib
import functools
import sys
from collections import Counter, namedtuple
from collections.abc import Iterator, Mapping
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import (
BaseExpression,
Col,
Exists,
F,
OuterRef,
Ref,
ResolvedOuterRef,
)
from django.db.models.fields import Field
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q,
check_rel_lookup_compatibility,
refs_expression,
)
from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE
from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin
from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode
from django.utils.functional import cached_property
from django.utils.tree import Node
__all__ = ["Query", "RawQuery"]
def get_field_names_from_opts(opts):
if opts is None:
return set()
return set(
chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields()
)
)
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
JoinInfo = namedtuple(
"JoinInfo",
("final_field", "targets", "opts", "joins", "path", "transform_function"),
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=()):
self.params = params
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.identifier_converter
return [converter(column_meta[0]) for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
if self.params is None:
return None
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
if self.params_type is None:
return self.sql
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
elif params_type is None:
params = None
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
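# Usage sketch (illustrative comment). RawQuery instances are normally
# created through Manager.raw(), which yields model instances while this
# class handles cursor management and parameter adaptation. Person is a
# hypothetical model backed by a myapp_person table:
#
#   Person.objects.raw(
#       "SELECT * FROM myapp_person WHERE last_name = %s", ["Lennon"]
#   )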
ExplainInfo = namedtuple("ExplainInfo", ("format", "options"))
class Query(BaseExpression):
"""A single SQL query."""
alias_prefix = "T"
empty_result_set_value = None
subq_aliases = frozenset([alias_prefix])
compiler = "SQLCompiler"
base_table_class = BaseTable
join_class = Join
default_cols = True
default_ordering = True
standard_ordering = True
filter_is_sticky = False
subquery = False
# SQL-related attributes.
# Select and related select clauses are expressions to use in the SELECT
# clause of the query. The select is used for cases where we want to set up
# the select clause to contain other than default fields (values(),
# subqueries...). Note that annotations go to annotations dictionary.
select = ()
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
group_by = None
order_by = ()
low_mark = 0 # Used for offset/limit.
high_mark = None # Used for offset/limit.
distinct = False
distinct_fields = ()
select_for_update = False
select_for_update_nowait = False
select_for_update_skip_locked = False
select_for_update_of = ()
select_for_no_key_update = False
select_related = False
    # Arbitrary limit for select_related to prevent infinite recursion.
max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
values_select = ()
# SQL annotation-related attributes.
annotation_select_mask = None
_annotation_select_cache = None
# Set combination attributes.
combinator = None
combinator_all = False
combined_queries = ()
# These are for extensions. The contents are more or less appended verbatim
# to the appropriate clause.
extra_select_mask = None
_extra_select_cache = None
extra_tables = ()
extra_order_by = ()
# A tuple that is a set of model field names and either True, if these are
# the fields to defer, or False if these are the only fields to load.
deferred_loading = (frozenset(), True)
explain_info = None
def __init__(self, model, alias_cols=True):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Whether to provide alias to columns during reference resolving.
self.alias_cols = alias_cols
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
# Map external tables to whether they are aliased.
self.external_aliases = {}
self.table_map = {} # Maps table names to list of aliases.
self.used_aliases = set()
self.where = WhereNode()
# Maps alias -> Annotation Expression.
self.annotations = {}
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = {} # Maps col_alias -> (col_sql, params).
self._filtered_relations = {}
@property
def output_field(self):
if len(self.select) == 1:
select = self.select[0]
return getattr(select, "target", None) or select.field
elif len(self.annotation_select) == 1:
return next(iter(self.annotation_select.values())).output_field
@property
def has_select_fields(self):
return bool(
self.select or self.annotation_select_mask or self.extra_select_mask
)
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
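    # Usage sketch (illustrative comment). str(query) is mainly a debugging
    # aid; the interpolated SQL is approximate because quoting is normally
    # left to the database driver, as the __str__ docstring above notes.
    # Person is a hypothetical model:
    #
    #   qs = Person.objects.filter(last_name="Lennon")
    #   print(qs.query)                          # roughly the SQL to be run
    #   sql, params = qs.query.sql_with_params()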
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def get_compiler(self, using=None, connection=None, elide_empty=True):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(
self, connection, using, elide_empty
)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
if self.model:
return self.model._meta
def clone(self):
"""
        Return a copy of the current Query. A lightweight alternative to
        deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj.annotations = self.annotations.copy()
if self.annotation_select_mask is not None:
obj.annotation_select_mask = self.annotation_select_mask.copy()
if self.combined_queries:
obj.combined_queries = tuple(
[query.clone() for query in self.combined_queries]
)
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.extra = self.extra.copy()
if self.extra_select_mask is not None:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is not None:
obj._extra_select_cache = self._extra_select_cache.copy()
if self.select_related is not False:
# Use deepcopy because select_related stores fields in nested
# dicts.
obj.select_related = copy.deepcopy(obj.select_related)
if "subq_aliases" in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
obj._filtered_relations = self._filtered_relations.copy()
# Clear the cached_property, if it exists.
obj.__dict__.pop("base_table", None)
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, "_setup_query"):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def _get_col(self, target, field, alias):
if not self.alias_cols:
alias = None
return target.get_col(alias, field)
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
# of a related model, then it might be that column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
                # It's already a Ref to a subquery (see resolve_ref() for
                # details)
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
else:
# Reuse aliases of expressions already selected in subquery.
for col_alias, selected_annotation in self.annotation_select.items():
if selected_annotation is expr:
new_expr = Ref(col_alias, expr)
break
else:
                    # An expression that is not selected in the subquery.
if isinstance(expr, Col) or (
expr.contains_aggregate and not expr.is_summary
):
# Reference column or another aggregate. Select it
# under a non-conflicting alias.
col_cnt += 1
col_alias = "__col%d" % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_expr = Ref(col_alias, expr)
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
existing_annotations = [
annotation
for alias, annotation in self.annotations.items()
if alias not in added_aggregate_names
]
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (
isinstance(self.group_by, tuple)
or self.is_sliced
or existing_annotations
or self.distinct
or self.combinator
):
from django.db.models.sql.subqueries import AggregateQuery
inner_query = self.clone()
inner_query.subquery = True
outer_query = AggregateQuery(self.model, inner_query)
inner_query.select_for_update = False
inner_query.select_related = False
inner_query.set_annotation_mask(self.annotation_select)
# Queries with distinct_fields need ordering and when a limit is
# applied we must take the slice from the ordered query. Otherwise
# no need for ordering.
inner_query.clear_ordering(force=False)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
has_existing_aggregate_annotations = any(
annotation
for annotation in existing_annotations
if getattr(annotation, "contains_aggregate", True)
)
if inner_query.default_cols and has_existing_aggregate_annotations:
inner_query.group_by = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
inner_query.default_cols = False
relabels = {t: "subquery" for t in inner_query.alias_map}
relabels[None] = "subquery"
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
annotation_select_mask = inner_query.annotation_select_mask
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(
relabels
)
del inner_query.annotations[alias]
annotation_select_mask.remove(alias)
                # Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if (
inner_query.select == ()
and not inner_query.default_cols
and not inner_query.annotation_select_mask
):
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
else:
outer_query = self
self.select = ()
self.default_cols = False
self.extra = {}
empty_set_result = [
expression.empty_result_set_value
for expression in outer_query.annotation_select.values()
]
elide_empty = not any(result is NotImplemented for result in empty_set_result)
outer_query.clear_ordering(force=True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
result = compiler.execute_sql(SINGLE)
if result is None:
result = empty_set_result
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return dict(zip(outer_query.annotation_select, result))
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count("*"), alias="__count", is_summary=True)
return obj.get_aggregation(using, ["__count"])["__count"]
def has_filters(self):
return self.where
def exists(self, using, limit=True):
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
q.set_group_by(allow_aliases=False)
q.clear_select_clause()
if q.combined_queries and q.combinator == "union":
limit_combined = connections[
using
].features.supports_slicing_ordering_in_compound
q.combined_queries = tuple(
combined_query.exists(using, limit=limit_combined)
for combined_query in q.combined_queries
)
q.clear_ordering(force=True)
if limit:
q.set_limits(high=1)
q.add_extra({"a": 1}, None, None, None, None, None)
q.set_extra_mask(["a"])
return q
def has_results(self, using):
q = self.exists(using)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def explain(self, using, format=None, **options):
q = self.clone()
q.explain_info = ExplainInfo(format, options)
compiler = q.get_compiler(using=using)
return "\n".join(compiler.explain_query())
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
if self.model != rhs.model:
raise TypeError("Cannot combine queries on two different base models.")
if self.is_sliced:
raise TypeError("Cannot combine queries once a slice has been taken.")
if self.distinct != rhs.distinct:
raise TypeError("Cannot combine a unique query with a non-unique query.")
if self.distinct_fields != rhs.distinct_fields:
raise TypeError("Cannot combine queries with different distinct fields.")
        # If lhs and rhs share the same alias prefix, it is possible to have
# conflicting alias changes like T4 -> T5, T5 -> T6, which might end up
# as T4 -> T6 while combining two querysets. To prevent this, change an
# alias prefix of the rhs and update current aliases accordingly,
# except if the alias is the base table since it must be present in the
# query on both sides.
initial_alias = self.get_initial_alias()
rhs.bump_prefix(self, exclude={initial_alias})
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = connector == AND
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER
)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
        # Combine subquery aliases to ensure alias relabelling properly
        # handles subqueries when combining where and select clauses.
self.subq_aliases |= rhs.subq_aliases
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError(
"When merging querysets using 'or', you cannot have "
"extra(select=...) on both sides."
)
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Convert the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
if name in self._filtered_relations:
name = self._filtered_relations[name].relation_name
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.items():
for field in model._meta.local_fields:
if field not in values:
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in must_include.items():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.items():
callback(target, model, values)
else:
for model, values in must_include.items():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
seen.setdefault(model, set())
for model, values in seen.items():
callback(target, model, values)
def table_alias(self, table_name, create=False, filtered_relation=None):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = (
filtered_relation.alias if filtered_relation is not None else table_name
)
self.table_map[table_name] = [alias]
self.alias_refcount[alias] = 1
return alias, True
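    # For example, the first table_alias("book") call reuses the table name
    # itself and returns ("book", True); a later call with create=True
    # returns a generated alias such as ("T2", True) (the exact number
    # depends on the current size of alias_map).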
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
Promote recursively the join type of given aliases and its children to
        an outer join. A join is only promoted if it is nullable or the parent
        join is an outer join.
        The children are promoted to avoid join chains containing a LOUTER
        b INNER c. So, if the query currently has a INNER b INNER c and a->b is
        promoted, then b->c must also be promoted automatically; otherwise the
        promotion of a->b doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = (
parent_alias and self.alias_map[parent_alias].join_type == LOUTER
)
already_louter = self.alias_map[alias].join_type == LOUTER
if (self.alias_map[alias].nullable or parent_louter) and not already_louter:
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join
for join in self.alias_map
if self.alias_map[join].parent_alias == alias
and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
        demote a->b automatically; otherwise the demotion of b->c doesn't
        actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
# If keys and values of change_map were to intersect, an alias might be
# updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending
# on their order in change_map.
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple(
[col.relabeled_clone(change_map) for col in self.group_by]
)
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
self.annotations = self.annotations and {
key: col.relabeled_clone(change_map)
for key, col in self.annotations.items()
}
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {
# Table is aliased or it's being changed and thus is aliased.
change_map.get(alias, alias): (aliased or alias in change_map)
for alias, aliased in self.external_aliases.items()
}
def bump_prefix(self, other_query, exclude=None):
"""
        Change the alias prefix to the next letter in the alphabet so that
        the other query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call. To prevent changing aliases use the exclude parameter.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet
for s in product(seq, repeat=n):
yield "".join(s)
prefix = None
if self.alias_prefix != other_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
# Explicitly avoid infinite loop. The constant divider is based on how
# much depth recursive subquery references add to the stack. This value
# might need to be adjusted when adding or removing function calls from
# the code path in charge of performing these operations.
local_recursion_limit = sys.getrecursionlimit() // 16
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RecursionError(
"Maximum recursion depth exceeded: too many subqueries."
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases)
if exclude is None:
exclude = {}
self.change_aliases(
{
alias: "%s%d" % (self.alias_prefix, pos)
for pos, alias in enumerate(self.alias_map)
if alias not in exclude
}
)
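    # For example, with the default prefix "T", bump_prefix() moves this
    # query to prefix "U" (then "V", ..., "Z", "AA", ...) and relabels its
    # aliases to "U0", "U1", ... so they cannot clash with the outer query.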
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
elif self.model:
alias = self.join(self.base_table_class(self.get_meta().db_table, None))
else:
alias = None
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
        added in the compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a base_table_class or
join_class.
        The 'reuse' parameter can be either None, which means all joins are
        reusable, or a set containing the aliases that can be reused.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
reuse_aliases = [
a
for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j.equals(join)
]
if reuse_aliases:
if join.table_alias in reuse_aliases:
reuse_alias = join.table_alias
else:
# Reuse the most recent alias of the joined table
# (a many-to-many relation may be joined multiple times).
reuse_alias = reuse_aliases[-1]
self.ref_alias(reuse_alias)
return reuse_alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(
join.table_name, create=True, filtered_relation=join.filtered_relation
)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
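    # For example, Model.objects.filter(m2m__a=1, m2m__b=2) reuses a single
    # join for both conditions, while .filter(m2m__a=1).filter(m2m__b=2)
    # passes a 'reuse' set that excludes the first join's alias, so join()
    # creates a second alias for the same table (hypothetical m2m field).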
def join_parent_model(self, opts, model, alias, seen):
"""
        Make sure the given 'model' is joined in the query. If 'model' isn't
        a parent of 'opts', or if it is None, this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
            # Proxy models have elements in their base chain with no
            # parents. Assign the new options object and skip to the next
            # base in that case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False, select=True):
"""Add a single annotation expression to the Query."""
annotation = annotation.resolve_expression(
self, allow_joins=True, reuse=None, summarize=is_summary
)
if select:
self.append_annotation_mask([alias])
else:
self.set_annotation_mask(set(self.annotation_select).difference({alias}))
self.annotations[alias] = annotation
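    # For example, a hypothetical Book.objects.annotate(n=Count("authors"))
    # lands here with alias="n", annotation=Count("authors"), and
    # select=True.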
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
clone.where.resolve_expression(query, *args, **kwargs)
# Resolve combined queries.
if clone.combinator:
clone.combined_queries = tuple(
[
combined_query.resolve_expression(query, *args, **kwargs)
for combined_query in clone.combined_queries
]
)
for key, value in clone.annotations.items():
resolved = value.resolve_expression(query, *args, **kwargs)
if hasattr(resolved, "external_aliases"):
resolved.external_aliases.update(clone.external_aliases)
clone.annotations[key] = resolved
# Outer query's aliases are considered external.
for alias, table in query.alias_map.items():
clone.external_aliases[alias] = (
isinstance(table, Join)
and table.join_field.related_model._meta.db_table != alias
) or (
isinstance(table, BaseTable) and table.table_name != table.table_alias
)
return clone
def get_external_cols(self):
exprs = chain(self.annotations.values(), self.where.children)
return [
col
for col in self._gen_cols(exprs, include_external=True)
if col.alias in self.external_aliases
]
def get_group_by_cols(self, alias=None):
if alias:
return [Ref(alias, self)]
external_cols = self.get_external_cols()
if any(col.possibly_multivalued for col in external_cols):
return [self]
return external_cols
def as_sql(self, compiler, connection):
# Some backends (e.g. Oracle) raise an error when a subquery contains
# unnecessary ORDER BY clause.
if (
self.subquery
and not connection.features.ignores_unnecessary_order_by_in_subqueries
):
self.clear_ordering(force=False)
sql, params = self.get_compiler(connection=connection).as_sql()
if self.subquery:
sql = "(%s)" % sql
return sql, params
def resolve_lookup_value(self, value, can_reuse, allow_joins):
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self,
reuse=can_reuse,
allow_joins=allow_joins,
)
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
values = (
self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
for sub_value in value
)
type_ = type(value)
if hasattr(type_, "_make"): # namedtuple
return type_(*values)
return type_(values)
return value
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self.annotations:
expression, expression_lookups = refs_expression(
lookup_splitted, self.annotations
)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
                'Invalid lookup "%s" for model "%s".'
% (lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
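    # For example, solve_lookup_type("author__name__icontains") returns
    # (["icontains"], ["author", "name"], False) when no part refers to an
    # annotation (hypothetical field names).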
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, "_meta"):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.'
% (value, opts.object_name)
)
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (
isinstance(value, Query)
and not value.has_select_fields
and not check_rel_lookup_compatibility(value.model, opts, field)
):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".'
% (value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, "_meta"):
self.check_query_object_type(value, opts, field)
elif hasattr(value, "__iter__"):
for v in value:
self.check_query_object_type(v, opts, field)
def check_filterable(self, expression):
"""Raise an error if expression cannot be used in a WHERE clause."""
if hasattr(expression, "resolve_expression") and not getattr(
expression, "filterable", True
):
raise NotSupportedError(
expression.__class__.__name__ + " is disallowed in the filter "
"clause."
)
if hasattr(expression, "get_source_expressions"):
for expr in expression.get_source_expressions():
self.check_filterable(expr)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
        The 'lookups' value is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
*transforms, lookup_name = lookups or ["exact"]
for name in transforms:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
if lhs.field.is_relation:
raise FieldError(
"Related Field got invalid lookup: {}".format(lookup_name)
)
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookup_name)
lookup_name = "exact"
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value unless the lookup supports it.
if lookup.rhs is None and not lookup.can_use_none_as_rhs:
if lookup_name not in ("exact", "iexact"):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup("isnull")(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (
lookup_name == "exact"
and lookup.rhs == ""
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
):
return lhs.get_lookup("isnull")(lhs, True)
return lookup
def try_transform(self, lhs, name):
"""
        Helper method for build_lookup(). Try to fetch and initialize
        a transform for the 'name' parameter from 'lhs'.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
output_field = lhs.output_field.__class__
suggested_lookups = difflib.get_close_matches(
name, output_field.get_lookups()
)
if suggested_lookups:
suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups)
else:
suggestion = "."
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted%s" % (name, output_field.__name__, suggestion)
)
def build_filter(
self,
filter_expr,
branch_negated=False,
current_negated=False,
can_reuse=None,
allow_joins=True,
split_subq=True,
check_filterable=True,
):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
        Note that build_filter() will not do any negating itself; that is
        done higher up in the call chain by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
if isinstance(filter_expr, Q):
return self._add_q(
filter_expr,
branch_negated=branch_negated,
current_negated=current_negated,
used_aliases=can_reuse,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
)
if hasattr(filter_expr, "resolve_expression"):
if not getattr(filter_expr, "conditional", False):
raise TypeError("Cannot filter against a non-conditional expression.")
condition = filter_expr.resolve_expression(self, allow_joins=allow_joins)
if not isinstance(condition, Lookup):
condition = self.build_lookup(["exact"], condition, True)
return WhereNode([condition], connector=AND), []
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if check_filterable:
self.check_filterable(reffed_expression)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
pre_joins = self.alias_refcount.copy()
value = self.resolve_lookup_value(value, can_reuse, allow_joins)
used_joins = {
k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)
}
if check_filterable:
self.check_filterable(value)
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
return WhereNode([condition], connector=AND), []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(
parts,
opts,
alias,
can_reuse=can_reuse,
allow_many=allow_many,
)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError(
"Related Field got invalid lookup: {}".format(lookups[0])
)
if len(targets) == 1:
col = self._get_col(targets[0], join_info.final_field, alias)
else:
col = MultiColSource(
alias, targets, join_info.targets, join_info.final_field
)
else:
col = self._get_col(targets[0], join_info.final_field, alias)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause = WhereNode([condition], connector=AND)
require_outer = (
lookup_type == "isnull" and condition.rhs is True and not current_negated
)
if (
current_negated
and (lookup_type != "isnull" or condition.rhs is False)
and condition.rhs is not None
):
require_outer = True
if lookup_type != "isnull":
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
if (
self.is_nullable(targets[0])
or self.alias_map[join_list[-1]].join_type == LOUTER
):
lookup_class = targets[0].get_lookup("isnull")
col = self._get_col(targets[0], join_info.targets[0], alias)
clause.add(lookup_class(col, False), AND)
# If someval is a nullable column, someval IS NOT NULL is
# added.
if isinstance(value, Col) and self.is_nullable(value.target):
lookup_class = value.target.get_lookup("isnull")
clause.add(lookup_class(value, False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_lhs, filter_rhs):
self.add_q(Q((filter_lhs, filter_rhs)))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
        # So, demotion is OK.)
existing_inner = {
a for a in self.alias_map if self.alias_map[a].join_type == INNER
}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def build_where(self, filter_expr):
return self.build_filter(filter_expr, allow_joins=False)[0]
def clear_where(self):
self.where = WhereNode()
def _add_q(
self,
q_object,
used_aliases,
branch_negated=False,
current_negated=False,
allow_joins=True,
split_subq=True,
check_filterable=True,
):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
joinpromoter = JoinPromoter(
q_object.connector, len(q_object.children), current_negated
)
for child in q_object.children:
child_clause, needed_inner = self.build_filter(
child,
can_reuse=used_aliases,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def build_filtered_relation_q(
self, q_object, reuse, branch_negated=False, current_negated=False
):
"""Add a FilteredRelation object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause = self.build_filtered_relation_q(
child,
reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
)
else:
child_clause, _ = self.build_filter(
child,
can_reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=True,
split_subq=False,
)
target_clause.add(child_clause, connector)
return target_clause
def add_filtered_relation(self, filtered_relation, alias):
filtered_relation.alias = alias
lookups = dict(get_children_from_q(filtered_relation.condition))
relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(
filtered_relation.relation_name
)
if relation_lookup_parts:
raise ValueError(
"FilteredRelation's relation_name cannot contain lookups "
"(got %r)." % filtered_relation.relation_name
)
for lookup in chain(lookups):
lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
shift = 2 if not lookup_parts else 1
lookup_field_path = lookup_field_parts[:-shift]
for idx, lookup_field_part in enumerate(lookup_field_path):
if len(relation_field_parts) > idx:
if relation_field_parts[idx] != lookup_field_part:
raise ValueError(
"FilteredRelation's condition doesn't support "
"relations outside the %r (got %r)."
% (filtered_relation.relation_name, lookup)
)
else:
raise ValueError(
"FilteredRelation's condition doesn't support nested "
"relations deeper than the relation_name (got %r for "
"%r)." % (lookup, filtered_relation.relation_name)
)
self._filtered_relations[filtered_relation.alias] = filtered_relation
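    # Illustrative usage, assuming hypothetical Author/Book models:
    #
    #   Author.objects.annotate(
    #       recent_book=FilteredRelation(
    #           "book", condition=Q(book__year__gte=2020)
    #       )
    #   ).filter(recent_book__title__icontains="django")
    #
    # annotate() routes the FilteredRelation to add_filtered_relation().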
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
        Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == "pk":
name = opts.pk.name
field = None
filtered_relation = None
try:
if opts is None:
raise FieldDoesNotExist
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif name in self._filtered_relations and pos == 0:
filtered_relation = self._filtered_relations[name]
if LOOKUP_SEP in filtered_relation.relation_name:
parts = filtered_relation.relation_name.split(LOOKUP_SEP)
filtered_relation_path, field, _, _ = self.names_to_path(
parts,
opts,
allow_many,
fail_on_missing,
)
path.extend(filtered_relation_path[:-1])
else:
field = opts.get_field(filtered_relation.relation_name)
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
available = sorted(
[
*get_field_names_from_opts(opts),
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available))
)
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if opts is not None and model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, "path_infos"):
if filtered_relation:
pathinfos = field.get_path_info(filtered_relation)
else:
pathinfos = field.path_infos
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name)
)
break
return path, final_field, targets, names[pos + 1 :]
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
        The 'can_reuse' defines the reverse foreign key joins we can reuse. It
        can be None, in which case all joins are reusable, or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins, the
field path traveled to generate the joins, and a transform function
that takes a field and alias and is equivalent to `field.get_col(alias)`
in the simple case but wraps field transforms if they were included in
names.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# The transform can't be applied yet, as joins must be trimmed later.
# To avoid making every caller of this method look up transforms
# directly, compute transforms here and create a partial that converts
# fields to the appropriate wrapped version.
def final_transformer(field, alias):
if not self.alias_cols:
alias = None
return field.get_col(alias)
# Try resolving all the names as fields first. If there's an error,
# treat trailing names as lookups until a field can be resolved.
last_field_exception = None
for pivot in range(len(names), 0, -1):
try:
path, final_field, targets, rest = self.names_to_path(
names[:pivot],
opts,
allow_many,
fail_on_missing=True,
)
except FieldError as exc:
if pivot == 1:
# The first item cannot be a lookup, so it's safe
# to raise the field error here.
raise
else:
last_field_exception = exc
else:
# The transforms are the remaining items that couldn't be
# resolved into fields.
transforms = names[pivot:]
break
for name in transforms:
def transform(field, alias, *, name, previous):
try:
wrapped = previous(field, alias)
return self.try_transform(wrapped, name)
except FieldError:
# FieldError is raised if the transform doesn't exist.
if isinstance(final_field, Field) and last_field_exception:
raise last_field_exception
else:
raise
final_transformer = functools.partial(
transform, name=name, previous=final_transformer
)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
if join.filtered_relation:
filtered_relation = join.filtered_relation.clone()
table_alias = filtered_relation.alias
else:
filtered_relation = None
table_alias = None
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = self.join_class(
opts.db_table,
alias,
table_alias,
INNER,
join.join_field,
nullable,
filtered_relation=filtered_relation,
)
reuse = can_reuse if join.m2m else None
alias = self.join(connection, reuse=reuse)
joins.append(alias)
if filtered_relation:
filtered_relation.path = joins[:]
return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
def trim_joins(self, targets, joins, path):
"""
        The 'targets' parameter holds the final fields being joined to and
        'joins' is the full list of join aliases. The 'path' contains the
        PathInfos used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
if info.filtered_relation:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {
r[1].column: r[0]
for r in info.join_field.related_fields
if r[1].column in cur_targets
}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
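    # For example, a hypothetical Book.objects.filter(author__id=1) needs no
    # join at all: the target column already exists on "book" as the
    # "author_id" foreign key column, so the trailing join is trimmed and
    # the filter runs against "book"."author_id".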
@classmethod
def _gen_cols(cls, exprs, include_external=False):
for expr in exprs:
if isinstance(expr, Col):
yield expr
elif include_external and callable(
getattr(expr, "get_external_cols", None)
):
yield from expr.get_external_cols()
elif hasattr(expr, "get_source_expressions"):
yield from cls._gen_cols(
expr.get_source_expressions(),
include_external=include_external,
)
@classmethod
def _gen_col_aliases(cls, exprs):
yield from (expr.alias for expr in cls._gen_cols(exprs))
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
annotation = self.annotations.get(name)
if annotation is not None:
if not allow_joins:
for alias in self._gen_col_aliases([annotation]):
if isinstance(self.alias_map[alias], Join):
raise FieldError(
"Joined field references are not permitted in this query"
)
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
if name not in self.annotation_select:
raise FieldError(
"Cannot aggregate over the '%s' alias. Use annotate() "
"to promote it." % name
)
return Ref(name, self.annotation_select[name])
else:
return annotation
else:
field_list = name.split(LOOKUP_SEP)
annotation = self.annotations.get(field_list[0])
if annotation is not None:
for transform in field_list[1:]:
annotation = self.try_transform(annotation, transform)
return annotation
join_info = self.setup_joins(
field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse
)
targets, final_alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if not allow_joins and len(join_list) > 1:
raise FieldError(
"Joined field references are not permitted in this query"
)
if len(targets) > 1:
raise FieldError(
"Referencing multicolumn fields with F() objects isn't supported"
)
# Verify that the last lookup in name is a field or a transform:
# transform_function() raises FieldError if not.
transform = join_info.transform_function(targets[0], final_alias)
if reuse is not None:
reuse.update(join_list)
return transform
def split_exclude(self, filter_expr, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
For example, if the origin filter is ~Q(child__name='foo'), filter_expr
is ('child__name', 'foo') and can_reuse is a set of joins usable for
filters in the original query.
We will turn this into equivalent of:
WHERE NOT EXISTS(
SELECT 1
FROM child
WHERE name = 'foo' AND child.parent_id = parent.id
LIMIT 1
)
"""
# Generate the inner query.
query = self.__class__(self.model)
query._filtered_relations = self._filtered_relations
filter_lhs, filter_rhs = filter_expr
if isinstance(filter_rhs, OuterRef):
filter_rhs = OuterRef(filter_rhs)
elif isinstance(filter_rhs, F):
filter_rhs = OuterRef(filter_rhs.name)
query.add_filter(filter_lhs, filter_rhs)
query.clear_ordering(force=True)
        # Try to keep the subquery as simple as possible by trimming leading
        # joins from it.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
col = query.select[0]
select_field = col.target
alias = col.alias
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup("exact")
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases[alias] = True
lookup_class = select_field.get_lookup("exact")
lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix))
query.where.add(lookup, AND)
condition, needed_inner = self.build_filter(Exists(query))
if contains_louter:
or_null_condition, _ = self.build_filter(
("%s__isnull" % trimmed_prefix, True),
current_negated=True,
branch_negated=True,
can_reuse=can_reuse,
)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
for query in self.combined_queries:
query.set_empty()
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
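    # For example, qs[5:15][:3] calls set_limits(5, 15) and then
    # set_limits(None, 3), leaving low_mark=5 and high_mark=8; the second
    # slice is applied relative to the first.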
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
@property
def is_sliced(self):
return self.low_mark != 0 or self.high_mark is not None
def has_limit_one(self):
return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.is_sliced
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def add_select_col(self, col, name):
self.select += (col,)
self.values_select += (name,)
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
# Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use an outer join.
join_info = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m
)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(join_info.transform_function(target, final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
elif name in self.annotations:
raise FieldError(
"Cannot select the '%s' alias. Use annotate() to promote "
"it." % name
)
else:
names = sorted(
[
*get_field_names_from_opts(opts),
*self.extra,
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names))
)
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if isinstance(item, str):
if item == "?":
continue
if item.startswith("-"):
item = item[1:]
if item in self.annotations:
continue
if self.extra and item in self.extra:
continue
                # names_to_path() validates the lookup. A descriptive
                # FieldError will be raised if it's not valid.
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
elif not hasattr(item, "resolve_expression"):
errors.append(item)
if getattr(item, "contains_aggregate", False):
raise FieldError(
"Using an aggregate in order_by() without also including "
"it in annotate() is not allowed: %s" % item
)
if errors:
raise FieldError("Invalid order_by arguments: %s" % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force=False, clear_default=True):
"""
        Remove any ordering settings if the current query allows it without
        side effects; set 'force' to True to clear the ordering regardless.
If 'clear_default' is True, there will be no ordering in the resulting
query (not even the model's default).
"""
if not force and (
self.is_sliced or self.distinct_fields or self.select_for_update
):
return
self.order_by = ()
self.extra_order_by = ()
if clear_default:
self.default_ordering = False
def set_group_by(self, allow_aliases=True):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
# Column names from JOINs to check collisions with aliases.
if allow_aliases:
column_names = set()
seen_models = set()
for join in list(self.alias_map.values())[1:]: # Skip base table.
model = join.join_field.related_model
if model not in seen_models:
column_names.update(
{field.column for field in model._meta.local_concrete_fields}
)
seen_models.add(model)
group_by = list(self.select)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
if not allow_aliases or alias in column_names:
alias = None
group_by_cols = annotation.get_group_by_cols(alias=alias)
group_by.extend(group_by_cols)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = {}
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = str(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != "%":
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
if new_existing := existing.difference(field_names):
self.deferred_loading = new_existing, False
else:
self.clear_deferred_loading()
if new_only := set(field_names).difference(existing):
self.deferred_loading = new_only, True
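    # For example, qs.defer("body").defer("headline") accumulates
    # ({"body", "headline"}, True), while qs.only("headline").defer("headline")
    # removes the name from the immediate-load set instead (hypothetical
    # field names).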
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if "pk" in field_names:
field_names.remove("pk")
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, return a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, return an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""Callback used by get_deferred_field_names()."""
target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(self.annotation_select_mask.union(names))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self.extra and not self.annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
selected = frozenset(field_names + extra_names + annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
selected = frozenset(field_names)
# Selected annotations must be known before setting the GROUP BY
# clause.
if self.group_by is True:
self.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
self.set_group_by(allow_aliases=False)
self.clear_select_fields()
elif self.group_by:
# Resolve GROUP BY annotation references if they are not part of
# the selected fields anymore.
group_by = []
for expr in self.group_by:
if isinstance(expr, Ref) and expr.refs not in selected:
expr = self.annotations[expr.refs]
group_by.append(expr)
self.group_by = tuple(group_by)
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""
Return the dictionary of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self.annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = {
k: v
for k, v in self.annotations.items()
if k in self.annotation_select_mask
}
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self.extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = {
k: v for k, v in self.extra.items() if k in self.extra_select_mask
}
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
        Trim joins from the start of the join path. The candidates for
        trimming are the leading PathInfos in the names_with_path structure,
        up to the first m2m join.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
        # The path.join_field is a Rel; let's get the other side's field.
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for:
# - LEFT JOINs because we would miss those rows that have nothing on
# the outer side,
# - INNER JOINs from filtered relations because we would miss their
# filters.
first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
if first_join.join_type != LOUTER and not first_join.filtered_relation:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
None, lookup_tables[trimmed_paths + 1]
)
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a join_class instead of a
# base_table_class reference. But the first entry in the query's FROM
# clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = self.base_table_class(
self.alias_map[table].table_name,
table,
)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
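        # For example, Oracle interprets the empty string as NULL, so a
        # CharField with null=False must still be treated as nullable there.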
return field.null or (
field.empty_strings_allowed
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
)
def get_order_dir(field, default="ASC"):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == "-":
return field[1:], dirn[1]
return field, dirn[0]
def add_to_dict(data, key, value):
"""
Add "value" to the set of values for "key", whether or not "key" already
exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
def is_reverse_o2o(field):
"""
Check if the given field is reverse-o2o. The field is expected to be some
sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def __repr__(self):
return (
f"{self.__class__.__qualname__}(connector={self.connector!r}, "
f"num_children={self.num_children!r}, negated={self.negated!r})"
)
def add_votes(self, votes):
"""
        Add a single vote per item to self.votes. The parameter can be any
        iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (it is null, for
            # example a reverse foreign key or a null value in a direct
            # foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == "OR" and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == "AND" or (
self.effective_connector == "OR" and votes == self.num_children
):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
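            # Illustrative: for filter(Q(rel_a__col=1) | Q(rel_b__col=2)) each
            # join alias collects one vote out of num_children=2, so under the
            # OR connector both joins are promoted to LEFT OUTER above.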
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
f428b83e77f92a6bc4f4721cdc44a4a5c9dff8dc87c9e6ce4328dccea777436b
import collections
import json
import re
from functools import partial
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
CURSOR,
GET_ITERATOR_CHUNK_SIZE,
MULTI,
NO_RESULTS,
ORDER_DIR,
SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile
class SQLCompiler:
# Multiline ordering SQL clause may appear from RawSQL.
ordering_parts = _lazy_re_compile(
r"^(.*)\s(?:ASC|DESC).*",
re.MULTILINE | re.DOTALL,
)
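    # Illustrative: ordering_parts.search('"t"."name" DESC')[1] returns
    # '"t"."name"', i.e. the expression with its ordering direction stripped.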
def __init__(self, query, connection, using, elide_empty=True):
self.query = query
self.connection = connection
self.using = using
# Some queries, e.g. coalesced aggregation, need to be executed even if
# they would return an empty result set.
self.elide_empty = elide_empty
self.quote_cache = {"*": "*"}
        # The select, klass_info, and annotations are needed by QuerySet.iterator();
        # these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self._meta_ordering = None
def __repr__(self):
return (
f"<{self.__class__.__qualname__} "
f"model={self.query.model.__qualname__} "
f"connection={self.connection!r} using={self.using!r}>"
)
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk')
# .annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
        # can't ever be restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
        # refer to some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, "as_sql"):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)}
for expr, _, _ in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
if not self._meta_ordering:
for expr, (sql, params, is_ref) in order_by:
# Skip references to the SELECT clause, as all expressions in
# the SELECT clause are already part of the GROUP BY.
if not is_ref:
expressions.extend(expr.get_group_by_cols())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
sql, params = expr.select_format(self, sql, params)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (
getattr(expr, "target", None) == self.query.model._meta.pk
and getattr(expr, "alias", None) == self.query.base_table
):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias
for expr in expressions
if hasattr(expr, "target") and expr.target.primary_key
}
expressions = [pk] + [
expr
for expr in expressions
if expr in having
or (
getattr(expr, "alias", None) is not None
and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr
for expr in expressions
if (
hasattr(expr, "target")
and expr.target.primary_key
and self.connection.features.allows_group_by_selected_pks_on_model(
expr.target.model
)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr
for expr in expressions
if expr in pks or getattr(expr, "alias", None) not in aliases
]
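        # Illustrative: once "author"."id" is known to be grouped, a redundant
        # "author"."name" can be dropped, collapsing
        # GROUP BY "author"."id", "author"."name" to GROUP BY "author"."id".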
return expressions
def get_select(self):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
cols = self.get_default_columns()
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
"model": self.query.model,
"select_fields": select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info["related_klass_infos"] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info["related_klass_infos"]:
if ki["from_parent"]:
ki["select_fields"] = (
klass_info["select_fields"] + ki["select_fields"]
)
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
empty_result_set_value = getattr(
col, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
# Select a predicate that's always False.
sql, params = "0", ()
else:
sql, params = self.compile(Value(empty_result_set_value))
else:
sql, params = col.select_format(self, sql, params)
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def _order_by_pairs(self):
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif (meta := self.query.get_meta()) and meta.ordering:
ordering = meta.ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
default_order, _ = ORDER_DIR["ASC"]
else:
default_order, _ = ORDER_DIR["DESC"]
for field in ordering:
if hasattr(field, "resolve_expression"):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
yield field, False
continue
if field == "?": # random
yield OrderBy(Random()), False
continue
col, order = get_order_dir(field, default_order)
descending = order == "DESC"
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
yield (
OrderBy(
Ref(col, self.query.annotation_select[col]),
descending=descending,
),
True,
)
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
if self.query.combinator and self.select:
# Don't use the resolved annotation because other
# combinated queries might define it differently.
expr = F(col)
else:
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
yield OrderBy(expr, descending=descending), False
continue
if "." in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split(".", 1)
yield (
OrderBy(
RawSQL(
"%s.%s" % (self.quote_name_unless_alias(table), col), []
),
descending=descending,
),
False,
)
continue
if self.query.extra and col in self.query.extra:
if col in self.query.extra_select:
yield (
OrderBy(
Ref(col, RawSQL(*self.query.extra[col])),
descending=descending,
),
True,
)
else:
yield (
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False,
)
else:
if self.query.combinator and self.select:
# Don't use the first model's field because other
# combinated queries might define it differently.
yield OrderBy(F(col), descending=descending), False
else:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
yield from self.find_ordering_name(
field,
self.query.get_meta(),
default_order=default_order,
)
def get_order_by(self):
"""
Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for
the ORDER BY clause.
The order_by clause can alter the select clause (for example it can add
aliases to clauses that do not yet have one, or it can add totally new
select clauses).
"""
result = []
seen = set()
for expr, is_ref in self._order_by_pairs():
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator and self.select:
src = resolved.get_source_expressions()[0]
expr_src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias and not (
isinstance(expr_src, F) and col_alias == expr_src.name
):
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL("%d" % (idx + 1), ())])
break
else:
if col_alias:
raise DatabaseError(
"ORDER BY term does not match any column in the result set."
)
# Add column used in ORDER BY clause to the selected
# columns and to each combined query.
order_by_idx = len(self.query.select) + 1
col_name = f"__orderbycol{order_by_idx}"
for q in self.query.combined_queries:
q.add_annotation(expr_src, col_name)
self.query.add_select_col(resolved, col_name)
resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql)[1]
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if (
(name in self.query.alias_map and name not in self.query.table_map)
or name in self.query.extra_select
or (
self.query.external_aliases.get(name)
and name not in self.query.table_map
)
):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node):
vendor_impl = getattr(node, "as_" + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection, self.elide_empty)
for query in self.query.combined_queries
if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError(
"LIMIT/OFFSET not allowed in subqueries of compound statements."
)
if compiler.get_order_by():
raise DatabaseError(
"ORDER BY not allowed in subqueries of compound statements."
)
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values(
(
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
)
)
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = "SELECT * FROM ({})".format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif (
self.query.subquery
or not features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == "union" or (combinator == "difference" and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == "union":
combinator_sql += " ALL"
braces = "{}"
if not self.query.subquery and features.supports_slicing_ordering_in_compound:
braces = "({})"
sql_parts, args_parts = zip(
*((braces.format(sql), args) for sql, args in parts)
)
result = [" {} ".format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (
self.query.high_mark is not None or self.query.low_mark
)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, "supports_select_{}".format(combinator)):
raise NotSupportedError(
"{} is not supported on this database backend.".format(
combinator
)
)
result, params = self.get_combinator_sql(
combinator, self.query.combinator_all
)
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
try:
where, w_params = (
self.compile(self.where) if self.where is not None else ("", [])
)
except EmptyResultSet:
if self.elide_empty:
raise
# Use a predicate that's always False.
where, w_params = "0 = 1", []
having, h_params = (
self.compile(self.having) if self.having is not None else ("", [])
)
result = ["SELECT"]
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = "%s AS %s" % (
s_sql,
self.connection.ops.quote_name(alias),
)
elif with_col_aliases:
s_sql = "%s AS %s" % (
s_sql,
self.connection.ops.quote_name("col%d" % col_idx),
)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result += [", ".join(out_cols)]
if from_:
result += ["FROM", *from_]
elif self.connection.features.bare_select_suffix:
result += [self.connection.features.bare_select_suffix]
params.extend(f_params)
if self.query.select_for_update and features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
if (
with_limit_offset
and not features.supports_select_for_update_with_limit
):
raise NotSupportedError(
"LIMIT/OFFSET is not supported with "
"select_for_update on this database backend."
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
no_key = self.query.select_for_no_key_update
# If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the
# backend doesn't support it, raise NotSupportedError to
# prevent a possible deadlock.
if nowait and not features.has_select_for_update_nowait:
raise NotSupportedError(
"NOWAIT is not supported on this database backend."
)
elif skip_locked and not features.has_select_for_update_skip_locked:
raise NotSupportedError(
"SKIP LOCKED is not supported on this database backend."
)
elif of and not features.has_select_for_update_of:
raise NotSupportedError(
"FOR UPDATE OF is not supported on this database backend."
)
elif no_key and not features.has_select_for_no_key_update:
raise NotSupportedError(
"FOR NO KEY UPDATE is not supported on this "
"database backend."
)
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
no_key=no_key,
)
if for_update_part and features.for_update_after_from:
result.append(for_update_part)
if where:
result.append("WHERE %s" % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented."
)
order_by = order_by or self.connection.ops.force_no_ordering()
result.append("GROUP BY %s" % ", ".join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append("HAVING %s" % having)
params.extend(h_params)
if self.query.explain_info:
result.insert(
0,
self.connection.ops.explain_query_prefix(
self.query.explain_info.format,
**self.query.explain_info.options,
),
)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append("ORDER BY %s" % ", ".join(ordering))
if with_limit_offset:
result.append(
self.connection.ops.limit_offset_sql(
self.query.low_mark, self.query.high_mark
)
)
if for_update_part and not features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if not alias and with_col_aliases:
alias = "col%d" % index
if alias:
sub_selects.append(
"%s.%s"
% (
self.connection.ops.quote_name("subquery"),
self.connection.ops.quote_name(alias),
)
)
else:
select_clone = select.relabeled_clone(
{select.alias: "subquery"}
)
subselect, subparams = select_clone.as_sql(
self, self.connection
)
sub_selects.append(subselect)
sub_params.extend(subparams)
return "SELECT %s FROM (%s) subquery" % (
", ".join(sub_selects),
" ".join(result),
), tuple(sub_params + params)
return " ".join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions (Col instances), one for each
        concrete field to load, suitable for use in the SELECT clause.
"""
result = []
if opts is None:
if (opts := self.query.get_meta()) is None:
return result
only_load = self.deferred_to_columns()
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if (
from_parent
and model is not None
and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model
)
):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(
parts, opts, None
)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(self.connection.ops.quote_name(name))
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(
self, name, opts, alias=None, default_order="ASC", already_seen=None
):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == "DESC"
pieces = name.split(LOOKUP_SEP)
(
field,
targets,
alias,
joins,
path,
opts,
transform_function,
) = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless it is the pk
# shortcut or the attribute name of the field that is specified.
if (
field.is_relation
and opts.ordering
and getattr(field, "attname", None) != pieces[-1]
and name != "pk"
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(
getattr(self.query.alias_map[j], "join_cols", None) for j in joins
)
if join_tuple in already_seen:
raise FieldError("Infinite loop caused by ordering.")
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, "resolve_expression") and not isinstance(
item, OrderBy
):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append((item, False))
continue
results.extend(
self.find_ordering_name(item, opts, alias, order, already_seen)
)
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [
(OrderBy(transform_function(t, alias), descending=descending), False)
for t in targets
]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_order_by() and get_distinct() must produce the same target columns
        for the same input, as the prefixes of get_order_by() and get_distinct() must
match. Executing SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(
pieces, opts, alias
)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if (
alias not in self.query.alias_map
or self.query.alias_refcount[alias] == 1
):
result.append(", %s" % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(
self,
select,
opts=None,
root_alias=None,
cur_depth=1,
requested=None,
restricted=None,
):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects
if f.field.unique
)
return chain(
direct_choices, reverse_choices, self.query._filtered_relations
)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info["related_klass_infos"] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s"
% (
f.name,
", ".join(_get_field_choices()) or "(none)",
)
)
else:
next = False
if not select_related_descend(
f, restricted, requested, only_load.get(field_model)
):
continue
klass_info = {
"model": f.remote_field.model,
"field": f,
"reverse": False,
"local_setter": f.set_cached_value,
"remote_setter": f.remote_field.set_cached_value
if f.unique
else lambda x, y: None,
"from_parent": False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(
start_alias=alias, opts=f.remote_field.model._meta
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_klass_infos = self.get_related_selections(
select,
f.remote_field.model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(
f, restricted, requested, only_load.get(model), reverse=True
):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins(
[related_field_name], opts, root_alias
)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": f.remote_field.set_cached_value,
"remote_setter": f.set_cached_value,
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1, next, restricted
)
get_related_klass_infos(klass_info, next_klass_infos)
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(name, obj, from_obj):
setattr(from_obj, name, obj)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins(
[name], opts, root_alias
)
model = join_opts.model
alias = joins[-1]
from_parent = (
issubclass(model, opts.model) and model is not opts.model
)
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": local_setter,
"remote_setter": partial(remote_setter, name),
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select,
opts=model._meta,
root_alias=alias,
cur_depth=cur_depth + 1,
requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
"Invalid field name(s) given in select_related: %s. "
"Choices are: %s"
% (
", ".join(invalid_fields),
", ".join(_get_field_choices()) or "(none)",
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_parent_klass_info(klass_info):
concrete_model = klass_info["model"]._meta.concrete_model
for parent_model, parent_link in concrete_model._meta.parents.items():
parent_list = parent_model._meta.get_parent_list()
yield {
"model": parent_model,
"field": parent_link,
"reverse": False,
"select_fields": [
select_index
for select_index in klass_info["select_fields"]
# Selected columns from a model or its parents.
if (
self.select[select_index][0].target.model == parent_model
or self.select[select_index][0].target.model in parent_list
)
],
}
def _get_first_selected_col_from_model(klass_info):
"""
            Find the first selected column from a model. If none exists,
            don't lock the model.
select_fields is filled recursively, so it also contains fields
from the parent models.
"""
concrete_model = klass_info["model"]._meta.concrete_model
for select_index in klass_info["select_fields"]:
if self.select[select_index][0].target.model == concrete_model:
return self.select[select_index][0]
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield "self"
else:
field = klass_info["field"]
if klass_info["reverse"]:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in _get_parent_klass_info(klass_info)
)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get("related_klass_infos", [])
)
if not self.klass_info:
return []
result = []
invalid_names = []
for name in self.query.select_for_update_of:
klass_info = self.klass_info
if name == "self":
col = _get_first_selected_col_from_model(klass_info)
else:
for part in name.split(LOOKUP_SEP):
klass_infos = (
*klass_info.get("related_klass_infos", []),
*_get_parent_klass_info(klass_info),
)
for related_klass_info in klass_infos:
field = related_klass_info["field"]
if related_klass_info["reverse"]:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
col = _get_first_selected_col_from_model(klass_info)
if col is not None:
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
"Invalid field name(s) given in select_for_update(of=(...)): %s. "
"Only relational fields followed in the query are allowed. "
"Choices are: %s."
% (
", ".join(invalid_names),
", ".join(_get_field_choices()),
)
)
return result
def deferred_to_columns(self):
"""
Convert the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
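        # Each converter is called as converter(value, expression, connection)
        # and its return value replaces the row value at that position.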
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
def results_iter(
self,
results=None,
tuple_expected=False,
chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE,
):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(
MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size
)
fields = [s[0] for s in self.select[0 : self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
return bool(self.execute_sql(SINGLE))
def execute_sql(
self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0 : self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor,
self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), "AND")
sql, params = self.as_sql()
return "EXISTS (%s)" % sql, params
def explain_query(self):
result = list(self.execute_sql())
        # Some backends return 1-item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
output_formatter = (
json.dumps if self.query.explain_info.format == "json" else str
)
for row in result[0]:
if not isinstance(row, str):
yield " ".join(output_formatter(c) for c in row)
else:
yield row
class SQLInsertCompiler(SQLCompiler):
returning_fields = None
returning_params = tuple()
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, "as_sql"):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, "get_placeholder"):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = "%s", [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self.query, allow_joins=False, for_save=True
)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
"can only be used to update, not to insert." % (value, field)
)
if value.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query (%s=%r)."
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(
on_conflict=self.query.on_conflict,
)
result = ["%s %s" % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append("(%s)" % ", ".join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[
self.prepare_value(field, self.pre_save_val(field, obj))
for field in fields
]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [
[self.connection.ops.pk_default_value()] for _ in self.query.objs
]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (
not self.returning_fields and self.connection.features.has_bulk_insert
)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql(
fields,
self.query.on_conflict,
self.query.update_fields,
self.query.unique_fields,
)
if (
self.returning_fields
and self.connection.features.can_return_columns_from_insert
):
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(
self.connection.ops.bulk_insert_sql(fields, placeholder_rows)
)
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, self.returning_params = self.connection.ops.return_insert_columns(
self.returning_fields
)
if r_sql:
result.append(r_sql)
params += [self.returning_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, returning_fields=None):
assert not (
returning_fields
and len(self.query.objs) != 1
and not self.connection.features.can_return_rows_from_bulk_insert
)
opts = self.query.get_meta()
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if (
self.connection.features.can_return_rows_from_bulk_insert
and len(self.query.objs) > 1
):
rows = self.connection.ops.fetch_returned_insert_rows(cursor)
elif self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
rows = [
self.connection.ops.fetch_returned_insert_columns(
cursor,
self.returning_params,
)
]
else:
rows = [
(
self.connection.ops.last_insert_id(
cursor,
opts.db_table,
opts.pk.column,
),
)
]
cols = [field.get_col(opts.db_table) for field in self.returning_fields]
converters = self.get_converters(cols)
if converters:
rows = list(self.apply_converters(rows, converters))
return rows
class SQLDeleteCompiler(SQLCompiler):
@cached_property
def single_alias(self):
# Ensure base table is in aliases.
self.query.get_initial_alias()
return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1
@classmethod
def _expr_refs_base_model(cls, expr, base_model):
if isinstance(expr, Query):
return expr.model == base_model
if not hasattr(expr, "get_source_expressions"):
return False
return any(
cls._expr_refs_base_model(source_expr, base_model)
for source_expr in expr.get_source_expressions()
)
@cached_property
def contains_self_reference_subquery(self):
return any(
self._expr_refs_base_model(expr, self.query.model)
for expr in chain(
self.query.annotations.values(), self.query.where.children
)
)
def _as_sql(self, query):
result = ["DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)]
where, params = self.compile(query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(params)
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
if self.single_alias and not self.contains_self_reference_subquery:
return self._as_sql(self.query)
innerq = self.query.clone()
innerq.__class__ = Query
innerq.clear_select_clause()
pk = self.query.model._meta.pk
innerq.select = [pk.get_col(self.query.get_initial_alias())]
outerq = Query(self.query.model)
if not self.connection.features.update_can_self_select:
# Force the materialization of the inner query to allow reference
# to the target table on MySQL.
sql, params = innerq.get_compiler(connection=self.connection).as_sql()
innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params)
outerq.add_filter("pk__in", innerq)
return self._as_sql(outerq)
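# Editor's note: a hedged sketch (not part of Django) of the two DELETE shapes
# SQLDeleteCompiler above can emit. With a single active alias and no
# self-referential subquery it deletes directly; otherwise it falls back to a
# pk__in subquery, producing SQL roughly like:
#
#   DELETE FROM "app_book" WHERE "app_book"."id" IN (
#       SELECT "app_book"."id" FROM "app_book" INNER JOIN ... WHERE ...
#   )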
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return "", ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, "resolve_expression"):
val = val.resolve_expression(
self.query, allow_joins=False, for_save=True
)
if val.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
elif hasattr(val, "prepare_database_save"):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, "get_placeholder"):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = "%s"
name = field.column
if hasattr(val, "as_sql"):
sql, params = self.compile(val)
values.append("%s = %s" % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append("%s = %s" % (qn(name), placeholder))
update_params.append(val)
else:
values.append("%s = NULL" % qn(name))
table = self.query.base_table
result = [
"UPDATE %s SET" % qn(table),
", ".join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(force=True)
query.extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super().pre_sql_setup()
must_pre_select = (
count > 1 and not self.connection.features.update_can_self_select
)
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.clear_where()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter("pk__in", idents)
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter("pk__in", query)
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ", ".join(sql)
params = tuple(params)
inner_query_sql, inner_query_params = self.query.inner_query.get_compiler(
self.using,
elide_empty=self.elide_empty,
).as_sql(with_col_aliases=True)
sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql)
params = params + inner_query_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
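# Editor's note: a minimal usage sketch for cursor_iter() (illustration, not
# part of Django). Assuming a plain sqlite3 cursor and the empty-list sentinel
# that fetchmany() returns when exhausted, the generator yields chunks of at
# most `itersize` rows and closes the cursor when done:
#
#   import sqlite3
#   conn = sqlite3.connect(":memory:")
#   cur = conn.execute("SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3")
#   for chunk in cursor_iter(cur, sentinel=[], col_count=None, itersize=2):
#       print(chunk)  # [(1,), (2,)] then [(3,)]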
|
7ba7689e0fd8a4df71efcdc80d1ad5e0d45abff8cfa893f4c7a25466b37c03a7 | import multiprocessing
import os
import shutil
import sqlite3
import sys
from pathlib import Path
from django.db import NotSupportedError
from django.db.backends.base.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
@staticmethod
def is_in_memory_db(database_name):
return not isinstance(database_name, Path) and (
database_name == ":memory:" or "mode=memory" in database_name
)
def _get_test_db_name(self):
test_database_name = self.connection.settings_dict["TEST"]["NAME"] or ":memory:"
if test_database_name == ":memory:":
return "file:memorydb_%s?mode=memory&cache=shared" % self.connection.alias
return test_database_name
def _create_test_db(self, verbosity, autoclobber, keepdb=False):
test_database_name = self._get_test_db_name()
if keepdb:
return test_database_name
if not self.is_in_memory_db(test_database_name):
# Erase the old test database
if verbosity >= 1:
self.log(
"Destroying old test database for alias %s..."
% (self._get_database_display_str(verbosity, test_database_name),)
)
if os.access(test_database_name, os.F_OK):
if not autoclobber:
confirm = input(
"Type 'yes' if you would like to try deleting the test "
"database '%s', or 'no' to cancel: " % test_database_name
)
if autoclobber or confirm == "yes":
try:
os.remove(test_database_name)
except Exception as e:
self.log("Got an error deleting the old test database: %s" % e)
sys.exit(2)
else:
self.log("Tests cancelled.")
sys.exit(1)
return test_database_name
def get_test_db_clone_settings(self, suffix):
orig_settings_dict = self.connection.settings_dict
source_database_name = orig_settings_dict["NAME"]
if not self.is_in_memory_db(source_database_name):
root, ext = os.path.splitext(source_database_name)
return {**orig_settings_dict, "NAME": f"{root}_{suffix}{ext}"}
start_method = multiprocessing.get_start_method()
if start_method == "fork":
return orig_settings_dict
if start_method == "spawn":
return {
**orig_settings_dict,
"NAME": f"{self.connection.alias}_{suffix}.sqlite3",
}
raise NotSupportedError(
f"Cloning with start method {start_method!r} is not supported."
)
def _clone_test_db(self, suffix, verbosity, keepdb=False):
source_database_name = self.connection.settings_dict["NAME"]
target_database_name = self.get_test_db_clone_settings(suffix)["NAME"]
if not self.is_in_memory_db(source_database_name):
# Erase the old test database
if os.access(target_database_name, os.F_OK):
if keepdb:
return
if verbosity >= 1:
self.log(
"Destroying old test database for alias %s..."
% (
self._get_database_display_str(
verbosity, target_database_name
),
)
)
try:
os.remove(target_database_name)
except Exception as e:
self.log("Got an error deleting the old test database: %s" % e)
sys.exit(2)
try:
shutil.copy(source_database_name, target_database_name)
except Exception as e:
self.log("Got an error cloning the test database: %s" % e)
sys.exit(2)
        # Forking automatically makes a copy of an in-memory database.
        # Spawning requires migrating it to disk, from where it is re-opened
        # in setup_worker_connection().
elif multiprocessing.get_start_method() == "spawn":
ondisk_db = sqlite3.connect(target_database_name, uri=True)
self.connection.connection.backup(ondisk_db)
def _destroy_test_db(self, test_database_name, verbosity):
if test_database_name and not self.is_in_memory_db(test_database_name):
# Remove the SQLite database file
os.remove(test_database_name)
def test_db_signature(self):
"""
Return a tuple that uniquely identifies a test database.
This takes into account the special cases of ":memory:" and "" for
SQLite since the databases will be distinct despite having the same
TEST NAME. See https://www.sqlite.org/inmemorydb.html
"""
test_database_name = self._get_test_db_name()
sig = [self.connection.settings_dict["NAME"]]
if self.is_in_memory_db(test_database_name):
sig.append(self.connection.alias)
else:
sig.append(test_database_name)
return tuple(sig)
def setup_worker_connection(self, _worker_id):
settings_dict = self.get_test_db_clone_settings(_worker_id)
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. Otherwise new threads would
# connect to the default database instead of the appropriate clone.
start_method = multiprocessing.get_start_method()
if start_method == "fork":
# Update settings_dict in place.
self.connection.settings_dict.update(settings_dict)
self.connection.close()
elif start_method == "spawn":
alias = self.connection.alias
connection_str = (
f"file:memorydb_{alias}_{_worker_id}?mode=memory&cache=shared"
)
source_db = self.connection.Database.connect(
f"file:{alias}_{_worker_id}.sqlite3", uri=True
)
target_db = sqlite3.connect(connection_str, uri=True)
source_db.backup(target_db)
source_db.close()
# Update settings_dict in place.
self.connection.settings_dict.update(settings_dict)
self.connection.settings_dict["NAME"] = connection_str
# Re-open connection to in-memory database before closing copy
# connection.
self.connection.connect()
target_db.close()
if os.environ.get("RUNNING_DJANGOS_TEST_SUITE") == "true":
self.mark_expected_failures_and_skips()
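# Editor's note: a small sketch (illustration, not part of Django) of how the
# in-memory detection above classifies representative NAME values:
#
#   DatabaseCreation.is_in_memory_db(":memory:")                           # True
#   DatabaseCreation.is_in_memory_db("file:mem?mode=memory&cache=shared")  # True
#   DatabaseCreation.is_in_memory_db("/tmp/test_db.sqlite3")               # False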
|
11f6b52baee477977095cb360e09e40d3b59cb416e5ba9d2a3ba1a56c2cdf41c | """
Sets up the terminal color scheme.
"""
import functools
import os
import sys
from django.utils import termcolors
try:
import colorama
colorama.init()
except (ImportError, OSError):
HAS_COLORAMA = False
else:
HAS_COLORAMA = True
def supports_color():
"""
Return True if the running system's terminal supports color,
and False otherwise.
"""
def vt_codes_enabled_in_windows_registry():
"""
Check the Windows Registry to see if VT code handling has been enabled
by default, see https://superuser.com/a/1300251/447564.
"""
try:
# winreg is only available on Windows.
import winreg
except ImportError:
return False
else:
try:
reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Console")
reg_key_value, _ = winreg.QueryValueEx(reg_key, "VirtualTerminalLevel")
except FileNotFoundError:
return False
else:
return reg_key_value == 1
# isatty is not always implemented, #6223.
is_a_tty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
return is_a_tty and (
sys.platform != "win32"
or HAS_COLORAMA
or "ANSICON" in os.environ
or
# Windows Terminal supports VT codes.
"WT_SESSION" in os.environ
or
# Microsoft Visual Studio Code's built-in terminal supports colors.
os.environ.get("TERM_PROGRAM") == "vscode"
or vt_codes_enabled_in_windows_registry()
)
class Style:
pass
def make_style(config_string=""):
"""
Create a Style object from the given config_string.
If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used.
"""
style = Style()
color_settings = termcolors.parse_color_setting(config_string)
# The nocolor palette has all available roles.
# Use that palette as the basis for populating
# the palette as defined in the environment.
for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]:
if color_settings:
format = color_settings.get(role, {})
style_func = termcolors.make_style(**format)
else:
def style_func(x):
return x
setattr(style, role, style_func)
# For backwards compatibility,
# set style for ERROR_OUTPUT == ERROR
style.ERROR_OUTPUT = style.ERROR
return style
@functools.lru_cache(maxsize=None)
def no_style():
"""
Return a Style object with no color scheme.
"""
return make_style("nocolor")
def color_style(force_color=False):
"""
Return a Style object from the Django color scheme.
"""
if not force_color and not supports_color():
return no_style()
return make_style(os.environ.get("DJANGO_COLORS", ""))
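# Editor's note: a hedged usage sketch (not part of Django). Each role on the
# returned Style object is a callable that wraps text in ANSI codes, or
# returns it unchanged when color is unsupported or disabled:
#
#   style = color_style()
#   print(style.SUCCESS("System check identified no issues."))
#   print(no_style().ERROR("printed without any ANSI escapes"))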
|
f7d1a20c4aaec571a4033e52855a3576bc63a440ff7be911c1e5c661018f85cd | """
Caching framework.
This package defines a set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract BaseCache class in django.core.cache.backends.base.
Client code should use the `cache` variable defined here to access the default
cache backend and look up non-default cache backends in the `caches` dict-like
object.
See docs/topics/cache.txt for information on the public API.
"""
from django.core import signals
from django.core.cache.backends.base import (
BaseCache,
CacheKeyWarning,
InvalidCacheBackendError,
InvalidCacheKey,
)
from django.utils.connection import BaseConnectionHandler, ConnectionProxy
from django.utils.module_loading import import_string
__all__ = [
"cache",
"caches",
"DEFAULT_CACHE_ALIAS",
"InvalidCacheBackendError",
"CacheKeyWarning",
"BaseCache",
"InvalidCacheKey",
]
DEFAULT_CACHE_ALIAS = "default"
class CacheHandler(BaseConnectionHandler):
settings_name = "CACHES"
exception_class = InvalidCacheBackendError
def create_connection(self, alias):
params = self.settings[alias].copy()
backend = params.pop("BACKEND")
location = params.pop("LOCATION", "")
try:
backend_cls = import_string(backend)
except ImportError as e:
raise InvalidCacheBackendError(
"Could not find backend '%s': %s" % (backend, e)
) from e
return backend_cls(location, params)
caches = CacheHandler()
cache = ConnectionProxy(caches, DEFAULT_CACHE_ALIAS)
def close_caches(**kwargs):
    # Some caches need to do a cleanup at the end of a request cycle. If not
    # implemented in a particular backend, cache.close() is a no-op.
for cache in caches.all(initialized_only=True):
cache.close()
signals.request_finished.connect(close_caches)
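# Editor's note: a usage sketch (illustration, not part of Django). With a
# CACHES setting configured, the module-level proxies behave like the backend
# instances themselves:
#
#   from django.core.cache import cache, caches
#   cache.set("greeting", "hello", timeout=30)  # default alias
#   assert caches[DEFAULT_CACHE_ALIAS].get("greeting") == "hello"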
|
4aa933ce814b95e3ae02417c9e99770897d40c6ec918a320de3ab8abd62dd8ea | import asyncio
import logging
import types
from asgiref.sync import async_to_sync, sync_to_async
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, MiddlewareNotUsed
from django.core.signals import request_finished
from django.db import connections, transaction
from django.urls import get_resolver, set_urlconf
from django.utils.log import log_response
from django.utils.module_loading import import_string
from .exception import convert_exception_to_response
logger = logging.getLogger("django.request")
class BaseHandler:
_view_middleware = None
_template_response_middleware = None
_exception_middleware = None
_middleware_chain = None
def load_middleware(self, is_async=False):
"""
Populate middleware lists from settings.MIDDLEWARE.
Must be called after the environment is fixed (see __call__ in subclasses).
"""
self._view_middleware = []
self._template_response_middleware = []
self._exception_middleware = []
get_response = self._get_response_async if is_async else self._get_response
handler = convert_exception_to_response(get_response)
handler_is_async = is_async
for middleware_path in reversed(settings.MIDDLEWARE):
middleware = import_string(middleware_path)
middleware_can_sync = getattr(middleware, "sync_capable", True)
middleware_can_async = getattr(middleware, "async_capable", False)
if not middleware_can_sync and not middleware_can_async:
raise RuntimeError(
"Middleware %s must have at least one of "
"sync_capable/async_capable set to True." % middleware_path
)
elif not handler_is_async and middleware_can_sync:
middleware_is_async = False
else:
middleware_is_async = middleware_can_async
try:
# Adapt handler, if needed.
adapted_handler = self.adapt_method_mode(
middleware_is_async,
handler,
handler_is_async,
debug=settings.DEBUG,
name="middleware %s" % middleware_path,
)
mw_instance = middleware(adapted_handler)
except MiddlewareNotUsed as exc:
if settings.DEBUG:
if str(exc):
logger.debug("MiddlewareNotUsed(%r): %s", middleware_path, exc)
else:
logger.debug("MiddlewareNotUsed: %r", middleware_path)
continue
else:
handler = adapted_handler
if mw_instance is None:
raise ImproperlyConfigured(
"Middleware factory %s returned None." % middleware_path
)
if hasattr(mw_instance, "process_view"):
self._view_middleware.insert(
0,
self.adapt_method_mode(is_async, mw_instance.process_view),
)
if hasattr(mw_instance, "process_template_response"):
self._template_response_middleware.append(
self.adapt_method_mode(
is_async, mw_instance.process_template_response
),
)
if hasattr(mw_instance, "process_exception"):
# The exception-handling stack is still always synchronous for
# now, so adapt that way.
self._exception_middleware.append(
self.adapt_method_mode(False, mw_instance.process_exception),
)
handler = convert_exception_to_response(mw_instance)
handler_is_async = middleware_is_async
# Adapt the top of the stack, if needed.
handler = self.adapt_method_mode(is_async, handler, handler_is_async)
        # Only assign to this once initialization is complete; it doubles as
        # the flag that marks initialization as finished.
self._middleware_chain = handler
def adapt_method_mode(
self,
is_async,
method,
method_is_async=None,
debug=False,
name=None,
):
"""
Adapt a method to be in the correct "mode":
- If is_async is False:
- Synchronous methods are left alone
- Asynchronous methods are wrapped with async_to_sync
- If is_async is True:
- Synchronous methods are wrapped with sync_to_async()
- Asynchronous methods are left alone
"""
if method_is_async is None:
method_is_async = asyncio.iscoroutinefunction(method)
if debug and not name:
name = name or "method %s()" % method.__qualname__
if is_async:
if not method_is_async:
if debug:
logger.debug("Synchronous handler adapted for %s.", name)
return sync_to_async(method, thread_sensitive=True)
elif method_is_async:
if debug:
logger.debug("Asynchronous handler adapted for %s.", name)
return async_to_sync(method)
return method
def get_response(self, request):
"""Return an HttpResponse object for the given HttpRequest."""
        # Set up the default URL resolver for this thread.
set_urlconf(settings.ROOT_URLCONF)
response = self._middleware_chain(request)
response._resource_closers.append(request.close)
if response.status_code >= 400:
log_response(
"%s: %s",
response.reason_phrase,
request.path,
response=response,
request=request,
)
return response
async def get_response_async(self, request):
"""
Asynchronous version of get_response.
Funneling everything, including WSGI, into a single async
get_response() is too slow. Avoid the context switch by using
a separate async response path.
"""
        # Set up the default URL resolver for this thread.
set_urlconf(settings.ROOT_URLCONF)
response = await self._middleware_chain(request)
response._resource_closers.append(request.close)
if response.status_code >= 400:
await sync_to_async(log_response, thread_sensitive=False)(
"%s: %s",
response.reason_phrase,
request.path,
response=response,
request=request,
)
return response
def _get_response(self, request):
"""
Resolve and call the view, then apply view, exception, and
template_response middleware. This method is everything that happens
inside the request/response middleware.
"""
response = None
callback, callback_args, callback_kwargs = self.resolve_request(request)
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(
request, callback, callback_args, callback_kwargs
)
if response:
break
if response is None:
wrapped_callback = self.make_view_atomic(callback)
            # If it is an asynchronous view, adapt it with async_to_sync().
if asyncio.iscoroutinefunction(wrapped_callback):
wrapped_callback = async_to_sync(wrapped_callback)
try:
response = wrapped_callback(request, *callback_args, **callback_kwargs)
except Exception as e:
response = self.process_exception_by_middleware(e, request)
if response is None:
raise
# Complain if the view returned None (a common error).
self.check_response(response, callback)
# If the response supports deferred rendering, apply template
# response middleware and then render the response
if hasattr(response, "render") and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
# Complain if the template response middleware returned None
# (a common error).
self.check_response(
response,
middleware_method,
name="%s.process_template_response"
% (middleware_method.__self__.__class__.__name__,),
)
try:
response = response.render()
except Exception as e:
response = self.process_exception_by_middleware(e, request)
if response is None:
raise
return response
async def _get_response_async(self, request):
"""
Resolve and call the view, then apply view, exception, and
template_response middleware. This method is everything that happens
inside the request/response middleware.
"""
response = None
callback, callback_args, callback_kwargs = self.resolve_request(request)
# Apply view middleware.
for middleware_method in self._view_middleware:
response = await middleware_method(
request, callback, callback_args, callback_kwargs
)
if response:
break
if response is None:
wrapped_callback = self.make_view_atomic(callback)
            # If it is a synchronous view, adapt it with sync_to_async().
if not asyncio.iscoroutinefunction(wrapped_callback):
wrapped_callback = sync_to_async(
wrapped_callback, thread_sensitive=True
)
try:
response = await wrapped_callback(
request, *callback_args, **callback_kwargs
)
except Exception as e:
response = await sync_to_async(
self.process_exception_by_middleware,
thread_sensitive=True,
)(e, request)
if response is None:
raise
# Complain if the view returned None or an uncalled coroutine.
self.check_response(response, callback)
# If the response supports deferred rendering, apply template
# response middleware and then render the response
if hasattr(response, "render") and callable(response.render):
for middleware_method in self._template_response_middleware:
response = await middleware_method(request, response)
# Complain if the template response middleware returned None or
# an uncalled coroutine.
self.check_response(
response,
middleware_method,
name="%s.process_template_response"
% (middleware_method.__self__.__class__.__name__,),
)
try:
if asyncio.iscoroutinefunction(response.render):
response = await response.render()
else:
response = await sync_to_async(
response.render, thread_sensitive=True
)()
except Exception as e:
response = await sync_to_async(
self.process_exception_by_middleware,
thread_sensitive=True,
)(e, request)
if response is None:
raise
# Make sure the response is not a coroutine
if asyncio.iscoroutine(response):
raise RuntimeError("Response is still a coroutine.")
return response
def resolve_request(self, request):
"""
Retrieve/set the urlconf for the request. Return the view resolved,
with its args and kwargs.
"""
# Work out the resolver.
if hasattr(request, "urlconf"):
urlconf = request.urlconf
set_urlconf(urlconf)
resolver = get_resolver(urlconf)
else:
resolver = get_resolver()
# Resolve the view, and assign the match object back to the request.
resolver_match = resolver.resolve(request.path_info)
request.resolver_match = resolver_match
return resolver_match
def check_response(self, response, callback, name=None):
"""
Raise an error if the view returned None or an uncalled coroutine.
"""
if not (response is None or asyncio.iscoroutine(response)):
return
if not name:
if isinstance(callback, types.FunctionType): # FBV
name = "The view %s.%s" % (callback.__module__, callback.__name__)
else: # CBV
name = "The view %s.%s.__call__" % (
callback.__module__,
callback.__class__.__name__,
)
if response is None:
raise ValueError(
"%s didn't return an HttpResponse object. It returned None "
"instead." % name
)
elif asyncio.iscoroutine(response):
raise ValueError(
"%s didn't return an HttpResponse object. It returned an "
"unawaited coroutine instead. You may need to add an 'await' "
"into your view." % name
)
# Other utility methods.
def make_view_atomic(self, view):
non_atomic_requests = getattr(view, "_non_atomic_requests", set())
for alias, settings_dict in connections.settings.items():
if settings_dict["ATOMIC_REQUESTS"] and alias not in non_atomic_requests:
if asyncio.iscoroutinefunction(view):
raise RuntimeError(
"You cannot use ATOMIC_REQUESTS with async views."
)
view = transaction.atomic(using=alias)(view)
return view
def process_exception_by_middleware(self, exception, request):
"""
        Pass the exception to the exception middleware. If no middleware
        returns a response for this exception, return None.
"""
for middleware_method in self._exception_middleware:
response = middleware_method(request, exception)
if response:
return response
return None
def reset_urlconf(sender, **kwargs):
"""Reset the URLconf after each request is finished."""
set_urlconf(None)
request_finished.connect(reset_urlconf)
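# Editor's note: a minimal sketch (not part of Django) of what
# adapt_method_mode() does, assuming `handler` is a BaseHandler instance and
# `sync_view` is a plain synchronous callable:
#
#   wrapped = handler.adapt_method_mode(True, sync_view)  # sync_to_async()
#   same = handler.adapt_method_mode(False, sync_view)    # returned as-is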
|
41a58dacfc4aaae29aa637e648e5b17bb3616a885226b7116225109451fb3a56 | from psycopg2.extras import DateRange, DateTimeRange, DateTimeTZRange, NumericRange
from django.apps import AppConfig
from django.core.signals import setting_changed
from django.db import connections
from django.db.backends.signals import connection_created
from django.db.migrations.writer import MigrationWriter
from django.db.models import CharField, OrderBy, TextField
from django.db.models.functions import Collate
from django.db.models.indexes import IndexExpression
from django.utils.translation import gettext_lazy as _
from .indexes import OpClass
from .lookups import SearchLookup, TrigramSimilar, TrigramWordSimilar, Unaccent
from .serializers import RangeSerializer
from .signals import register_type_handlers
RANGE_TYPES = (DateRange, DateTimeRange, DateTimeTZRange, NumericRange)
def uninstall_if_needed(setting, value, enter, **kwargs):
"""
Undo the effects of PostgresConfig.ready() when django.contrib.postgres
is "uninstalled" by override_settings().
"""
if (
not enter
and setting == "INSTALLED_APPS"
and "django.contrib.postgres" not in set(value)
):
connection_created.disconnect(register_type_handlers)
CharField._unregister_lookup(Unaccent)
TextField._unregister_lookup(Unaccent)
CharField._unregister_lookup(SearchLookup)
TextField._unregister_lookup(SearchLookup)
CharField._unregister_lookup(TrigramSimilar)
TextField._unregister_lookup(TrigramSimilar)
CharField._unregister_lookup(TrigramWordSimilar)
TextField._unregister_lookup(TrigramWordSimilar)
# Disconnect this receiver until the next time this app is installed
# and ready() connects it again to prevent unnecessary processing on
# each setting change.
setting_changed.disconnect(uninstall_if_needed)
MigrationWriter.unregister_serializer(RANGE_TYPES)
class PostgresConfig(AppConfig):
name = "django.contrib.postgres"
verbose_name = _("PostgreSQL extensions")
def ready(self):
setting_changed.connect(uninstall_if_needed)
# Connections may already exist before we are called.
for conn in connections.all(initialized_only=True):
if conn.vendor == "postgresql":
conn.introspection.data_types_reverse.update(
{
3904: "django.contrib.postgres.fields.IntegerRangeField",
3906: "django.contrib.postgres.fields.DecimalRangeField",
3910: "django.contrib.postgres.fields.DateTimeRangeField",
3912: "django.contrib.postgres.fields.DateRangeField",
3926: "django.contrib.postgres.fields.BigIntegerRangeField",
}
)
if conn.connection is not None:
register_type_handlers(conn)
connection_created.connect(register_type_handlers)
CharField.register_lookup(Unaccent)
TextField.register_lookup(Unaccent)
CharField.register_lookup(SearchLookup)
TextField.register_lookup(SearchLookup)
CharField.register_lookup(TrigramSimilar)
TextField.register_lookup(TrigramSimilar)
CharField.register_lookup(TrigramWordSimilar)
TextField.register_lookup(TrigramWordSimilar)
MigrationWriter.register_serializer(RANGE_TYPES, RangeSerializer)
IndexExpression.register_wrappers(OrderBy, OpClass, Collate)
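# Editor's note: a hedged illustration (not part of Django) of when
# uninstall_if_needed() fires. A test may enable the app temporarily; when the
# override exits, setting_changed is sent with enter=False and the restored
# INSTALLED_APPS, which undoes the lookups registered in ready():
#
#   @modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
#   class MyTests(TestCase):
#       ...  # Unaccent, SearchLookup, etc. are registered here, undone after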
|
11bee21f515ae8907979687001a6c5d7f94854641e6060ed4edb5657491dbb36 | import warnings
from django.contrib.postgres.indexes import OpClass
from django.db import NotSupportedError
from django.db.backends.ddl_references import Expressions, Statement, Table
from django.db.models import BaseConstraint, Deferrable, F, Q
from django.db.models.expressions import ExpressionList
from django.db.models.indexes import IndexExpression
from django.db.models.sql import Query
from django.utils.deprecation import RemovedInDjango50Warning
__all__ = ["ExclusionConstraint"]
class ExclusionConstraintExpression(IndexExpression):
template = "%(expressions)s WITH %(operator)s"
class ExclusionConstraint(BaseConstraint):
template = (
"CONSTRAINT %(name)s EXCLUDE USING %(index_type)s "
"(%(expressions)s)%(include)s%(where)s%(deferrable)s"
)
def __init__(
self,
*,
name,
expressions,
index_type=None,
condition=None,
deferrable=None,
include=None,
opclasses=(),
):
if index_type and index_type.lower() not in {"gist", "spgist"}:
raise ValueError(
"Exclusion constraints only support GiST or SP-GiST indexes."
)
if not expressions:
raise ValueError(
"At least one expression is required to define an exclusion "
"constraint."
)
if not all(
isinstance(expr, (list, tuple)) and len(expr) == 2 for expr in expressions
):
raise ValueError("The expressions must be a list of 2-tuples.")
if not isinstance(condition, (type(None), Q)):
raise ValueError("ExclusionConstraint.condition must be a Q instance.")
if condition and deferrable:
raise ValueError("ExclusionConstraint with conditions cannot be deferred.")
if not isinstance(deferrable, (type(None), Deferrable)):
raise ValueError(
"ExclusionConstraint.deferrable must be a Deferrable instance."
)
if not isinstance(include, (type(None), list, tuple)):
raise ValueError("ExclusionConstraint.include must be a list or tuple.")
if not isinstance(opclasses, (list, tuple)):
raise ValueError("ExclusionConstraint.opclasses must be a list or tuple.")
if opclasses and len(expressions) != len(opclasses):
raise ValueError(
"ExclusionConstraint.expressions and "
"ExclusionConstraint.opclasses must have the same number of "
"elements."
)
self.expressions = expressions
self.index_type = index_type or "GIST"
self.condition = condition
self.deferrable = deferrable
self.include = tuple(include) if include else ()
self.opclasses = opclasses
if self.opclasses:
warnings.warn(
"The opclasses argument is deprecated in favor of using "
"django.contrib.postgres.indexes.OpClass in "
"ExclusionConstraint.expressions.",
category=RemovedInDjango50Warning,
stacklevel=2,
)
super().__init__(name=name)
def _get_expressions(self, schema_editor, query):
expressions = []
for idx, (expression, operator) in enumerate(self.expressions):
if isinstance(expression, str):
expression = F(expression)
try:
expression = OpClass(expression, self.opclasses[idx])
except IndexError:
pass
expression = ExclusionConstraintExpression(expression, operator=operator)
expression.set_wrapper_classes(schema_editor.connection)
expressions.append(expression)
return ExpressionList(*expressions).resolve_expression(query)
def _get_condition_sql(self, compiler, schema_editor, query):
if self.condition is None:
return None
where = query.build_where(self.condition)
sql, params = where.as_sql(compiler, schema_editor.connection)
return sql % tuple(schema_editor.quote_value(p) for p in params)
def constraint_sql(self, model, schema_editor):
query = Query(model, alias_cols=False)
compiler = query.get_compiler(connection=schema_editor.connection)
expressions = self._get_expressions(schema_editor, query)
table = model._meta.db_table
condition = self._get_condition_sql(compiler, schema_editor, query)
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
return Statement(
self.template,
table=Table(table, schema_editor.quote_name),
name=schema_editor.quote_name(self.name),
index_type=self.index_type,
expressions=Expressions(
table, expressions, compiler, schema_editor.quote_value
),
where=" WHERE (%s)" % condition if condition else "",
include=schema_editor._index_include_sql(model, include),
deferrable=schema_editor._deferrable_constraint_sql(self.deferrable),
)
def create_sql(self, model, schema_editor):
self.check_supported(schema_editor)
return Statement(
"ALTER TABLE %(table)s ADD %(constraint)s",
table=Table(model._meta.db_table, schema_editor.quote_name),
constraint=self.constraint_sql(model, schema_editor),
)
def remove_sql(self, model, schema_editor):
return schema_editor._delete_constraint_sql(
schema_editor.sql_delete_check,
model,
schema_editor.quote_name(self.name),
)
def check_supported(self, schema_editor):
if (
self.include
and self.index_type.lower() == "gist"
and not schema_editor.connection.features.supports_covering_gist_indexes
):
raise NotSupportedError(
"Covering exclusion constraints using a GiST index require "
"PostgreSQL 12+."
)
if (
self.include
and self.index_type.lower() == "spgist"
and not schema_editor.connection.features.supports_covering_spgist_indexes
):
raise NotSupportedError(
"Covering exclusion constraints using an SP-GiST index "
"require PostgreSQL 14+."
)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
kwargs["expressions"] = self.expressions
if self.condition is not None:
kwargs["condition"] = self.condition
if self.index_type.lower() != "gist":
kwargs["index_type"] = self.index_type
if self.deferrable:
kwargs["deferrable"] = self.deferrable
if self.include:
kwargs["include"] = self.include
if self.opclasses:
kwargs["opclasses"] = self.opclasses
return path, args, kwargs
def __eq__(self, other):
if isinstance(other, self.__class__):
return (
self.name == other.name
and self.index_type == other.index_type
and self.expressions == other.expressions
and self.condition == other.condition
and self.deferrable == other.deferrable
and self.include == other.include
and self.opclasses == other.opclasses
)
return super().__eq__(other)
def __repr__(self):
return "<%s: index_type=%s expressions=%s name=%s%s%s%s%s>" % (
self.__class__.__qualname__,
repr(self.index_type),
repr(self.expressions),
repr(self.name),
"" if self.condition is None else " condition=%s" % self.condition,
"" if self.deferrable is None else " deferrable=%r" % self.deferrable,
"" if not self.include else " include=%s" % repr(self.include),
"" if not self.opclasses else " opclasses=%s" % repr(self.opclasses),
)
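# Editor's note: a hedged usage sketch (not part of Django; the Reservation
# model is hypothetical). A typical declaration forbidding overlapping ranges
# per room:
#
#   from django.contrib.postgres.constraints import ExclusionConstraint
#   from django.contrib.postgres.fields import DateTimeRangeField
#   from django.db import models
#
#   class Reservation(models.Model):
#       room = models.ForeignKey("Room", models.CASCADE)
#       timespan = DateTimeRangeField()
#
#       class Meta:
#           constraints = [
#               ExclusionConstraint(
#                   name="exclude_overlapping_reservations",
#                   expressions=[("timespan", "&&"), ("room", "=")],
#               ),
#           ]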
|
e930c3012f02ead9aca784b933cc4cda86e59b8f6e3c655590b74fcb9a314935 | from collections import defaultdict
from django.apps import apps
from django.db import models
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
class ContentTypeManager(models.Manager):
use_in_migrations = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Cache shared by all the get_for_* methods to speed up
# ContentType retrieval.
self._cache = {}
def get_by_natural_key(self, app_label, model):
try:
ct = self._cache[self.db][(app_label, model)]
except KeyError:
ct = self.get(app_label=app_label, model=model)
self._add_to_cache(self.db, ct)
return ct
def _get_opts(self, model, for_concrete_model):
if for_concrete_model:
model = model._meta.concrete_model
return model._meta
def _get_from_cache(self, opts):
key = (opts.app_label, opts.model_name)
return self._cache[self.db][key]
def get_for_model(self, model, for_concrete_model=True):
"""
Return the ContentType object for a given model, creating the
ContentType if necessary. Lookups are cached so that subsequent lookups
for the same model don't hit the database.
"""
opts = self._get_opts(model, for_concrete_model)
try:
return self._get_from_cache(opts)
except KeyError:
pass
        # The ContentType entry was not found in the cache, so proceed to
        # load or create it.
try:
# Start with get() and not get_or_create() in order to use
# the db_for_read (see #20401).
ct = self.get(app_label=opts.app_label, model=opts.model_name)
except self.model.DoesNotExist:
# Not found in the database; we proceed to create it. This time
# use get_or_create to take care of any race conditions.
ct, created = self.get_or_create(
app_label=opts.app_label,
model=opts.model_name,
)
self._add_to_cache(self.db, ct)
return ct
def get_for_models(self, *models, for_concrete_models=True):
"""
Given *models, return a dictionary mapping {model: content_type}.
"""
results = {}
# Models that aren't already in the cache grouped by app labels.
needed_models = defaultdict(set)
# Mapping of opts to the list of models requiring it.
needed_opts = defaultdict(list)
for model in models:
opts = self._get_opts(model, for_concrete_models)
try:
ct = self._get_from_cache(opts)
except KeyError:
needed_models[opts.app_label].add(opts.model_name)
needed_opts[opts].append(model)
else:
results[model] = ct
if needed_opts:
# Lookup required content types from the DB.
condition = Q(
*(
Q(("app_label", app_label), ("model__in", models))
for app_label, models in needed_models.items()
),
_connector=Q.OR,
)
cts = self.filter(condition)
for ct in cts:
opts_models = needed_opts.pop(ct.model_class()._meta, [])
for model in opts_models:
results[model] = ct
self._add_to_cache(self.db, ct)
# Create content types that weren't in the cache or DB.
for opts, opts_models in needed_opts.items():
ct = self.create(
app_label=opts.app_label,
model=opts.model_name,
)
self._add_to_cache(self.db, ct)
for model in opts_models:
results[model] = ct
return results
def get_for_id(self, id):
"""
        Look up a ContentType by ID. Use the same shared cache as
        get_for_model (though ContentTypes are not created on the fly by
        get_for_id).
"""
try:
ct = self._cache[self.db][id]
except KeyError:
# This could raise a DoesNotExist; that's correct behavior and will
# make sure that only correct ctypes get stored in the cache dict.
ct = self.get(pk=id)
self._add_to_cache(self.db, ct)
return ct
def clear_cache(self):
"""
Clear out the content-type cache.
"""
self._cache.clear()
def _add_to_cache(self, using, ct):
"""Insert a ContentType into the cache."""
# Note it's possible for ContentType objects to be stale; model_class()
# will return None. Hence, there is no reliance on
# model._meta.app_label here, just using the model fields instead.
key = (ct.app_label, ct.model)
self._cache.setdefault(using, {})[key] = ct
self._cache.setdefault(using, {})[ct.id] = ct
class ContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(_("python model class name"), max_length=100)
objects = ContentTypeManager()
class Meta:
verbose_name = _("content type")
verbose_name_plural = _("content types")
db_table = "django_content_type"
unique_together = [["app_label", "model"]]
def __str__(self):
return self.app_labeled_name
@property
def name(self):
model = self.model_class()
if not model:
return self.model
return str(model._meta.verbose_name)
@property
def app_labeled_name(self):
model = self.model_class()
if not model:
return self.model
return "%s | %s" % (model._meta.app_label, model._meta.verbose_name)
def model_class(self):
"""Return the model class for this type of content."""
try:
return apps.get_model(self.app_label, self.model)
except LookupError:
return None
def get_object_for_this_type(self, **kwargs):
"""
Return an object of this type for the keyword arguments given.
        Basically, this is a proxy around the model manager's get() method.
        The ObjectDoesNotExist exception, if raised, is not caught, so code
        that calls this method should catch it.
"""
return self.model_class()._base_manager.using(self._state.db).get(**kwargs)
def get_all_objects_for_this_type(self, **kwargs):
"""
Return all objects of this type for the keyword arguments given.
"""
return self.model_class()._base_manager.using(self._state.db).filter(**kwargs)
def natural_key(self):
return (self.app_label, self.model)
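# Editor's note: a usage sketch (illustration, not part of Django). The
# manager caches by (app_label, model) and by primary key, so repeated
# lookups for the same model hit the database at most once per alias:
#
#   from django.contrib.auth.models import User
#   ct = ContentType.objects.get_for_model(User)        # query, then cached
#   assert ContentType.objects.get_for_id(ct.id) is ct  # served from cache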
|
f355b318ce8c523c9c3216d033a9ff41c142ce7fd8fef9ce8b685dffd583603c | import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, c_char_p, c_int
from ctypes.util import find_library
from django.contrib.gis.gdal.error import GDALException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger("django.contrib.gis")
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GDAL_LIBRARY_PATH
except (AttributeError, ImportError, ImproperlyConfigured, OSError):
lib_path = None
if lib_path:
lib_names = None
elif os.name == "nt":
# Windows NT shared libraries
lib_names = [
"gdal304",
"gdal303",
"gdal302",
"gdal301",
"gdal300",
"gdal204",
"gdal203",
"gdal202",
]
elif os.name == "posix":
# *NIX library names.
lib_names = [
"gdal",
"GDAL",
"gdal3.4.0",
"gdal3.3.0",
"gdal3.2.0",
"gdal3.1.0",
"gdal3.0.0",
"gdal2.4.0",
"gdal2.3.0",
"gdal2.2.0",
]
else:
raise ImproperlyConfigured('GDAL is unsupported on OS "%s".' % os.name)
# Using the ctypes `find_library` utility to find the
# path to the GDAL library from the list of library names.
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
if lib_path is None:
raise ImproperlyConfigured(
'Could not find the GDAL library (tried "%s"). Is GDAL installed? '
"If it is, try setting GDAL_LIBRARY_PATH in your settings."
% '", "'.join(lib_names)
)
# This loads the GDAL/OGR C library
lgdal = CDLL(lib_path)
# On Windows, the GDAL binaries have some OSR routines exported with
# STDCALL, while others are not. Thus, the library will also need to
# be loaded up as WinDLL for said OSR functions that require the
# different calling convention.
if os.name == "nt":
from ctypes import WinDLL
lwingdal = WinDLL(lib_path)
def std_call(func):
"""
Return the correct STDCALL function for certain OSR routines on Win32
platforms.
"""
if os.name == "nt":
return lwingdal[func]
else:
return lgdal[func]
# #### Version-information functions. ####
# Return GDAL library version information with the given key.
_version_info = std_call("GDALVersionInfo")
_version_info.argtypes = [c_char_p]
_version_info.restype = c_char_p
def gdal_version():
"Return only the GDAL version number information."
return _version_info(b"RELEASE_NAME")
def gdal_full_version():
"Return the full GDAL version information."
return _version_info(b"")
def gdal_version_info():
ver = gdal_version()
m = re.match(rb"^(?P<major>\d+)\.(?P<minor>\d+)(?:\.(?P<subminor>\d+))?", ver)
if not m:
raise GDALException('Could not parse GDAL version string "%s"' % ver)
major, minor, subminor = m.groups()
return (int(major), int(minor), subminor and int(subminor))
GDAL_VERSION = gdal_version_info()
# Set library error handling so as errors are logged
CPLErrorHandler = CFUNCTYPE(None, c_int, c_int, c_char_p)
def err_handler(error_class, error_number, message):
logger.error("GDAL_ERROR %d: %s", error_number, message)
err_handler = CPLErrorHandler(err_handler)
def function(name, args, restype):
func = std_call(name)
func.argtypes = args
func.restype = restype
return func
set_error_handler = function("CPLSetErrorHandler", [CPLErrorHandler], CPLErrorHandler)
set_error_handler(err_handler)
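# Editor's note: a hedged sketch (not part of Django). With GDAL installed,
# the ctypes bindings above expose version information as bytes:
#
#   gdal_version()       # e.g. b"3.4.1"
#   gdal_version_info()  # e.g. (3, 4, 1)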
|
e64f1ad05f5d2d1b4597fa9a908ca0bd2a4a7bcb1a8145d51bc795c69711162e | from datetime import date
from django.test import modify_settings
from . import PostgreSQLTestCase
from .models import (
HStoreModel,
IntegerArrayModel,
NestedIntegerArrayModel,
NullableIntegerArrayModel,
OtherTypesArrayModel,
RangesModel,
)
try:
from psycopg2.extras import DateRange, NumericRange
except ImportError:
pass # psycopg2 isn't installed.
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
class BulkSaveTests(PostgreSQLTestCase):
def test_bulk_update(self):
test_data = [
(IntegerArrayModel, "field", [], [1, 2, 3]),
(NullableIntegerArrayModel, "field", [1, 2, 3], None),
(NestedIntegerArrayModel, "field", [], [[1, 2, 3]]),
(HStoreModel, "field", {}, {1: 2}),
(RangesModel, "ints", None, NumericRange(lower=1, upper=10)),
(
RangesModel,
"dates",
None,
DateRange(lower=date.today(), upper=date.today()),
),
(OtherTypesArrayModel, "ips", [], ["1.2.3.4"]),
(OtherTypesArrayModel, "json", [], [{"a": "b"}]),
]
for Model, field, initial, new in test_data:
with self.subTest(model=Model, field=field):
instances = Model.objects.bulk_create(
Model(**{field: initial}) for _ in range(20)
)
for instance in instances:
setattr(instance, field, new)
Model.objects.bulk_update(instances, [field])
self.assertSequenceEqual(
Model.objects.filter(**{field: new}), instances
)
|
0646c6b61111b1057f3fc94015e477505bec4f9e4639325f8828e7c3d733d471 | from unittest import mock
from django.apps.registry import Apps, apps
from django.contrib.contenttypes import management as contenttypes_management
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from django.test import TestCase, modify_settings
from django.test.utils import captured_stdout
from .models import ModelWithNullFKToSite, Post
@modify_settings(INSTALLED_APPS={"append": ["empty_models", "no_models"]})
class RemoveStaleContentTypesTests(TestCase):
# Speed up tests by avoiding retrieving ContentTypes for all test apps.
available_apps = [
"contenttypes_tests",
"empty_models",
"no_models",
"django.contrib.contenttypes",
]
@classmethod
def setUpTestData(cls):
with captured_stdout():
call_command(
"remove_stale_contenttypes",
interactive=False,
include_stale_apps=True,
verbosity=2,
)
cls.before_count = ContentType.objects.count()
cls.content_type = ContentType.objects.create(
app_label="contenttypes_tests", model="Fake"
)
def setUp(self):
self.app_config = apps.get_app_config("contenttypes_tests")
def test_interactive_true_with_dependent_objects(self):
"""
interactive mode (the default) deletes stale content types and warns of
dependent objects.
"""
post = Post.objects.create(title="post", content_type=self.content_type)
        # A related object is needed to show that a custom collector with
        # can_fast_delete=False is required.
ModelWithNullFKToSite.objects.create(post=post)
with mock.patch("builtins.input", return_value="yes"):
with captured_stdout() as stdout:
call_command("remove_stale_contenttypes", verbosity=2, stdout=stdout)
self.assertEqual(Post.objects.count(), 0)
output = stdout.getvalue()
self.assertIn("- Content type for contenttypes_tests.Fake", output)
self.assertIn("- 1 contenttypes_tests.Post object(s)", output)
self.assertIn("- 1 contenttypes_tests.ModelWithNullFKToSite", output)
self.assertIn("Deleting stale content type", output)
self.assertEqual(ContentType.objects.count(), self.before_count)
def test_interactive_true_without_dependent_objects(self):
"""
interactive mode deletes stale content types even if there aren't any
dependent objects.
"""
with mock.patch("builtins.input", return_value="yes"):
with captured_stdout() as stdout:
call_command("remove_stale_contenttypes", verbosity=2)
self.assertIn("Deleting stale content type", stdout.getvalue())
self.assertEqual(ContentType.objects.count(), self.before_count)
def test_interactive_false(self):
"""non-interactive mode deletes stale content types."""
with captured_stdout() as stdout:
call_command("remove_stale_contenttypes", interactive=False, verbosity=2)
self.assertIn("Deleting stale content type", stdout.getvalue())
self.assertEqual(ContentType.objects.count(), self.before_count)
def test_unavailable_content_type_model(self):
"""A ContentType isn't created if the model isn't available."""
apps = Apps()
with self.assertNumQueries(0):
contenttypes_management.create_contenttypes(
self.app_config, interactive=False, verbosity=0, apps=apps
)
self.assertEqual(ContentType.objects.count(), self.before_count + 1)
@modify_settings(INSTALLED_APPS={"remove": ["empty_models"]})
def test_contenttypes_removed_in_installed_apps_without_models(self):
ContentType.objects.create(app_label="empty_models", model="Fake 1")
ContentType.objects.create(app_label="no_models", model="Fake 2")
with mock.patch(
"builtins.input", return_value="yes"
), captured_stdout() as stdout:
call_command("remove_stale_contenttypes", verbosity=2)
self.assertNotIn(
"Deleting stale content type 'empty_models | Fake 1'",
stdout.getvalue(),
)
self.assertIn(
"Deleting stale content type 'no_models | Fake 2'",
stdout.getvalue(),
)
self.assertEqual(ContentType.objects.count(), self.before_count + 1)
@modify_settings(INSTALLED_APPS={"remove": ["empty_models"]})
def test_contenttypes_removed_for_apps_not_in_installed_apps(self):
ContentType.objects.create(app_label="empty_models", model="Fake 1")
ContentType.objects.create(app_label="no_models", model="Fake 2")
with mock.patch(
"builtins.input", return_value="yes"
), captured_stdout() as stdout:
call_command(
"remove_stale_contenttypes", include_stale_apps=True, verbosity=2
)
self.assertIn(
"Deleting stale content type 'empty_models | Fake 1'",
stdout.getvalue(),
)
self.assertIn(
"Deleting stale content type 'no_models | Fake 2'",
stdout.getvalue(),
)
self.assertEqual(ContentType.objects.count(), self.before_count)
|
872092bab2328824589c574b2903bb3357e0578087f0df4eea6cd82162a3f93a | from unittest import mock
from django.contrib.contenttypes.checks import check_model_name_lengths
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.db import models
from django.test import SimpleTestCase, override_settings
from django.test.utils import isolate_apps
@isolate_apps("contenttypes_tests", attr_name="apps")
class GenericForeignKeyTests(SimpleTestCase):
databases = "__all__"
def test_missing_content_type_field(self):
class TaggedItem(models.Model):
# no content_type field
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
expected = [
checks.Error(
"The GenericForeignKey content type references the nonexistent "
"field 'TaggedItem.content_type'.",
obj=TaggedItem.content_object,
id="contenttypes.E002",
)
]
self.assertEqual(TaggedItem.content_object.check(), expected)
def test_invalid_content_type_field(self):
class Model(models.Model):
content_type = models.IntegerField() # should be ForeignKey
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
self.assertEqual(
Model.content_object.check(),
[
checks.Error(
"'Model.content_type' is not a ForeignKey.",
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=Model.content_object,
id="contenttypes.E003",
)
],
)
def test_content_type_field_pointing_to_wrong_model(self):
class Model(models.Model):
content_type = models.ForeignKey(
"self", models.CASCADE
) # should point to ContentType
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
self.assertEqual(
Model.content_object.check(),
[
checks.Error(
"'Model.content_type' is not a ForeignKey to "
"'contenttypes.ContentType'.",
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=Model.content_object,
id="contenttypes.E004",
)
],
)
def test_missing_object_id_field(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
# missing object_id field
content_object = GenericForeignKey()
self.assertEqual(
TaggedItem.content_object.check(),
[
checks.Error(
"The GenericForeignKey object ID references the nonexistent "
"field 'object_id'.",
obj=TaggedItem.content_object,
id="contenttypes.E001",
)
],
)
def test_field_name_ending_with_underscore(self):
class Model(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object_ = GenericForeignKey("content_type", "object_id")
self.assertEqual(
Model.content_object_.check(),
[
checks.Error(
"Field names must not end with an underscore.",
obj=Model.content_object_,
id="fields.E001",
)
],
)
@override_settings(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"contenttypes_tests",
]
)
def test_generic_foreign_key_checks_are_performed(self):
class Model(models.Model):
content_object = GenericForeignKey()
with mock.patch.object(GenericForeignKey, "check") as check:
checks.run_checks(app_configs=self.apps.get_app_configs())
check.assert_called_once_with()
@isolate_apps("contenttypes_tests")
class GenericRelationTests(SimpleTestCase):
def test_valid_generic_relationship(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Bookmark(models.Model):
tags = GenericRelation("TaggedItem")
self.assertEqual(Bookmark.tags.field.check(), [])
def test_valid_generic_relationship_with_explicit_fields(self):
class TaggedItem(models.Model):
custom_content_type = models.ForeignKey(ContentType, models.CASCADE)
custom_object_id = models.PositiveIntegerField()
content_object = GenericForeignKey(
"custom_content_type", "custom_object_id"
)
class Bookmark(models.Model):
tags = GenericRelation(
"TaggedItem",
content_type_field="custom_content_type",
object_id_field="custom_object_id",
)
self.assertEqual(Bookmark.tags.field.check(), [])
def test_pointing_to_missing_model(self):
class Model(models.Model):
rel = GenericRelation("MissingModel")
self.assertEqual(
Model.rel.field.check(),
[
checks.Error(
"Field defines a relation with model 'MissingModel', "
"which is either not installed, or is abstract.",
obj=Model.rel.field,
id="fields.E300",
)
],
)
def test_valid_self_referential_generic_relationship(self):
class Model(models.Model):
rel = GenericRelation("Model")
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
self.assertEqual(Model.rel.field.check(), [])
def test_missing_generic_foreign_key(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
class Bookmark(models.Model):
tags = GenericRelation("TaggedItem")
self.assertEqual(
Bookmark.tags.field.check(),
[
checks.Error(
"The GenericRelation defines a relation with the model "
"'contenttypes_tests.TaggedItem', but that model does not have a "
"GenericForeignKey.",
obj=Bookmark.tags.field,
id="contenttypes.E004",
)
],
)
@override_settings(TEST_SWAPPED_MODEL="contenttypes_tests.Replacement")
def test_pointing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
swappable = "TEST_SWAPPED_MODEL"
class Model(models.Model):
rel = GenericRelation("SwappedModel")
self.assertEqual(
Model.rel.field.check(),
[
checks.Error(
"Field defines a relation with the model "
"'contenttypes_tests.SwappedModel', "
"which has been swapped out.",
hint=(
"Update the relation to point at 'settings.TEST_SWAPPED_MODEL'."
),
obj=Model.rel.field,
id="fields.E301",
)
],
)
def test_field_name_ending_with_underscore(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class InvalidBookmark(models.Model):
tags_ = GenericRelation("TaggedItem")
self.assertEqual(
InvalidBookmark.tags_.field.check(),
[
checks.Error(
"Field names must not end with an underscore.",
obj=InvalidBookmark.tags_.field,
id="fields.E001",
)
],
)
@isolate_apps("contenttypes_tests", attr_name="apps")
class ModelCheckTests(SimpleTestCase):
def test_model_name_too_long(self):
model = type("A" * 101, (models.Model,), {"__module__": self.__module__})
self.assertEqual(
check_model_name_lengths(self.apps.get_app_configs()),
[
checks.Error(
"Model names must be at most 100 characters (got 101).",
obj=model,
id="contenttypes.E005",
)
],
)
def test_model_name_max_length(self):
type("A" * 100, (models.Model,), {"__module__": self.__module__})
self.assertEqual(check_model_name_lengths(self.apps.get_app_configs()), [])
|
b8f497aa9abb1054065aca302fa1b9fca2c383ba83d9615124cbab3ad57a2c54 | from django.test import SimpleTestCase
from django.utils.connection import BaseConnectionHandler
class BaseConnectionHandlerTests(SimpleTestCase):
def test_create_connection(self):
handler = BaseConnectionHandler()
msg = "Subclasses must implement create_connection()."
with self.assertRaisesMessage(NotImplementedError, msg):
handler.create_connection(None)
def test_all_initialized_only(self):
handler = BaseConnectionHandler({"default": {}})
self.assertEqual(handler.all(initialized_only=True), [])
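# Note: accessing an alias (e.g. handler["default"]) lazily calls
# create_connection() and caches the result; only after such an access does
# all(initialized_only=True) include that connection.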
|
d9eb215513b7bb460ce901fc6826d203649d41a393f84a8d1c348151a21c4f55 | """
Tests for the Django test runner.
"""
import collections.abc
import multiprocessing
import os
import sys
import unittest
from unittest import mock
from admin_scripts.tests import AdminScriptTestCase
from django import db
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.core.management.base import SystemCheckError
from django.test import SimpleTestCase, TransactionTestCase, skipUnlessDBFeature
from django.test.runner import (
DiscoverRunner,
Shuffler,
reorder_test_bin,
reorder_tests,
shuffle_tests,
)
from django.test.testcases import connections_support_transactions
from django.test.utils import (
captured_stderr,
dependency_ordered,
get_unique_databases_and_mirrors,
iter_test_cases,
)
from django.utils.deprecation import RemovedInDjango50Warning
from .models import B, Person, Through
class MySuite:
def __init__(self):
self.tests = []
def addTest(self, test):
self.tests.append(test)
def __iter__(self):
yield from self.tests
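# MySuite implements only the addTest()/__iter__() protocol that
# iter_test_cases() relies on, so the tests below can exercise traversal of
# suite containers that don't subclass unittest.TestSuite.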
class TestSuiteTests(SimpleTestCase):
def build_test_suite(self, test_classes, suite=None, suite_class=None):
if suite_class is None:
suite_class = unittest.TestSuite
if suite is None:
suite = suite_class()
loader = unittest.defaultTestLoader
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
subsuite = suite_class()
# Only use addTest() to simplify testing a custom TestSuite.
for test in tests:
subsuite.addTest(test)
suite.addTest(subsuite)
return suite
def make_test_suite(self, suite=None, suite_class=None):
class Tests1(unittest.TestCase):
def test1(self):
pass
def test2(self):
pass
class Tests2(unittest.TestCase):
def test1(self):
pass
def test2(self):
pass
return self.build_test_suite(
(Tests1, Tests2),
suite=suite,
suite_class=suite_class,
)
def assertTestNames(self, tests, expected):
# Each test.id() has a form like the following:
# "test_runner.tests.IterTestCasesTests.test_iter_test_cases.<locals>.Tests1.test1".
# It suffices to check only the last two parts.
names = [".".join(test.id().split(".")[-2:]) for test in tests]
self.assertEqual(names, expected)
def test_iter_test_cases_basic(self):
suite = self.make_test_suite()
tests = iter_test_cases(suite)
self.assertTestNames(
tests,
expected=[
"Tests1.test1",
"Tests1.test2",
"Tests2.test1",
"Tests2.test2",
],
)
def test_iter_test_cases_string_input(self):
msg = (
"Test 'a' must be a test case or test suite not string (was found "
"in 'abc')."
)
with self.assertRaisesMessage(TypeError, msg):
list(iter_test_cases("abc"))
def test_iter_test_cases_iterable_of_tests(self):
class Tests(unittest.TestCase):
def test1(self):
pass
def test2(self):
pass
tests = list(unittest.defaultTestLoader.loadTestsFromTestCase(Tests))
actual_tests = iter_test_cases(tests)
self.assertTestNames(
actual_tests,
expected=[
"Tests.test1",
"Tests.test2",
],
)
def test_iter_test_cases_custom_test_suite_class(self):
suite = self.make_test_suite(suite_class=MySuite)
tests = iter_test_cases(suite)
self.assertTestNames(
tests,
expected=[
"Tests1.test1",
"Tests1.test2",
"Tests2.test1",
"Tests2.test2",
],
)
def test_iter_test_cases_mixed_test_suite_classes(self):
suite = self.make_test_suite(suite=MySuite())
child_suite = list(suite)[0]
self.assertNotIsInstance(child_suite, MySuite)
tests = list(iter_test_cases(suite))
self.assertEqual(len(tests), 4)
self.assertNotIsInstance(tests[0], unittest.TestSuite)
def make_tests(self):
"""Return an iterable of tests."""
suite = self.make_test_suite()
tests = list(iter_test_cases(suite))
return tests
def test_shuffle_tests(self):
tests = self.make_tests()
# Choose a seed that shuffles both the classes and methods.
shuffler = Shuffler(seed=9)
shuffled_tests = shuffle_tests(tests, shuffler)
self.assertIsInstance(shuffled_tests, collections.abc.Iterator)
self.assertTestNames(
shuffled_tests,
expected=[
"Tests2.test1",
"Tests2.test2",
"Tests1.test2",
"Tests1.test1",
],
)
def test_reorder_test_bin_no_arguments(self):
tests = self.make_tests()
reordered_tests = reorder_test_bin(tests)
self.assertIsInstance(reordered_tests, collections.abc.Iterator)
self.assertTestNames(
reordered_tests,
expected=[
"Tests1.test1",
"Tests1.test2",
"Tests2.test1",
"Tests2.test2",
],
)
def test_reorder_test_bin_reverse(self):
tests = self.make_tests()
reordered_tests = reorder_test_bin(tests, reverse=True)
self.assertIsInstance(reordered_tests, collections.abc.Iterator)
self.assertTestNames(
reordered_tests,
expected=[
"Tests2.test2",
"Tests2.test1",
"Tests1.test2",
"Tests1.test1",
],
)
def test_reorder_test_bin_random(self):
tests = self.make_tests()
# Choose a seed that shuffles both the classes and methods.
shuffler = Shuffler(seed=9)
reordered_tests = reorder_test_bin(tests, shuffler=shuffler)
self.assertIsInstance(reordered_tests, collections.abc.Iterator)
self.assertTestNames(
reordered_tests,
expected=[
"Tests2.test1",
"Tests2.test2",
"Tests1.test2",
"Tests1.test1",
],
)
def test_reorder_test_bin_random_and_reverse(self):
tests = self.make_tests()
# Choose a seed that shuffles both the classes and methods.
shuffler = Shuffler(seed=9)
reordered_tests = reorder_test_bin(tests, shuffler=shuffler, reverse=True)
self.assertIsInstance(reordered_tests, collections.abc.Iterator)
self.assertTestNames(
reordered_tests,
expected=[
"Tests1.test1",
"Tests1.test2",
"Tests2.test2",
"Tests2.test1",
],
)
def test_reorder_tests_same_type_consecutive(self):
"""Tests of the same type are made consecutive."""
tests = self.make_tests()
# Move the last item to the front.
tests.insert(0, tests.pop())
self.assertTestNames(
tests,
expected=[
"Tests2.test2",
"Tests1.test1",
"Tests1.test2",
"Tests2.test1",
],
)
reordered_tests = reorder_tests(tests, classes=[])
self.assertTestNames(
reordered_tests,
expected=[
"Tests2.test2",
"Tests2.test1",
"Tests1.test1",
"Tests1.test2",
],
)
def test_reorder_tests_random(self):
tests = self.make_tests()
# Choose a seed that shuffles both the classes and methods.
shuffler = Shuffler(seed=9)
reordered_tests = reorder_tests(tests, classes=[], shuffler=shuffler)
self.assertIsInstance(reordered_tests, collections.abc.Iterator)
self.assertTestNames(
reordered_tests,
expected=[
"Tests2.test1",
"Tests2.test2",
"Tests1.test2",
"Tests1.test1",
],
)
def test_reorder_tests_random_mixed_classes(self):
tests = self.make_tests()
# Move the last item to the front.
tests.insert(0, tests.pop())
shuffler = Shuffler(seed=9)
self.assertTestNames(
tests,
expected=[
"Tests2.test2",
"Tests1.test1",
"Tests1.test2",
"Tests2.test1",
],
)
reordered_tests = reorder_tests(tests, classes=[], shuffler=shuffler)
self.assertTestNames(
reordered_tests,
expected=[
"Tests2.test1",
"Tests2.test2",
"Tests1.test2",
"Tests1.test1",
],
)
def test_reorder_tests_reverse_with_duplicates(self):
class Tests1(unittest.TestCase):
def test1(self):
pass
class Tests2(unittest.TestCase):
def test2(self):
pass
def test3(self):
pass
suite = self.build_test_suite((Tests1, Tests2))
subsuite = list(suite)[0]
suite.addTest(subsuite)
tests = list(iter_test_cases(suite))
self.assertTestNames(
tests,
expected=[
"Tests1.test1",
"Tests2.test2",
"Tests2.test3",
"Tests1.test1",
],
)
reordered_tests = reorder_tests(tests, classes=[])
self.assertTestNames(
reordered_tests,
expected=[
"Tests1.test1",
"Tests2.test2",
"Tests2.test3",
],
)
reordered_tests = reorder_tests(tests, classes=[], reverse=True)
self.assertTestNames(
reordered_tests,
expected=[
"Tests2.test3",
"Tests2.test2",
"Tests1.test1",
],
)
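# In the tests below, each entry of `raw` has the shape
# (signature, (db_name, aliases)), and `dependencies` maps an alias to the
# aliases whose databases must be created first. dependency_ordered() returns
# the entries of `raw` reordered so that every dependency precedes its
# dependents, and raises ImproperlyConfigured on cycles or self-dependencies.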
class DependencyOrderingTests(unittest.TestCase):
def test_simple_dependencies(self):
raw = [
("s1", ("s1_db", ["alpha"])),
("s2", ("s2_db", ["bravo"])),
("s3", ("s3_db", ["charlie"])),
]
dependencies = {
"alpha": ["charlie"],
"bravo": ["charlie"],
}
ordered = dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig, value in ordered]
self.assertIn("s1", ordered_sigs)
self.assertIn("s2", ordered_sigs)
self.assertIn("s3", ordered_sigs)
self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s1"))
self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s2"))
def test_chained_dependencies(self):
raw = [
("s1", ("s1_db", ["alpha"])),
("s2", ("s2_db", ["bravo"])),
("s3", ("s3_db", ["charlie"])),
]
dependencies = {
"alpha": ["bravo"],
"bravo": ["charlie"],
}
ordered = dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig, value in ordered]
self.assertIn("s1", ordered_sigs)
self.assertIn("s2", ordered_sigs)
self.assertIn("s3", ordered_sigs)
# Explicit dependencies
self.assertLess(ordered_sigs.index("s2"), ordered_sigs.index("s1"))
self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s2"))
# Implied dependencies
self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s1"))
def test_multiple_dependencies(self):
raw = [
("s1", ("s1_db", ["alpha"])),
("s2", ("s2_db", ["bravo"])),
("s3", ("s3_db", ["charlie"])),
("s4", ("s4_db", ["delta"])),
]
dependencies = {
"alpha": ["bravo", "delta"],
"bravo": ["charlie"],
"delta": ["charlie"],
}
ordered = dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig, aliases in ordered]
self.assertIn("s1", ordered_sigs)
self.assertIn("s2", ordered_sigs)
self.assertIn("s3", ordered_sigs)
self.assertIn("s4", ordered_sigs)
# Explicit dependencies
self.assertLess(ordered_sigs.index("s2"), ordered_sigs.index("s1"))
self.assertLess(ordered_sigs.index("s4"), ordered_sigs.index("s1"))
self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s2"))
self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s4"))
# Implicit dependencies
self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s1"))
def test_circular_dependencies(self):
raw = [
("s1", ("s1_db", ["alpha"])),
("s2", ("s2_db", ["bravo"])),
]
dependencies = {
"bravo": ["alpha"],
"alpha": ["bravo"],
}
with self.assertRaises(ImproperlyConfigured):
dependency_ordered(raw, dependencies=dependencies)
def test_own_alias_dependency(self):
raw = [("s1", ("s1_db", ["alpha", "bravo"]))]
dependencies = {"alpha": ["bravo"]}
with self.assertRaises(ImproperlyConfigured):
dependency_ordered(raw, dependencies=dependencies)
        # Reordering the aliases shouldn't matter.
raw = [("s1", ("s1_db", ["bravo", "alpha"]))]
with self.assertRaises(ImproperlyConfigured):
dependency_ordered(raw, dependencies=dependencies)
class MockTestRunner:
def __init__(self, *args, **kwargs):
if parallel := kwargs.get("parallel"):
sys.stderr.write(f"parallel={parallel}")
MockTestRunner.run_tests = mock.Mock(return_value=[])
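# run_tests is mocked at class level so the tests below can assert on the
# labels forwarded by the test management command, whichever runner instance
# the command constructs.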
class ManageCommandTests(unittest.TestCase):
def test_custom_test_runner(self):
call_command("test", "sites", testrunner="test_runner.tests.MockTestRunner")
MockTestRunner.run_tests.assert_called_with(("sites",))
def test_bad_test_runner(self):
with self.assertRaises(AttributeError):
call_command("test", "sites", testrunner="test_runner.NonexistentRunner")
def test_time_recorded(self):
with captured_stderr() as stderr:
call_command(
"test",
"--timing",
"sites",
testrunner="test_runner.tests.MockTestRunner",
)
self.assertIn("Total run took", stderr.getvalue())
# Isolate from the real environment.
@mock.patch.dict(os.environ, {}, clear=True)
@mock.patch.object(multiprocessing, "cpu_count", return_value=12)
class ManageCommandParallelTests(SimpleTestCase):
def test_parallel_default(self, *mocked_objects):
with captured_stderr() as stderr:
call_command(
"test",
"--parallel",
testrunner="test_runner.tests.MockTestRunner",
)
self.assertIn("parallel=12", stderr.getvalue())
def test_parallel_auto(self, *mocked_objects):
with captured_stderr() as stderr:
call_command(
"test",
"--parallel=auto",
testrunner="test_runner.tests.MockTestRunner",
)
self.assertIn("parallel=12", stderr.getvalue())
def test_no_parallel(self, *mocked_objects):
with captured_stderr() as stderr:
call_command("test", testrunner="test_runner.tests.MockTestRunner")
# Parallel is disabled by default.
self.assertEqual(stderr.getvalue(), "")
@mock.patch.object(multiprocessing, "get_start_method", return_value="spawn")
def test_parallel_spawn(self, *mocked_objects):
with captured_stderr() as stderr:
call_command(
"test",
"--parallel=auto",
testrunner="test_runner.tests.MockTestRunner",
)
self.assertIn("parallel=1", stderr.getvalue())
@mock.patch.object(multiprocessing, "get_start_method", return_value="spawn")
def test_no_parallel_spawn(self, *mocked_objects):
with captured_stderr() as stderr:
call_command(
"test",
testrunner="test_runner.tests.MockTestRunner",
)
self.assertEqual(stderr.getvalue(), "")
@mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "7"})
def test_no_parallel_django_test_processes_env(self, *mocked_objects):
with captured_stderr() as stderr:
call_command("test", testrunner="test_runner.tests.MockTestRunner")
self.assertEqual(stderr.getvalue(), "")
@mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "invalid"})
def test_django_test_processes_env_non_int(self, *mocked_objects):
with self.assertRaises(ValueError):
call_command(
"test",
"--parallel",
testrunner="test_runner.tests.MockTestRunner",
)
@mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "7"})
def test_django_test_processes_parallel_default(self, *mocked_objects):
for parallel in ["--parallel", "--parallel=auto"]:
with self.subTest(parallel=parallel):
with captured_stderr() as stderr:
call_command(
"test",
parallel,
testrunner="test_runner.tests.MockTestRunner",
)
self.assertIn("parallel=7", stderr.getvalue())
class CustomTestRunnerOptionsSettingsTests(AdminScriptTestCase):
"""
Custom runners can add command line arguments. The runner is specified
through a settings file.
"""
def setUp(self):
super().setUp()
settings = {
"TEST_RUNNER": "'test_runner.runner.CustomOptionsTestRunner'",
}
self.write_settings("settings.py", sdict=settings)
def test_default_options(self):
args = ["test", "--settings=test_project.settings"]
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "1:2:3")
def test_default_and_given_options(self):
args = ["test", "--settings=test_project.settings", "--option_b=foo"]
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "1:foo:3")
def test_option_name_and_value_separated(self):
args = ["test", "--settings=test_project.settings", "--option_b", "foo"]
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "1:foo:3")
def test_all_options_given(self):
args = [
"test",
"--settings=test_project.settings",
"--option_a=bar",
"--option_b=foo",
"--option_c=31337",
]
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "bar:foo:31337")
class CustomTestRunnerOptionsCmdlineTests(AdminScriptTestCase):
"""
Custom runners can add command line arguments when the runner is specified
using --testrunner.
"""
def setUp(self):
super().setUp()
self.write_settings("settings.py")
def test_testrunner_option(self):
args = [
"test",
"--testrunner",
"test_runner.runner.CustomOptionsTestRunner",
"--option_a=bar",
"--option_b=foo",
"--option_c=31337",
]
out, err = self.run_django_admin(args, "test_project.settings")
self.assertNoOutput(err)
self.assertOutput(out, "bar:foo:31337")
def test_testrunner_equals(self):
args = [
"test",
"--testrunner=test_runner.runner.CustomOptionsTestRunner",
"--option_a=bar",
"--option_b=foo",
"--option_c=31337",
]
out, err = self.run_django_admin(args, "test_project.settings")
self.assertNoOutput(err)
self.assertOutput(out, "bar:foo:31337")
def test_no_testrunner(self):
args = ["test", "--testrunner"]
out, err = self.run_django_admin(args, "test_project.settings")
self.assertIn("usage", err)
self.assertNotIn("Traceback", err)
self.assertNoOutput(out)
class NoInitializeSuiteTestRunnerTests(SimpleTestCase):
@mock.patch.object(multiprocessing, "get_start_method", return_value="spawn")
@mock.patch(
"django.test.runner.ParallelTestSuite.initialize_suite",
side_effect=Exception("initialize_suite() is called."),
)
def test_no_initialize_suite_test_runner(self, *mocked_objects):
"""
        The test suite's initialize_suite() method must always be called when
        using spawn; calling it cannot be left to the test runner
        implementation.
"""
class NoInitializeSuiteTestRunner(DiscoverRunner):
def setup_test_environment(self, **kwargs):
return
def setup_databases(self, **kwargs):
return
def run_checks(self, databases):
return
def teardown_databases(self, old_config, **kwargs):
return
def teardown_test_environment(self, **kwargs):
return
def run_suite(self, suite, **kwargs):
kwargs = self.get_test_runner_kwargs()
runner = self.test_runner(**kwargs)
return runner.run(suite)
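        # Every runner hook above is a no-op, so the only remaining work is
        # building and running the parallel suite; initialize_suite() must
        # still be invoked on it.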
with self.assertRaisesMessage(Exception, "initialize_suite() is called."):
runner = NoInitializeSuiteTestRunner(
verbosity=0, interactive=False, parallel=2
)
runner.run_tests(
[
"test_runner_apps.sample.tests_sample.TestDjangoTestCase",
"test_runner_apps.simple.tests",
]
)
class Ticket17477RegressionTests(AdminScriptTestCase):
def setUp(self):
super().setUp()
self.write_settings("settings.py")
def test_ticket_17477(self):
"""'manage.py help test' works after r16352."""
args = ["help", "test"]
out, err = self.run_manage(args)
self.assertNoOutput(err)
class SQLiteInMemoryTestDbs(TransactionTestCase):
available_apps = ["test_runner"]
databases = {"default", "other"}
@unittest.skipUnless(
all(db.connections[conn].vendor == "sqlite" for conn in db.connections),
"This is an sqlite-specific issue",
)
def test_transaction_support(self):
        # Assert that connection mocking is applied by preventing any attempt
        # to call create_test_db() on the global connection objects.
for connection in db.connections.all():
create_test_db = mock.patch.object(
connection.creation,
"create_test_db",
side_effect=AssertionError(
"Global connection object shouldn't be manipulated."
),
)
create_test_db.start()
self.addCleanup(create_test_db.stop)
for option_key, option_value in (
("NAME", ":memory:"),
("TEST", {"NAME": ":memory:"}),
):
tested_connections = db.ConnectionHandler(
{
"default": {
"ENGINE": "django.db.backends.sqlite3",
option_key: option_value,
},
"other": {
"ENGINE": "django.db.backends.sqlite3",
option_key: option_value,
},
}
)
with mock.patch("django.test.utils.connections", new=tested_connections):
other = tested_connections["other"]
DiscoverRunner(verbosity=0).setup_databases()
msg = (
"DATABASES setting '%s' option set to sqlite3's ':memory:' value "
"shouldn't interfere with transaction support detection."
% option_key
)
# Transaction support is properly initialized for the 'other' DB.
self.assertTrue(other.features.supports_transactions, msg)
# And all the DBs report that they support transactions.
self.assertTrue(connections_support_transactions(), msg)
class DummyBackendTest(unittest.TestCase):
def test_setup_databases(self):
"""
        setup_databases() doesn't fail with the dummy database backend.
"""
tested_connections = db.ConnectionHandler({})
with mock.patch("django.test.utils.connections", new=tested_connections):
runner_instance = DiscoverRunner(verbosity=0)
old_config = runner_instance.setup_databases()
runner_instance.teardown_databases(old_config)
class AliasedDefaultTestSetupTest(unittest.TestCase):
def test_setup_aliased_default_database(self):
"""
        setup_databases() doesn't fail when 'default' is aliased.
"""
tested_connections = db.ConnectionHandler(
{"default": {"NAME": "dummy"}, "aliased": {"NAME": "dummy"}}
)
with mock.patch("django.test.utils.connections", new=tested_connections):
runner_instance = DiscoverRunner(verbosity=0)
old_config = runner_instance.setup_databases()
runner_instance.teardown_databases(old_config)
class SetupDatabasesTests(SimpleTestCase):
def setUp(self):
self.runner_instance = DiscoverRunner(verbosity=0)
def test_setup_aliased_databases(self):
tested_connections = db.ConnectionHandler(
{
"default": {
"ENGINE": "django.db.backends.dummy",
"NAME": "dbname",
},
"other": {
"ENGINE": "django.db.backends.dummy",
"NAME": "dbname",
},
}
)
with mock.patch(
"django.db.backends.dummy.base.DatabaseWrapper.creation_class"
) as mocked_db_creation:
with mock.patch("django.test.utils.connections", new=tested_connections):
old_config = self.runner_instance.setup_databases()
self.runner_instance.teardown_databases(old_config)
mocked_db_creation.return_value.destroy_test_db.assert_called_once_with(
"dbname", 0, False
)
def test_setup_test_database_aliases(self):
"""
        The default database must be set up first because data migrations
use the default alias by default.
"""
tested_connections = db.ConnectionHandler(
{
"other": {
"ENGINE": "django.db.backends.dummy",
"NAME": "dbname",
},
"default": {
"ENGINE": "django.db.backends.dummy",
"NAME": "dbname",
},
}
)
with mock.patch("django.test.utils.connections", new=tested_connections):
test_databases, _ = get_unique_databases_and_mirrors()
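            # Each key is (host, port, engine, test database name); each
            # value maps the configured NAME to the aliases sharing that
            # test database, with "default" ordered first.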
self.assertEqual(
test_databases,
{
("", "", "django.db.backends.dummy", "test_dbname"): (
"dbname",
["default", "other"],
),
},
)
def test_destroy_test_db_restores_db_name(self):
tested_connections = db.ConnectionHandler(
{
"default": {
"ENGINE": settings.DATABASES[db.DEFAULT_DB_ALIAS]["ENGINE"],
"NAME": "xxx_test_database",
},
}
)
        # Use the real current name as old_name to avoid interfering with
        # the test suite.
old_name = settings.DATABASES[db.DEFAULT_DB_ALIAS]["NAME"]
with mock.patch("django.db.connections", new=tested_connections):
tested_connections["default"].creation.destroy_test_db(
old_name, verbosity=0, keepdb=True
)
self.assertEqual(
tested_connections["default"].settings_dict["NAME"], old_name
)
def test_serialization(self):
tested_connections = db.ConnectionHandler(
{
"default": {
"ENGINE": "django.db.backends.dummy",
},
}
)
with mock.patch(
"django.db.backends.dummy.base.DatabaseWrapper.creation_class"
) as mocked_db_creation:
with mock.patch("django.test.utils.connections", new=tested_connections):
self.runner_instance.setup_databases()
mocked_db_creation.return_value.create_test_db.assert_called_once_with(
verbosity=0, autoclobber=False, serialize=True, keepdb=False
)
def test_serialized_off(self):
tested_connections = db.ConnectionHandler(
{
"default": {
"ENGINE": "django.db.backends.dummy",
"TEST": {"SERIALIZE": False},
},
}
)
msg = (
"The SERIALIZE test database setting is deprecated as it can be "
"inferred from the TestCase/TransactionTestCase.databases that "
"enable the serialized_rollback feature."
)
with mock.patch(
"django.db.backends.dummy.base.DatabaseWrapper.creation_class"
) as mocked_db_creation:
with mock.patch("django.test.utils.connections", new=tested_connections):
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
self.runner_instance.setup_databases()
mocked_db_creation.return_value.create_test_db.assert_called_once_with(
verbosity=0, autoclobber=False, serialize=False, keepdb=False
)
@skipUnlessDBFeature("supports_sequence_reset")
class AutoIncrementResetTest(TransactionTestCase):
"""
    Models created in different test methods receive the same PK values
    since the sequences are reset before each test method.
"""
available_apps = ["test_runner"]
reset_sequences = True
def _test(self):
# Regular model
p = Person.objects.create(first_name="Jack", last_name="Smith")
self.assertEqual(p.pk, 1)
# Auto-created many-to-many through model
p.friends.add(Person.objects.create(first_name="Jacky", last_name="Smith"))
self.assertEqual(p.friends.through.objects.first().pk, 1)
# Many-to-many through model
b = B.objects.create()
t = Through.objects.create(person=p, b=b)
self.assertEqual(t.pk, 1)
def test_autoincrement_reset1(self):
self._test()
def test_autoincrement_reset2(self):
self._test()
class EmptyDefaultDatabaseTest(unittest.TestCase):
def test_empty_default_database(self):
"""
An empty default database in settings does not raise an ImproperlyConfigured
error when running a unit test that does not use a database.
"""
tested_connections = db.ConnectionHandler({"default": {}})
with mock.patch("django.db.connections", new=tested_connections):
connection = tested_connections[db.utils.DEFAULT_DB_ALIAS]
self.assertEqual(
connection.settings_dict["ENGINE"], "django.db.backends.dummy"
)
connections_support_transactions()
class RunTestsExceptionHandlingTests(unittest.TestCase):
def test_run_checks_raises(self):
"""
Teardown functions are run when run_checks() raises SystemCheckError.
"""
with mock.patch(
"django.test.runner.DiscoverRunner.setup_test_environment"
), mock.patch("django.test.runner.DiscoverRunner.setup_databases"), mock.patch(
"django.test.runner.DiscoverRunner.build_suite"
), mock.patch(
"django.test.runner.DiscoverRunner.run_checks", side_effect=SystemCheckError
), mock.patch(
"django.test.runner.DiscoverRunner.teardown_databases"
) as teardown_databases, mock.patch(
"django.test.runner.DiscoverRunner.teardown_test_environment"
) as teardown_test_environment:
runner = DiscoverRunner(verbosity=0, interactive=False)
with self.assertRaises(SystemCheckError):
runner.run_tests(
["test_runner_apps.sample.tests_sample.TestDjangoTestCase"]
)
self.assertTrue(teardown_databases.called)
self.assertTrue(teardown_test_environment.called)
def test_run_checks_raises_and_teardown_raises(self):
"""
SystemCheckError is surfaced when run_checks() raises SystemCheckError
    and teardown_databases() raises ValueError.
"""
with mock.patch(
"django.test.runner.DiscoverRunner.setup_test_environment"
), mock.patch("django.test.runner.DiscoverRunner.setup_databases"), mock.patch(
"django.test.runner.DiscoverRunner.build_suite"
), mock.patch(
"django.test.runner.DiscoverRunner.run_checks", side_effect=SystemCheckError
), mock.patch(
"django.test.runner.DiscoverRunner.teardown_databases",
side_effect=ValueError,
) as teardown_databases, mock.patch(
"django.test.runner.DiscoverRunner.teardown_test_environment"
) as teardown_test_environment:
runner = DiscoverRunner(verbosity=0, interactive=False)
with self.assertRaises(SystemCheckError):
runner.run_tests(
["test_runner_apps.sample.tests_sample.TestDjangoTestCase"]
)
self.assertTrue(teardown_databases.called)
self.assertFalse(teardown_test_environment.called)
def test_run_checks_passes_and_teardown_raises(self):
"""
Exceptions on teardown are surfaced if no exceptions happen during
run_checks().
"""
with mock.patch(
"django.test.runner.DiscoverRunner.setup_test_environment"
), mock.patch("django.test.runner.DiscoverRunner.setup_databases"), mock.patch(
"django.test.runner.DiscoverRunner.build_suite"
), mock.patch(
"django.test.runner.DiscoverRunner.run_checks"
), mock.patch(
"django.test.runner.DiscoverRunner.teardown_databases",
side_effect=ValueError,
) as teardown_databases, mock.patch(
"django.test.runner.DiscoverRunner.teardown_test_environment"
) as teardown_test_environment:
runner = DiscoverRunner(verbosity=0, interactive=False)
with self.assertRaises(ValueError):
# Suppress the output when running TestDjangoTestCase.
with mock.patch("sys.stderr"):
runner.run_tests(
["test_runner_apps.sample.tests_sample.TestDjangoTestCase"]
)
self.assertTrue(teardown_databases.called)
self.assertFalse(teardown_test_environment.called)
# RemovedInDjango50Warning
class NoOpTestRunner(DiscoverRunner):
def setup_test_environment(self, **kwargs):
return
def setup_databases(self, **kwargs):
return
def run_checks(self, databases):
return
def teardown_databases(self, old_config, **kwargs):
return
def teardown_test_environment(self, **kwargs):
return
class DiscoverRunnerExtraTestsDeprecationTests(SimpleTestCase):
msg = "The extra_tests argument is deprecated."
def get_runner(self):
return NoOpTestRunner(verbosity=0, interactive=False)
def test_extra_tests_build_suite(self):
runner = self.get_runner()
with self.assertWarnsMessage(RemovedInDjango50Warning, self.msg):
runner.build_suite(extra_tests=[])
def test_extra_tests_run_tests(self):
runner = self.get_runner()
with captured_stderr():
with self.assertWarnsMessage(RemovedInDjango50Warning, self.msg):
runner.run_tests(
test_labels=["test_runner_apps.sample.tests_sample.EmptyTestCase"],
extra_tests=[],
)
|
debcff58042ace20f39f126fcc3b1f3db922fef20fea87b57769e0e0984e2df1 | import logging
import multiprocessing
import os
import unittest.loader
from argparse import ArgumentParser
from contextlib import contextmanager
from importlib import import_module
from unittest import TestSuite, TextTestRunner, defaultTestLoader, mock
from django.db import connections
from django.test import SimpleTestCase
from django.test.runner import DiscoverRunner, get_max_test_processes
from django.test.utils import (
NullTimeKeeper,
TimeKeeper,
captured_stderr,
captured_stdout,
)
@contextmanager
def change_cwd(directory):
current_dir = os.path.abspath(os.path.dirname(__file__))
new_dir = os.path.join(current_dir, directory)
old_cwd = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(old_cwd)
@contextmanager
def change_loader_patterns(patterns):
original_patterns = DiscoverRunner.test_loader.testNamePatterns
DiscoverRunner.test_loader.testNamePatterns = patterns
try:
yield
finally:
DiscoverRunner.test_loader.testNamePatterns = original_patterns
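# The patterns must be restored on exit because DiscoverRunner.test_loader is
# the process-wide unittest.defaultTestLoader; a leaked testNamePatterns value
# would bleed into unrelated test runs.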
# Isolate from the real environment.
@mock.patch.dict(os.environ, {}, clear=True)
@mock.patch.object(multiprocessing, "cpu_count", return_value=12)
# Python 3.8 on macOS defaults to 'spawn' mode.
@mock.patch.object(multiprocessing, "get_start_method", return_value="fork")
class DiscoverRunnerParallelArgumentTests(SimpleTestCase):
def get_parser(self):
parser = ArgumentParser()
DiscoverRunner.add_arguments(parser)
return parser
def test_parallel_default(self, *mocked_objects):
result = self.get_parser().parse_args([])
self.assertEqual(result.parallel, 0)
def test_parallel_flag(self, *mocked_objects):
result = self.get_parser().parse_args(["--parallel"])
self.assertEqual(result.parallel, "auto")
def test_parallel_auto(self, *mocked_objects):
result = self.get_parser().parse_args(["--parallel", "auto"])
self.assertEqual(result.parallel, "auto")
def test_parallel_count(self, *mocked_objects):
result = self.get_parser().parse_args(["--parallel", "17"])
self.assertEqual(result.parallel, 17)
def test_parallel_invalid(self, *mocked_objects):
with self.assertRaises(SystemExit), captured_stderr() as stderr:
self.get_parser().parse_args(["--parallel", "unaccepted"])
msg = "argument --parallel: 'unaccepted' is not an integer or the string 'auto'"
self.assertIn(msg, stderr.getvalue())
def test_get_max_test_processes(self, *mocked_objects):
self.assertEqual(get_max_test_processes(), 12)
@mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "7"})
def test_get_max_test_processes_env_var(self, *mocked_objects):
self.assertEqual(get_max_test_processes(), 7)
def test_get_max_test_processes_spawn(
self,
mocked_get_start_method,
mocked_cpu_count,
):
mocked_get_start_method.return_value = "spawn"
self.assertEqual(get_max_test_processes(), 12)
with mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "7"}):
self.assertEqual(get_max_test_processes(), 7)
def test_get_max_test_processes_forkserver(
self,
mocked_get_start_method,
mocked_cpu_count,
):
mocked_get_start_method.return_value = "forkserver"
self.assertEqual(get_max_test_processes(), 1)
with mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "7"}):
self.assertEqual(get_max_test_processes(), 1)
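# Taken together, the get_max_test_processes* tests above encode the start
# method policy: "fork" and "spawn" honor the CPU count (or the
# DJANGO_TEST_PROCESSES override), while "forkserver" is capped at a single
# process.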
class DiscoverRunnerTests(SimpleTestCase):
@staticmethod
def get_test_methods_names(suite):
return [t.__class__.__name__ + "." + t._testMethodName for t in suite._tests]
def test_init_debug_mode(self):
runner = DiscoverRunner()
self.assertFalse(runner.debug_mode)
def test_add_arguments_shuffle(self):
parser = ArgumentParser()
DiscoverRunner.add_arguments(parser)
ns = parser.parse_args([])
self.assertIs(ns.shuffle, False)
ns = parser.parse_args(["--shuffle"])
self.assertIsNone(ns.shuffle)
ns = parser.parse_args(["--shuffle", "5"])
self.assertEqual(ns.shuffle, 5)
def test_add_arguments_debug_mode(self):
parser = ArgumentParser()
DiscoverRunner.add_arguments(parser)
ns = parser.parse_args([])
self.assertFalse(ns.debug_mode)
ns = parser.parse_args(["--debug-mode"])
self.assertTrue(ns.debug_mode)
def test_setup_shuffler_no_shuffle_argument(self):
runner = DiscoverRunner()
self.assertIs(runner.shuffle, False)
runner.setup_shuffler()
self.assertIsNone(runner.shuffle_seed)
def test_setup_shuffler_shuffle_none(self):
runner = DiscoverRunner(shuffle=None)
self.assertIsNone(runner.shuffle)
with mock.patch("random.randint", return_value=1):
with captured_stdout() as stdout:
runner.setup_shuffler()
self.assertEqual(stdout.getvalue(), "Using shuffle seed: 1 (generated)\n")
self.assertEqual(runner.shuffle_seed, 1)
def test_setup_shuffler_shuffle_int(self):
runner = DiscoverRunner(shuffle=2)
self.assertEqual(runner.shuffle, 2)
with captured_stdout() as stdout:
runner.setup_shuffler()
expected_out = "Using shuffle seed: 2 (given)\n"
self.assertEqual(stdout.getvalue(), expected_out)
self.assertEqual(runner.shuffle_seed, 2)
def test_load_tests_for_label_file_path(self):
with change_cwd("."):
msg = (
"One of the test labels is a path to a file: "
"'test_discover_runner.py', which is not supported. Use a "
"dotted module name or path to a directory instead."
)
with self.assertRaisesMessage(RuntimeError, msg):
DiscoverRunner().load_tests_for_label("test_discover_runner.py", {})
def test_dotted_test_module(self):
count = (
DiscoverRunner(verbosity=0)
.build_suite(
["test_runner_apps.sample.tests_sample"],
)
.countTestCases()
)
self.assertEqual(count, 4)
def test_dotted_test_class_vanilla_unittest(self):
count = (
DiscoverRunner(verbosity=0)
.build_suite(
["test_runner_apps.sample.tests_sample.TestVanillaUnittest"],
)
.countTestCases()
)
self.assertEqual(count, 1)
def test_dotted_test_class_django_testcase(self):
count = (
DiscoverRunner(verbosity=0)
.build_suite(
["test_runner_apps.sample.tests_sample.TestDjangoTestCase"],
)
.countTestCases()
)
self.assertEqual(count, 1)
def test_dotted_test_method_django_testcase(self):
count = (
DiscoverRunner(verbosity=0)
.build_suite(
["test_runner_apps.sample.tests_sample.TestDjangoTestCase.test_sample"],
)
.countTestCases()
)
self.assertEqual(count, 1)
def test_pattern(self):
count = (
DiscoverRunner(
pattern="*_tests.py",
verbosity=0,
)
.build_suite(["test_runner_apps.sample"])
.countTestCases()
)
self.assertEqual(count, 1)
def test_name_patterns(self):
all_test_1 = [
"DjangoCase1.test_1",
"DjangoCase2.test_1",
"SimpleCase1.test_1",
"SimpleCase2.test_1",
"UnittestCase1.test_1",
"UnittestCase2.test_1",
]
all_test_2 = [
"DjangoCase1.test_2",
"DjangoCase2.test_2",
"SimpleCase1.test_2",
"SimpleCase2.test_2",
"UnittestCase1.test_2",
"UnittestCase2.test_2",
]
all_tests = sorted([*all_test_1, *all_test_2, "UnittestCase2.test_3_test"])
for pattern, expected in [
[["test_1"], all_test_1],
[["UnittestCase1"], ["UnittestCase1.test_1", "UnittestCase1.test_2"]],
[["*test"], ["UnittestCase2.test_3_test"]],
[["test*"], all_tests],
[["test"], all_tests],
[["test_1", "test_2"], sorted([*all_test_1, *all_test_2])],
[["test*1"], all_test_1],
]:
with self.subTest(pattern):
suite = DiscoverRunner(
test_name_patterns=pattern,
verbosity=0,
).build_suite(["test_runner_apps.simple"])
self.assertEqual(expected, self.get_test_methods_names(suite))
def test_loader_patterns_not_mutated(self):
runner = DiscoverRunner(test_name_patterns=["test_sample"], verbosity=0)
tests = [
("test_runner_apps.sample.tests", 1),
("test_runner_apps.sample.tests.Test.test_sample", 1),
("test_runner_apps.sample.empty", 0),
("test_runner_apps.sample.tests_sample.EmptyTestCase", 0),
]
for test_labels, tests_count in tests:
with self.subTest(test_labels=test_labels):
with change_loader_patterns(["UnittestCase1"]):
count = runner.build_suite([test_labels]).countTestCases()
self.assertEqual(count, tests_count)
self.assertEqual(
runner.test_loader.testNamePatterns, ["UnittestCase1"]
)
def test_loader_patterns_not_mutated_when_test_label_is_file_path(self):
runner = DiscoverRunner(test_name_patterns=["test_sample"], verbosity=0)
with change_cwd("."), change_loader_patterns(["UnittestCase1"]):
with self.assertRaises(RuntimeError):
runner.build_suite(["test_discover_runner.py"])
self.assertEqual(runner.test_loader.testNamePatterns, ["UnittestCase1"])
def test_file_path(self):
with change_cwd(".."):
count = (
DiscoverRunner(verbosity=0)
.build_suite(
["test_runner_apps/sample/"],
)
.countTestCases()
)
self.assertEqual(count, 5)
def test_empty_label(self):
"""
If the test label is empty, discovery should happen on the current
working directory.
"""
with change_cwd("."):
suite = DiscoverRunner(verbosity=0).build_suite([])
self.assertEqual(
suite._tests[0].id().split(".")[0],
os.path.basename(os.getcwd()),
)
def test_empty_test_case(self):
count = (
DiscoverRunner(verbosity=0)
.build_suite(
["test_runner_apps.sample.tests_sample.EmptyTestCase"],
)
.countTestCases()
)
self.assertEqual(count, 0)
def test_discovery_on_package(self):
count = (
DiscoverRunner(verbosity=0)
.build_suite(
["test_runner_apps.sample.tests"],
)
.countTestCases()
)
self.assertEqual(count, 1)
def test_ignore_adjacent(self):
"""
When given a dotted path to a module, unittest discovery searches
not just the module, but also the directory containing the module.
This results in tests from adjacent modules being run when they
should not. The discover runner avoids this behavior.
"""
count = (
DiscoverRunner(verbosity=0)
.build_suite(
["test_runner_apps.sample.empty"],
)
.countTestCases()
)
self.assertEqual(count, 0)
def test_testcase_ordering(self):
with change_cwd(".."):
suite = DiscoverRunner(verbosity=0).build_suite(
["test_runner_apps/sample/"]
)
self.assertEqual(
suite._tests[0].__class__.__name__,
"TestDjangoTestCase",
msg="TestDjangoTestCase should be the first test case",
)
self.assertEqual(
suite._tests[1].__class__.__name__,
"TestZimpleTestCase",
msg="TestZimpleTestCase should be the second test case",
)
# All others can follow in unspecified order, including doctests
self.assertIn(
"DocTestCase", [t.__class__.__name__ for t in suite._tests[2:]]
)
def test_duplicates_ignored(self):
"""
Tests shouldn't be discovered twice when discovering on overlapping paths.
"""
base_app = "forms_tests"
sub_app = "forms_tests.field_tests"
runner = DiscoverRunner(verbosity=0)
with self.modify_settings(INSTALLED_APPS={"append": sub_app}):
single = runner.build_suite([base_app]).countTestCases()
dups = runner.build_suite([base_app, sub_app]).countTestCases()
self.assertEqual(single, dups)
def test_reverse(self):
"""
Reverse should reorder tests while maintaining the grouping specified
by ``DiscoverRunner.reorder_by``.
"""
runner = DiscoverRunner(reverse=True, verbosity=0)
suite = runner.build_suite(
test_labels=("test_runner_apps.sample", "test_runner_apps.simple")
)
self.assertIn(
"test_runner_apps.simple",
next(iter(suite)).id(),
msg="Test labels should be reversed.",
)
suite = runner.build_suite(test_labels=("test_runner_apps.simple",))
suite = tuple(suite)
self.assertIn(
"DjangoCase", suite[0].id(), msg="Test groups should not be reversed."
)
self.assertIn(
"SimpleCase", suite[4].id(), msg="Test groups order should be preserved."
)
self.assertIn(
"DjangoCase2", suite[0].id(), msg="Django test cases should be reversed."
)
self.assertIn(
"SimpleCase2", suite[4].id(), msg="Simple test cases should be reversed."
)
self.assertIn(
"UnittestCase2",
suite[8].id(),
msg="Unittest test cases should be reversed.",
)
self.assertIn(
"test_2", suite[0].id(), msg="Methods of Django cases should be reversed."
)
self.assertIn(
"test_2", suite[4].id(), msg="Methods of simple cases should be reversed."
)
self.assertIn(
"test_2", suite[9].id(), msg="Methods of unittest cases should be reversed."
)
def test_build_suite_failed_tests_first(self):
# The "doesnotexist" label results in a _FailedTest instance.
suite = DiscoverRunner(verbosity=0).build_suite(
test_labels=["test_runner_apps.sample", "doesnotexist"],
)
tests = list(suite)
self.assertIsInstance(tests[0], unittest.loader._FailedTest)
self.assertNotIsInstance(tests[-1], unittest.loader._FailedTest)
def test_build_suite_shuffling(self):
# These will result in unittest.loader._FailedTest instances rather
# than TestCase objects, but they are sufficient for testing.
labels = ["label1", "label2", "label3", "label4"]
cases = [
({}, ["label1", "label2", "label3", "label4"]),
({"reverse": True}, ["label4", "label3", "label2", "label1"]),
({"shuffle": 8}, ["label4", "label1", "label3", "label2"]),
({"shuffle": 8, "reverse": True}, ["label2", "label3", "label1", "label4"]),
]
for kwargs, expected in cases:
with self.subTest(kwargs=kwargs):
# Prevent writing the seed to stdout.
runner = DiscoverRunner(**kwargs, verbosity=0)
tests = runner.build_suite(test_labels=labels)
# The ids have the form "unittest.loader._FailedTest.label1".
names = [test.id().split(".")[-1] for test in tests]
self.assertEqual(names, expected)
def test_overridable_get_test_runner_kwargs(self):
self.assertIsInstance(DiscoverRunner().get_test_runner_kwargs(), dict)
def test_overridable_test_suite(self):
self.assertEqual(DiscoverRunner().test_suite, TestSuite)
def test_overridable_test_runner(self):
self.assertEqual(DiscoverRunner().test_runner, TextTestRunner)
def test_overridable_test_loader(self):
self.assertEqual(DiscoverRunner().test_loader, defaultTestLoader)
def test_tags(self):
runner = DiscoverRunner(tags=["core"], verbosity=0)
self.assertEqual(
runner.build_suite(["test_runner_apps.tagged.tests"]).countTestCases(), 1
)
runner = DiscoverRunner(tags=["fast"], verbosity=0)
self.assertEqual(
runner.build_suite(["test_runner_apps.tagged.tests"]).countTestCases(), 2
)
runner = DiscoverRunner(tags=["slow"], verbosity=0)
self.assertEqual(
runner.build_suite(["test_runner_apps.tagged.tests"]).countTestCases(), 2
)
def test_exclude_tags(self):
runner = DiscoverRunner(tags=["fast"], exclude_tags=["core"], verbosity=0)
self.assertEqual(
runner.build_suite(["test_runner_apps.tagged.tests"]).countTestCases(), 1
)
runner = DiscoverRunner(tags=["fast"], exclude_tags=["slow"], verbosity=0)
self.assertEqual(
runner.build_suite(["test_runner_apps.tagged.tests"]).countTestCases(), 0
)
runner = DiscoverRunner(exclude_tags=["slow"], verbosity=0)
self.assertEqual(
runner.build_suite(["test_runner_apps.tagged.tests"]).countTestCases(), 0
)
def test_tag_inheritance(self):
def count_tests(**kwargs):
kwargs.setdefault("verbosity", 0)
suite = DiscoverRunner(**kwargs).build_suite(
["test_runner_apps.tagged.tests_inheritance"]
)
return suite.countTestCases()
self.assertEqual(count_tests(tags=["foo"]), 4)
self.assertEqual(count_tests(tags=["bar"]), 2)
self.assertEqual(count_tests(tags=["baz"]), 2)
self.assertEqual(count_tests(tags=["foo"], exclude_tags=["bar"]), 2)
self.assertEqual(count_tests(tags=["foo"], exclude_tags=["bar", "baz"]), 1)
self.assertEqual(count_tests(exclude_tags=["foo"]), 0)
def test_tag_fail_to_load(self):
with self.assertRaises(SyntaxError):
import_module("test_runner_apps.tagged.tests_syntax_error")
runner = DiscoverRunner(tags=["syntax_error"], verbosity=0)
# A label that doesn't exist or cannot be loaded due to syntax errors
        # is always considered to match the tag filters.
suite = runner.build_suite(["doesnotexist", "test_runner_apps.tagged"])
self.assertEqual(
[test.id() for test in suite],
[
"unittest.loader._FailedTest.doesnotexist",
"unittest.loader._FailedTest.test_runner_apps.tagged."
"tests_syntax_error",
],
)
def test_included_tags_displayed(self):
runner = DiscoverRunner(tags=["foo", "bar"], verbosity=2)
with captured_stdout() as stdout:
runner.build_suite(["test_runner_apps.tagged.tests"])
self.assertIn("Including test tag(s): bar, foo.\n", stdout.getvalue())
def test_excluded_tags_displayed(self):
runner = DiscoverRunner(exclude_tags=["foo", "bar"], verbosity=3)
with captured_stdout() as stdout:
runner.build_suite(["test_runner_apps.tagged.tests"])
self.assertIn("Excluding test tag(s): bar, foo.\n", stdout.getvalue())
def test_number_of_tests_found_displayed(self):
runner = DiscoverRunner()
with captured_stdout() as stdout:
runner.build_suite(
[
"test_runner_apps.sample.tests_sample.TestDjangoTestCase",
"test_runner_apps.simple",
]
)
self.assertIn("Found 14 test(s).\n", stdout.getvalue())
def test_pdb_with_parallel(self):
msg = "You cannot use --pdb with parallel tests; pass --parallel=1 to use it."
with self.assertRaisesMessage(ValueError, msg):
DiscoverRunner(pdb=True, parallel=2)
def test_number_of_parallel_workers(self):
"""Number of processes doesn't exceed the number of TestCases."""
runner = DiscoverRunner(parallel=5, verbosity=0)
suite = runner.build_suite(["test_runner_apps.tagged"])
self.assertEqual(suite.processes, len(suite.subsuites))
def test_number_of_databases_parallel_test_suite(self):
"""
Number of databases doesn't exceed the number of TestCases with
parallel tests.
"""
runner = DiscoverRunner(parallel=8, verbosity=0)
suite = runner.build_suite(["test_runner_apps.tagged"])
self.assertEqual(suite.processes, len(suite.subsuites))
self.assertEqual(runner.parallel, suite.processes)
def test_number_of_databases_no_parallel_test_suite(self):
"""
Number of databases doesn't exceed the number of TestCases with
non-parallel tests.
"""
runner = DiscoverRunner(parallel=8, verbosity=0)
suite = runner.build_suite(["test_runner_apps.simple.tests.DjangoCase1"])
self.assertEqual(runner.parallel, 1)
self.assertIsInstance(suite, TestSuite)
def test_buffer_mode_test_pass(self):
runner = DiscoverRunner(buffer=True, verbosity=0)
with captured_stdout() as stdout, captured_stderr() as stderr:
suite = runner.build_suite(
[
"test_runner_apps.buffer.tests_buffer.WriteToStdoutStderrTestCase."
"test_pass",
]
)
runner.run_suite(suite)
self.assertNotIn("Write to stderr.", stderr.getvalue())
self.assertNotIn("Write to stdout.", stdout.getvalue())
def test_buffer_mode_test_fail(self):
runner = DiscoverRunner(buffer=True, verbosity=0)
with captured_stdout() as stdout, captured_stderr() as stderr:
suite = runner.build_suite(
[
"test_runner_apps.buffer.tests_buffer.WriteToStdoutStderrTestCase."
"test_fail",
]
)
runner.run_suite(suite)
self.assertIn("Write to stderr.", stderr.getvalue())
self.assertIn("Write to stdout.", stdout.getvalue())
def run_suite_with_runner(self, runner_class, **kwargs):
class MyRunner(DiscoverRunner):
def test_runner(self, *args, **kwargs):
return runner_class()
runner = MyRunner(**kwargs)
# Suppress logging "Using shuffle seed" to the console.
with captured_stdout():
runner.setup_shuffler()
with captured_stdout() as stdout:
try:
result = runner.run_suite(None)
except RuntimeError as exc:
result = str(exc)
output = stdout.getvalue()
return result, output
def test_run_suite_logs_seed(self):
class TestRunner:
def run(self, suite):
return "<fake-result>"
expected_prefix = "Used shuffle seed"
# Test with and without shuffling enabled.
result, output = self.run_suite_with_runner(TestRunner)
self.assertEqual(result, "<fake-result>")
self.assertNotIn(expected_prefix, output)
result, output = self.run_suite_with_runner(TestRunner, shuffle=2)
self.assertEqual(result, "<fake-result>")
expected_output = f"{expected_prefix}: 2 (given)\n"
self.assertEqual(output, expected_output)
def test_run_suite_logs_seed_exception(self):
"""
run_suite() logs the seed when TestRunner.run() raises an exception.
"""
class TestRunner:
def run(self, suite):
raise RuntimeError("my exception")
result, output = self.run_suite_with_runner(TestRunner, shuffle=2)
self.assertEqual(result, "my exception")
expected_output = "Used shuffle seed: 2 (given)\n"
self.assertEqual(output, expected_output)
@mock.patch("faulthandler.enable")
def test_faulthandler_enabled(self, mocked_enable):
with mock.patch("faulthandler.is_enabled", return_value=False):
DiscoverRunner(enable_faulthandler=True)
mocked_enable.assert_called()
@mock.patch("faulthandler.enable")
def test_faulthandler_already_enabled(self, mocked_enable):
with mock.patch("faulthandler.is_enabled", return_value=True):
DiscoverRunner(enable_faulthandler=True)
mocked_enable.assert_not_called()
@mock.patch("faulthandler.enable")
def test_faulthandler_enabled_fileno(self, mocked_enable):
# sys.stderr that is not an actual file.
with mock.patch(
"faulthandler.is_enabled", return_value=False
), captured_stderr():
DiscoverRunner(enable_faulthandler=True)
mocked_enable.assert_called()
@mock.patch("faulthandler.enable")
def test_faulthandler_disabled(self, mocked_enable):
with mock.patch("faulthandler.is_enabled", return_value=False):
DiscoverRunner(enable_faulthandler=False)
mocked_enable.assert_not_called()
def test_timings_not_captured(self):
runner = DiscoverRunner(timing=False)
with captured_stderr() as stderr:
with runner.time_keeper.timed("test"):
pass
runner.time_keeper.print_results()
self.assertIsInstance(runner.time_keeper, NullTimeKeeper)
self.assertNotIn("test", stderr.getvalue())
def test_timings_captured(self):
runner = DiscoverRunner(timing=True)
with captured_stderr() as stderr:
with runner.time_keeper.timed("test"):
pass
runner.time_keeper.print_results()
self.assertIsInstance(runner.time_keeper, TimeKeeper)
self.assertIn("test", stderr.getvalue())
def test_log(self):
custom_low_level = 5
custom_high_level = 45
msg = "logging message"
cases = [
(0, None, False),
(0, custom_low_level, False),
(0, logging.DEBUG, False),
(0, logging.INFO, False),
(0, logging.WARNING, False),
(0, custom_high_level, False),
(1, None, True),
(1, custom_low_level, False),
(1, logging.DEBUG, False),
(1, logging.INFO, True),
(1, logging.WARNING, True),
(1, custom_high_level, True),
(2, None, True),
(2, custom_low_level, True),
(2, logging.DEBUG, True),
(2, logging.INFO, True),
(2, logging.WARNING, True),
(2, custom_high_level, True),
(3, None, True),
(3, custom_low_level, True),
(3, logging.DEBUG, True),
(3, logging.INFO, True),
(3, logging.WARNING, True),
(3, custom_high_level, True),
]
for verbosity, level, output in cases:
with self.subTest(verbosity=verbosity, level=level):
with captured_stdout() as stdout:
runner = DiscoverRunner(verbosity=verbosity)
runner.log(msg, level)
self.assertEqual(stdout.getvalue(), f"{msg}\n" if output else "")
def test_log_logger(self):
logger = logging.getLogger("test.logging")
cases = [
(None, "INFO:test.logging:log message"),
# Test a low custom logging level.
(5, "Level 5:test.logging:log message"),
(logging.DEBUG, "DEBUG:test.logging:log message"),
(logging.INFO, "INFO:test.logging:log message"),
(logging.WARNING, "WARNING:test.logging:log message"),
# Test a high custom logging level.
(45, "Level 45:test.logging:log message"),
]
for level, expected in cases:
with self.subTest(level=level):
runner = DiscoverRunner(logger=logger)
# Pass a logging level smaller than the smallest level in cases
# in order to capture all messages.
with self.assertLogs("test.logging", level=1) as cm:
runner.log("log message", level)
self.assertEqual(cm.output, [expected])
def test_suite_result_with_failure(self):
cases = [
(1, "FailureTestCase"),
(1, "ErrorTestCase"),
(0, "ExpectedFailureTestCase"),
(1, "UnexpectedSuccessTestCase"),
]
runner = DiscoverRunner(verbosity=0)
for expected_failures, testcase in cases:
with self.subTest(testcase=testcase):
suite = runner.build_suite(
[
f"test_runner_apps.failures.tests_failures.{testcase}",
]
)
with captured_stderr():
result = runner.run_suite(suite)
failures = runner.suite_result(suite, result)
self.assertEqual(failures, expected_failures)
class DiscoverRunnerGetDatabasesTests(SimpleTestCase):
runner = DiscoverRunner(verbosity=2)
skip_msg = "Skipping setup of unused database(s): "
def get_databases(self, test_labels):
with captured_stdout() as stdout:
suite = self.runner.build_suite(test_labels)
databases = self.runner.get_databases(suite)
return databases, stdout.getvalue()
def assertSkippedDatabases(self, test_labels, expected_databases):
databases, output = self.get_databases(test_labels)
self.assertEqual(databases, expected_databases)
skipped_databases = set(connections) - set(expected_databases)
if skipped_databases:
self.assertIn(self.skip_msg + ", ".join(sorted(skipped_databases)), output)
else:
self.assertNotIn(self.skip_msg, output)
def test_mixed(self):
databases, output = self.get_databases(["test_runner_apps.databases.tests"])
self.assertEqual(databases, {"default": True, "other": False})
self.assertNotIn(self.skip_msg, output)
def test_all(self):
databases, output = self.get_databases(
["test_runner_apps.databases.tests.AllDatabasesTests"]
)
self.assertEqual(databases, {alias: False for alias in connections})
self.assertNotIn(self.skip_msg, output)
def test_default_and_other(self):
self.assertSkippedDatabases(
[
"test_runner_apps.databases.tests.DefaultDatabaseTests",
"test_runner_apps.databases.tests.OtherDatabaseTests",
],
{"default": False, "other": False},
)
def test_default_only(self):
self.assertSkippedDatabases(
[
"test_runner_apps.databases.tests.DefaultDatabaseTests",
],
{"default": False},
)
def test_other_only(self):
self.assertSkippedDatabases(
["test_runner_apps.databases.tests.OtherDatabaseTests"], {"other": False}
)
def test_no_databases_required(self):
self.assertSkippedDatabases(
["test_runner_apps.databases.tests.NoDatabaseTests"], {}
)
def test_serialize(self):
databases, _ = self.get_databases(
["test_runner_apps.databases.tests.DefaultDatabaseSerializedTests"]
)
self.assertEqual(databases, {"default": True})
|
6ef30cdd0ade03537ff1c79a4133a52ed12f6765e0c12dd4678ebcee9db53a87 | from datetime import datetime
from django.core.exceptions import FieldError
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.models import BooleanField, CharField, F, Q
from django.db.models.expressions import (
Col,
Exists,
ExpressionWrapper,
Func,
RawSQL,
Value,
)
from django.db.models.fields.related_lookups import RelatedIsNull
from django.db.models.functions import Lower
from django.db.models.lookups import Exact, GreaterThan, IsNull, LessThan
from django.db.models.sql.constants import SINGLE
from django.db.models.sql.query import JoinPromoter, Query, get_field_names_from_opts
from django.db.models.sql.where import OR
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import register_lookup
from .models import Author, Item, ObjectC, Ranking
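# Query is Django's internal SQL-construction API. The tests below call
# build_where() directly to inspect the lookup trees that QuerySet filters
# would otherwise build behind the scenes.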
class TestQuery(SimpleTestCase):
def test_simple_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
def test_non_alias_cols_query(self):
query = Query(Author, alias_cols=False)
where = query.build_where(Q(num__gt=2, name__isnull=False) | Q(num__lt=F("id")))
name_isnull_lookup, num_gt_lookup = where.children[0].children
self.assertIsInstance(num_gt_lookup, GreaterThan)
self.assertIsInstance(num_gt_lookup.lhs, Col)
self.assertIsNone(num_gt_lookup.lhs.alias)
self.assertIsInstance(name_isnull_lookup, IsNull)
self.assertIsInstance(name_isnull_lookup.lhs, Col)
self.assertIsNone(name_isnull_lookup.lhs.alias)
num_lt_lookup = where.children[1]
self.assertIsInstance(num_lt_lookup, LessThan)
self.assertIsInstance(num_lt_lookup.rhs, Col)
self.assertIsNone(num_lt_lookup.rhs.alias)
self.assertIsInstance(num_lt_lookup.lhs, Col)
self.assertIsNone(num_lt_lookup.lhs.alias)
def test_complex_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2) | Q(num__lt=0))
self.assertEqual(where.connector, OR)
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
lookup = where.children[1]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.rhs, 0)
self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
def test_multiple_fields(self):
query = Query(Item, alias_cols=False)
where = query.build_where(Q(modified__gt=F("created")))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertIsInstance(lookup.rhs, Col)
self.assertIsNone(lookup.rhs.alias)
self.assertIsInstance(lookup.lhs, Col)
self.assertIsNone(lookup.lhs.alias)
self.assertEqual(lookup.rhs.target, Item._meta.get_field("created"))
self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
def test_transform(self):
query = Query(Author, alias_cols=False)
with register_lookup(CharField, Lower):
where = query.build_where(~Q(name__lower="foo"))
lookup = where.children[0]
self.assertIsInstance(lookup, Exact)
self.assertIsInstance(lookup.lhs, Lower)
self.assertIsInstance(lookup.lhs.lhs, Col)
self.assertIsNone(lookup.lhs.lhs.alias)
self.assertEqual(lookup.lhs.lhs.target, Author._meta.get_field("name"))
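    # Negating a lookup on a nullable column adds an IsNull child alongside
    # the original lookup, so rows with NULL also match the negation.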
def test_negated_nullable(self):
query = Query(Item)
where = query.build_where(~Q(modified__lt=datetime(2017, 1, 1)))
self.assertTrue(where.negated)
lookup = where.children[0]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
lookup = where.children[1]
self.assertIsInstance(lookup, IsNull)
self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
def test_foreign_key(self):
query = Query(Item)
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
query.build_where(Q(creator__num__gt=2))
def test_foreign_key_f(self):
query = Query(Ranking)
with self.assertRaises(FieldError):
query.build_where(Q(rank__gt=F("author__num")))
def test_foreign_key_exclusive(self):
query = Query(ObjectC, alias_cols=False)
where = query.build_where(Q(objecta=None) | Q(objectb=None))
a_isnull = where.children[0]
self.assertIsInstance(a_isnull, RelatedIsNull)
self.assertIsInstance(a_isnull.lhs, Col)
self.assertIsNone(a_isnull.lhs.alias)
self.assertEqual(a_isnull.lhs.target, ObjectC._meta.get_field("objecta"))
b_isnull = where.children[1]
self.assertIsInstance(b_isnull, RelatedIsNull)
self.assertIsInstance(b_isnull.lhs, Col)
self.assertIsNone(b_isnull.lhs.alias)
self.assertEqual(b_isnull.lhs.target, ObjectC._meta.get_field("objectb"))
def test_clone_select_related(self):
query = Query(Item)
query.add_select_related(["creator"])
clone = query.clone()
clone.add_select_related(["note", "creator__extra"])
self.assertEqual(query.select_related, {"creator": {}})
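    # The exact lookup's rhs below is the str() of the list, reflecting how
    # CharField coerces non-string lookup values during preparation.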
def test_iterable_lookup_value(self):
query = Query(Item)
where = query.build_where(Q(name=["a", "b"]))
name_exact = where.children[0]
self.assertIsInstance(name_exact, Exact)
self.assertEqual(name_exact.rhs, "['a', 'b']")
def test_filter_conditional(self):
query = Query(Item)
where = query.build_where(Func(output_field=BooleanField()))
exact = where.children[0]
self.assertIsInstance(exact, Exact)
self.assertIsInstance(exact.lhs, Func)
self.assertIs(exact.rhs, True)
def test_filter_conditional_join(self):
query = Query(Item)
filter_expr = Func("note__note", output_field=BooleanField())
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
query.build_where(filter_expr)
def test_filter_non_conditional(self):
query = Query(Item)
msg = "Cannot filter against a non-conditional expression."
with self.assertRaisesMessage(TypeError, msg):
query.build_where(Func(output_field=CharField()))
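# Query(None) builds a model-less query; the tests below show it can still
# carry annotations and be executed through a compiler.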
class TestQueryNoModel(TestCase):
def test_rawsql_annotation(self):
query = Query(None)
sql = "%s IS NULL"
        # Wrap with a CASE WHEN expression if the database backend (e.g.
        # Oracle) doesn't support boolean expressions in the SELECT list.
if not connection.features.supports_boolean_expr_in_select_clause:
sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
query.add_annotation(RawSQL(sql, (None,), BooleanField()), "_check")
result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)
self.assertEqual(result[0], 1)
def test_subquery_annotation(self):
query = Query(None)
query.add_annotation(Exists(Item.objects.all()), "_check")
result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)
self.assertEqual(result[0], 0)
@skipUnlessDBFeature("supports_boolean_expr_in_select_clause")
def test_q_annotation(self):
query = Query(None)
check = ExpressionWrapper(
Q(RawSQL("%s IS NULL", (None,), BooleanField()))
| Q(Exists(Item.objects.all())),
BooleanField(),
)
query.add_annotation(check, "_check")
result = query.get_compiler(using=DEFAULT_DB_ALIAS).execute_sql(SINGLE)
self.assertEqual(result[0], 1)
def test_names_to_path_field(self):
query = Query(None)
query.add_annotation(Value(True), "value")
path, final_field, targets, names = query.names_to_path(["value"], opts=None)
self.assertEqual(path, [])
self.assertIsInstance(final_field, BooleanField)
self.assertEqual(len(targets), 1)
self.assertIsInstance(targets[0], BooleanField)
self.assertEqual(names, [])
def test_names_to_path_field_error(self):
query = Query(None)
msg = "Cannot resolve keyword 'nonexistent' into field."
with self.assertRaisesMessage(FieldError, msg):
query.names_to_path(["nonexistent"], opts=None)
def test_get_field_names_from_opts(self):
self.assertEqual(get_field_names_from_opts(None), set())
class JoinPromoterTest(SimpleTestCase):
def test_repr(self):
self.assertEqual(
repr(JoinPromoter("AND", 3, True)),
"JoinPromoter(connector='AND', num_children=3, negated=True)",
)
|
42c1538fa1c0534ee6e0db29317f426cebeb1db511f48f0306f9cfc3ffba57d2 | """Tests for django.db.utils."""
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS, ProgrammingError, connection
from django.db.utils import ConnectionHandler, load_backend
from django.test import SimpleTestCase, TestCase
from django.utils.connection import ConnectionDoesNotExist
class ConnectionHandlerTests(SimpleTestCase):
def test_connection_handler_no_databases(self):
"""
Empty DATABASES and empty 'default' settings default to the dummy
backend.
"""
for DATABASES in (
{}, # Empty DATABASES setting.
{"default": {}}, # Empty 'default' database.
):
with self.subTest(DATABASES=DATABASES):
self.assertImproperlyConfigured(DATABASES)
def assertImproperlyConfigured(self, DATABASES):
conns = ConnectionHandler(DATABASES)
self.assertEqual(
conns[DEFAULT_DB_ALIAS].settings_dict["ENGINE"], "django.db.backends.dummy"
)
msg = (
"settings.DATABASES is improperly configured. Please supply the "
"ENGINE value. Check settings documentation for more details."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
conns[DEFAULT_DB_ALIAS].ensure_connection()
def test_no_default_database(self):
DATABASES = {"other": {}}
conns = ConnectionHandler(DATABASES)
msg = "You must define a 'default' database."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
conns["other"].ensure_connection()
def test_nonexistent_alias(self):
msg = "The connection 'nonexistent' doesn't exist."
conns = ConnectionHandler(
{
DEFAULT_DB_ALIAS: {"ENGINE": "django.db.backends.dummy"},
}
)
with self.assertRaisesMessage(ConnectionDoesNotExist, msg):
conns["nonexistent"]
class DatabaseErrorWrapperTests(TestCase):
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL test")
def test_reraising_backend_specific_database_exception(self):
with connection.cursor() as cursor:
msg = 'table "X" does not exist'
with self.assertRaisesMessage(ProgrammingError, msg) as cm:
cursor.execute('DROP TABLE "X"')
self.assertNotEqual(type(cm.exception), type(cm.exception.__cause__))
self.assertIsNotNone(cm.exception.__cause__)
self.assertIsNotNone(cm.exception.__cause__.pgcode)
self.assertIsNotNone(cm.exception.__cause__.pgerror)
class LoadBackendTests(SimpleTestCase):
def test_load_backend_invalid_name(self):
msg = (
"'foo' isn't an available database backend or couldn't be "
"imported. Check the above exception. To use one of the built-in "
"backends, use 'django.db.backends.XXX', where XXX is one of:\n"
" 'mysql', 'oracle', 'postgresql', 'sqlite3'"
)
with self.assertRaisesMessage(ImproperlyConfigured, msg) as cm:
load_backend("foo")
self.assertEqual(str(cm.exception.__cause__), "No module named 'foo'")
|
6e5575785c34db4cc341ad27a3ffb94635b40e9af88c75b20429f33d56afed76 | import sys
from io import StringIO
from django.apps import apps
from django.core import checks
from django.core.checks import Error, Warning
from django.core.checks.messages import CheckMessage
from django.core.checks.registry import CheckRegistry
from django.core.management import call_command
from django.core.management.base import CommandError
from django.db import models
from django.test import SimpleTestCase
from django.test.utils import isolate_apps, override_settings, override_system_checks
from .models import SimpleModel, my_check
class DummyObj:
def __repr__(self):
return "obj"
class SystemCheckFrameworkTests(SimpleTestCase):
def test_register_and_run_checks(self):
def f(**kwargs):
calls[0] += 1
return [1, 2, 3]
def f2(**kwargs):
return [4]
def f3(**kwargs):
return [5]
calls = [0]
# test register as decorator
registry = CheckRegistry()
registry.register()(f)
registry.register("tag1", "tag2")(f2)
registry.register("tag2", deploy=True)(f3)
# test register as function
registry2 = CheckRegistry()
registry2.register(f)
registry2.register(f2, "tag1", "tag2")
registry2.register(f3, "tag2", deploy=True)
# check results
errors = registry.run_checks()
errors2 = registry2.run_checks()
self.assertEqual(errors, errors2)
self.assertEqual(sorted(errors), [1, 2, 3, 4])
self.assertEqual(calls[0], 2)
errors = registry.run_checks(tags=["tag1"])
errors2 = registry2.run_checks(tags=["tag1"])
self.assertEqual(errors, errors2)
self.assertEqual(sorted(errors), [4])
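        # f3 was registered with deploy=True, so it only contributes when
        # include_deployment_checks=True is passed.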
errors = registry.run_checks(
tags=["tag1", "tag2"], include_deployment_checks=True
)
errors2 = registry2.run_checks(
tags=["tag1", "tag2"], include_deployment_checks=True
)
self.assertEqual(errors, errors2)
self.assertEqual(sorted(errors), [4, 5])
def test_register_no_kwargs_error(self):
registry = CheckRegistry()
msg = "Check functions must accept keyword arguments (**kwargs)."
with self.assertRaisesMessage(TypeError, msg):
@registry.register
def no_kwargs(app_configs, databases):
pass
def test_register_run_checks_non_iterable(self):
registry = CheckRegistry()
@registry.register
def return_non_iterable(**kwargs):
return Error("Message")
msg = (
"The function %r did not return a list. All functions registered "
"with the checks registry must return a list." % return_non_iterable
)
with self.assertRaisesMessage(TypeError, msg):
registry.run_checks()
class MessageTests(SimpleTestCase):
def test_printing(self):
e = Error("Message", hint="Hint", obj=DummyObj())
expected = "obj: Message\n\tHINT: Hint"
self.assertEqual(str(e), expected)
def test_printing_no_hint(self):
e = Error("Message", obj=DummyObj())
expected = "obj: Message"
self.assertEqual(str(e), expected)
def test_printing_no_object(self):
e = Error("Message", hint="Hint")
expected = "?: Message\n\tHINT: Hint"
self.assertEqual(str(e), expected)
def test_printing_with_given_id(self):
e = Error("Message", hint="Hint", obj=DummyObj(), id="ID")
expected = "obj: (ID) Message\n\tHINT: Hint"
self.assertEqual(str(e), expected)
def test_printing_field_error(self):
field = SimpleModel._meta.get_field("field")
e = Error("Error", obj=field)
expected = "check_framework.SimpleModel.field: Error"
self.assertEqual(str(e), expected)
def test_printing_model_error(self):
e = Error("Error", obj=SimpleModel)
expected = "check_framework.SimpleModel: Error"
self.assertEqual(str(e), expected)
def test_printing_manager_error(self):
manager = SimpleModel.manager
e = Error("Error", obj=manager)
expected = "check_framework.SimpleModel.manager: Error"
self.assertEqual(str(e), expected)
def test_equal_to_self(self):
e = Error("Error", obj=SimpleModel)
self.assertEqual(e, e)
def test_equal_to_same_constructed_check(self):
e1 = Error("Error", obj=SimpleModel)
e2 = Error("Error", obj=SimpleModel)
self.assertEqual(e1, e2)
def test_not_equal_to_different_constructed_check(self):
e1 = Error("Error", obj=SimpleModel)
e2 = Error("Error2", obj=SimpleModel)
self.assertNotEqual(e1, e2)
def test_not_equal_to_non_check(self):
e = Error("Error", obj=DummyObj())
self.assertNotEqual(e, "a string")
def test_invalid_level(self):
msg = "The first argument should be level."
with self.assertRaisesMessage(TypeError, msg):
CheckMessage("ERROR", "Message")
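# Module-level check functions used by CheckCommandTests below; each stores
# the kwargs it was invoked with so the tests can assert how it was called.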
def simple_system_check(**kwargs):
simple_system_check.kwargs = kwargs
return []
def tagged_system_check(**kwargs):
tagged_system_check.kwargs = kwargs
return [checks.Warning("System Check")]
tagged_system_check.tags = ["simpletag"]
def deployment_system_check(**kwargs):
deployment_system_check.kwargs = kwargs
return [checks.Warning("Deployment Check")]
deployment_system_check.tags = ["deploymenttag"]
class CheckCommandTests(SimpleTestCase):
def setUp(self):
simple_system_check.kwargs = None
tagged_system_check.kwargs = None
self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = StringIO(), StringIO()
def tearDown(self):
sys.stdout, sys.stderr = self.old_stdout, self.old_stderr
@override_system_checks([simple_system_check, tagged_system_check])
def test_simple_call(self):
call_command("check")
self.assertEqual(
simple_system_check.kwargs, {"app_configs": None, "databases": None}
)
self.assertEqual(
tagged_system_check.kwargs, {"app_configs": None, "databases": None}
)
@override_system_checks([simple_system_check, tagged_system_check])
def test_given_app(self):
call_command("check", "auth", "admin")
auth_config = apps.get_app_config("auth")
admin_config = apps.get_app_config("admin")
self.assertEqual(
simple_system_check.kwargs,
{"app_configs": [auth_config, admin_config], "databases": None},
)
self.assertEqual(
tagged_system_check.kwargs,
{"app_configs": [auth_config, admin_config], "databases": None},
)
@override_system_checks([simple_system_check, tagged_system_check])
def test_given_tag(self):
call_command("check", tags=["simpletag"])
self.assertIsNone(simple_system_check.kwargs)
self.assertEqual(
tagged_system_check.kwargs, {"app_configs": None, "databases": None}
)
@override_system_checks([simple_system_check, tagged_system_check])
def test_invalid_tag(self):
msg = 'There is no system check with the "missingtag" tag.'
with self.assertRaisesMessage(CommandError, msg):
call_command("check", tags=["missingtag"])
@override_system_checks([simple_system_check])
def test_list_tags_empty(self):
call_command("check", list_tags=True)
self.assertEqual("\n", sys.stdout.getvalue())
@override_system_checks([tagged_system_check])
def test_list_tags(self):
call_command("check", list_tags=True)
self.assertEqual("simpletag\n", sys.stdout.getvalue())
@override_system_checks(
[tagged_system_check], deployment_checks=[deployment_system_check]
)
def test_list_deployment_check_omitted(self):
call_command("check", list_tags=True)
self.assertEqual("simpletag\n", sys.stdout.getvalue())
@override_system_checks(
[tagged_system_check], deployment_checks=[deployment_system_check]
)
def test_list_deployment_check_included(self):
call_command("check", deploy=True, list_tags=True)
self.assertEqual("deploymenttag\nsimpletag\n", sys.stdout.getvalue())
@override_system_checks(
[tagged_system_check], deployment_checks=[deployment_system_check]
)
def test_tags_deployment_check_omitted(self):
msg = 'There is no system check with the "deploymenttag" tag.'
with self.assertRaisesMessage(CommandError, msg):
call_command("check", tags=["deploymenttag"])
@override_system_checks(
[tagged_system_check], deployment_checks=[deployment_system_check]
)
def test_tags_deployment_check_included(self):
call_command("check", deploy=True, tags=["deploymenttag"])
self.assertIn("Deployment Check", sys.stderr.getvalue())
@override_system_checks([tagged_system_check])
def test_fail_level(self):
with self.assertRaises(CommandError):
call_command("check", fail_level="WARNING")
def custom_error_system_check(app_configs, **kwargs):
return [Error("Error", id="myerrorcheck.E001")]
def custom_warning_system_check(app_configs, **kwargs):
return [Warning("Warning", id="mywarningcheck.E001")]
class SilencingCheckTests(SimpleTestCase):
def setUp(self):
self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
self.stdout, self.stderr = StringIO(), StringIO()
sys.stdout, sys.stderr = self.stdout, self.stderr
def tearDown(self):
sys.stdout, sys.stderr = self.old_stdout, self.old_stderr
@override_settings(SILENCED_SYSTEM_CHECKS=["myerrorcheck.E001"])
@override_system_checks([custom_error_system_check])
def test_silenced_error(self):
out = StringIO()
err = StringIO()
call_command("check", stdout=out, stderr=err)
self.assertEqual(
out.getvalue(), "System check identified no issues (1 silenced).\n"
)
self.assertEqual(err.getvalue(), "")
@override_settings(SILENCED_SYSTEM_CHECKS=["mywarningcheck.E001"])
@override_system_checks([custom_warning_system_check])
def test_silenced_warning(self):
out = StringIO()
err = StringIO()
call_command("check", stdout=out, stderr=err)
self.assertEqual(
out.getvalue(), "System check identified no issues (1 silenced).\n"
)
self.assertEqual(err.getvalue(), "")
class CheckFrameworkReservedNamesTests(SimpleTestCase):
@isolate_apps("check_framework", kwarg_name="apps")
@override_system_checks([checks.model_checks.check_all_models])
def test_model_check_method_not_shadowed(self, apps):
class ModelWithAttributeCalledCheck(models.Model):
check = 42
class ModelWithFieldCalledCheck(models.Model):
check = models.IntegerField()
class ModelWithRelatedManagerCalledCheck(models.Model):
pass
class ModelWithDescriptorCalledCheck(models.Model):
check = models.ForeignKey(
ModelWithRelatedManagerCalledCheck, models.CASCADE
)
article = models.ForeignKey(
ModelWithRelatedManagerCalledCheck,
models.CASCADE,
related_name="check",
)
errors = checks.run_checks(app_configs=apps.get_app_configs())
expected = [
Error(
"The 'ModelWithAttributeCalledCheck.check()' class method is "
"currently overridden by 42.",
obj=ModelWithAttributeCalledCheck,
id="models.E020",
),
Error(
"The 'ModelWithFieldCalledCheck.check()' class method is "
"currently overridden by %r." % ModelWithFieldCalledCheck.check,
obj=ModelWithFieldCalledCheck,
id="models.E020",
),
Error(
"The 'ModelWithRelatedManagerCalledCheck.check()' class method is "
"currently overridden by %r."
% ModelWithRelatedManagerCalledCheck.check,
obj=ModelWithRelatedManagerCalledCheck,
id="models.E020",
),
Error(
"The 'ModelWithDescriptorCalledCheck.check()' class method is "
"currently overridden by %r." % ModelWithDescriptorCalledCheck.check,
obj=ModelWithDescriptorCalledCheck,
id="models.E020",
),
]
self.assertEqual(errors, expected)
class ChecksRunDuringTests(SimpleTestCase):
databases = "__all__"
def test_registered_check_did_run(self):
self.assertTrue(my_check.did_run)
|
60d937af4062534fb8cdfc1dee338f6303968a747a8d3f9c90338d907f14d4b4 | import operator
import uuid
from unittest import mock
from django import forms
from django.core import serializers
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.db import (
DataError,
IntegrityError,
NotSupportedError,
OperationalError,
connection,
models,
)
from django.db.models import (
Count,
ExpressionWrapper,
F,
IntegerField,
OuterRef,
Q,
Subquery,
Transform,
Value,
)
from django.db.models.expressions import RawSQL
from django.db.models.fields.json import (
KeyTextTransform,
KeyTransform,
KeyTransformFactory,
KeyTransformTextLookupMixin,
)
from django.db.models.functions import Cast
from django.test import SimpleTestCase, TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import CustomJSONDecoder, JSONModel, NullableJSONModel, RelatedJSONModel
@skipUnlessDBFeature("supports_json_field")
class JSONFieldTests(TestCase):
def test_invalid_value(self):
msg = "is not JSON serializable"
with self.assertRaisesMessage(TypeError, msg):
NullableJSONModel.objects.create(
value={
"uuid": uuid.UUID("d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475"),
}
)
def test_custom_encoder_decoder(self):
value = {"uuid": uuid.UUID("{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}")}
obj = NullableJSONModel(value_custom=value)
obj.clean_fields()
obj.save()
obj.refresh_from_db()
self.assertEqual(obj.value_custom, value)
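    # Mock the encoder so invalid JSON reaches the database and trips its
    # check constraint; the exception class raised varies by backend, hence
    # the tuple passed to assertRaises.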
def test_db_check_constraints(self):
value = "{@!invalid json value 123 $!@#"
with mock.patch.object(DjangoJSONEncoder, "encode", return_value=value):
with self.assertRaises((IntegrityError, DataError, OperationalError)):
NullableJSONModel.objects.create(value_custom=value)
class TestMethods(SimpleTestCase):
def test_deconstruct(self):
field = models.JSONField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.JSONField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_deconstruct_custom_encoder_decoder(self):
field = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(kwargs["encoder"], DjangoJSONEncoder)
self.assertEqual(kwargs["decoder"], CustomJSONDecoder)
def test_get_transforms(self):
@models.JSONField.register_lookup
class MyTransform(Transform):
lookup_name = "my_transform"
field = models.JSONField()
transform = field.get_transform("my_transform")
self.assertIs(transform, MyTransform)
models.JSONField._unregister_lookup(MyTransform)
models.JSONField._clear_cached_lookups()
transform = field.get_transform("my_transform")
self.assertIsInstance(transform, KeyTransformFactory)
def test_key_transform_text_lookup_mixin_non_key_transform(self):
transform = Transform("test")
msg = (
"Transform should be an instance of KeyTransform in order to use "
"this lookup."
)
with self.assertRaisesMessage(TypeError, msg):
KeyTransformTextLookupMixin(transform)
class TestValidation(SimpleTestCase):
def test_invalid_encoder(self):
msg = "The encoder parameter must be a callable object."
with self.assertRaisesMessage(ValueError, msg):
models.JSONField(encoder=DjangoJSONEncoder())
def test_invalid_decoder(self):
msg = "The decoder parameter must be a callable object."
with self.assertRaisesMessage(ValueError, msg):
models.JSONField(decoder=CustomJSONDecoder())
def test_validation_error(self):
field = models.JSONField()
msg = "Value must be valid JSON."
value = uuid.UUID("{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}")
with self.assertRaisesMessage(ValidationError, msg):
field.clean({"uuid": value}, None)
def test_custom_encoder(self):
field = models.JSONField(encoder=DjangoJSONEncoder)
value = uuid.UUID("{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}")
field.clean({"uuid": value}, None)
class TestFormField(SimpleTestCase):
def test_formfield(self):
model_field = models.JSONField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.JSONField)
def test_formfield_custom_encoder_decoder(self):
model_field = models.JSONField(
encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder
)
form_field = model_field.formfield()
self.assertIs(form_field.encoder, DjangoJSONEncoder)
self.assertIs(form_field.decoder, CustomJSONDecoder)
class TestSerialization(SimpleTestCase):
test_data = (
'[{"fields": {"value": %s}, "model": "model_fields.jsonmodel", "pk": null}]'
)
test_values = (
# (Python value, serialized value),
({"a": "b", "c": None}, '{"a": "b", "c": null}'),
("abc", '"abc"'),
('{"a": "a"}', '"{\\"a\\": \\"a\\"}"'),
)
def test_dumping(self):
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = JSONModel(value=value)
data = serializers.serialize("json", [instance])
self.assertJSONEqual(data, self.test_data % serialized)
def test_loading(self):
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = list(
serializers.deserialize("json", self.test_data % serialized)
)[0].object
self.assertEqual(instance.value, value)
def test_xml_serialization(self):
test_xml_data = (
'<django-objects version="1.0">'
'<object model="model_fields.nullablejsonmodel">'
'<field name="value" type="JSONField">%s'
"</field></object></django-objects>"
)
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = NullableJSONModel(value=value)
data = serializers.serialize("xml", [instance], fields=["value"])
self.assertXMLEqual(data, test_xml_data % serialized)
new_instance = list(serializers.deserialize("xml", data))[0].object
self.assertEqual(new_instance.value, instance.value)
@skipUnlessDBFeature("supports_json_field")
class TestSaveLoad(TestCase):
def test_null(self):
obj = NullableJSONModel(value=None)
obj.save()
obj.refresh_from_db()
self.assertIsNone(obj.value)
@skipUnlessDBFeature("supports_primitives_in_json_field")
def test_json_null_different_from_sql_null(self):
json_null = NullableJSONModel.objects.create(value=Value("null"))
json_null.refresh_from_db()
sql_null = NullableJSONModel.objects.create(value=None)
sql_null.refresh_from_db()
# 'null' is not equal to NULL in the database.
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value=Value("null")),
[json_null],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value=None),
[json_null],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__isnull=True),
[sql_null],
)
# 'null' is equal to NULL in Python (None).
self.assertEqual(json_null.value, sql_null.value)
@skipUnlessDBFeature("supports_primitives_in_json_field")
def test_primitives(self):
values = [
True,
1,
1.45,
"String",
"",
]
for value in values:
with self.subTest(value=value):
obj = JSONModel(value=value)
obj.save()
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_dict(self):
values = [
{},
{"name": "John", "age": 20, "height": 180.3},
{"a": True, "b": {"b1": False, "b2": None}},
]
for value in values:
with self.subTest(value=value):
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_list(self):
values = [
[],
["John", 20, 180.3],
[True, [False, None]],
]
for value in values:
with self.subTest(value=value):
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_realistic_object(self):
value = {
"name": "John",
"age": 20,
"pets": [
{"name": "Kit", "type": "cat", "age": 2},
{"name": "Max", "type": "dog", "age": 1},
],
"courses": [
["A1", "A2", "A3"],
["B1", "B2"],
["C1"],
],
}
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
@skipUnlessDBFeature("supports_json_field")
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.primitives = [True, False, "yes", 7, 9.6]
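        # The tests below reference these rows positionally via self.objs[n],
        # so the order of `values` is significant.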
values = [
None,
[],
{},
{"a": "b", "c": 14},
{
"a": "b",
"c": 14,
"d": ["e", {"f": "g"}],
"h": True,
"i": False,
"j": None,
"k": {"l": "m"},
"n": [None, True, False],
"o": '"quoted"',
"p": 4.2,
"r": {"s": True, "t": False},
},
[1, [2]],
{"k": True, "l": False, "foo": "bax"},
{
"foo": "bar",
"baz": {"a": "b", "c": "d"},
"bar": ["foo", "bar"],
"bax": {"foo": "bar"},
},
]
cls.objs = [NullableJSONModel.objects.create(value=value) for value in values]
if connection.features.supports_primitives_in_json_field:
cls.objs.extend(
[
NullableJSONModel.objects.create(value=value)
for value in cls.primitives
]
)
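        # PostgreSQL needs an explicit ::jsonb cast for raw JSON parameters.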
cls.raw_sql = "%s::jsonb" if connection.vendor == "postgresql" else "%s"
def test_exact(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__exact={}),
[self.objs[2]],
)
def test_exact_complex(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__exact={"a": "b", "c": 14}),
[self.objs[3]],
)
def test_icontains(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__icontains="BaX"),
self.objs[6:8],
)
def test_isnull(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__isnull=True),
[self.objs[0]],
)
def test_ordering_by_transform(self):
mariadb = connection.vendor == "mysql" and connection.mysql_is_mariadb
values = [
{"ord": 93, "name": "bar"},
{"ord": 22.1, "name": "foo"},
{"ord": -1, "name": "baz"},
{"ord": 21.931902, "name": "spam"},
{"ord": -100291029, "name": "eggs"},
]
for field_name in ["value", "value_custom"]:
with self.subTest(field=field_name):
objs = [
NullableJSONModel.objects.create(**{field_name: value})
for value in values
]
query = NullableJSONModel.objects.filter(
**{"%s__name__isnull" % field_name: False},
).order_by("%s__ord" % field_name)
expected = [objs[4], objs[2], objs[3], objs[1], objs[0]]
if mariadb or connection.vendor == "oracle":
# MariaDB and Oracle return JSON values as strings.
expected = [objs[2], objs[4], objs[3], objs[1], objs[0]]
self.assertSequenceEqual(query, expected)
def test_ordering_grouping_by_key_transform(self):
base_qs = NullableJSONModel.objects.filter(value__d__0__isnull=False)
for qs in (
base_qs.order_by("value__d__0"),
base_qs.annotate(
key=KeyTransform("0", KeyTransform("d", "value"))
).order_by("key"),
):
self.assertSequenceEqual(qs, [self.objs[4]])
qs = NullableJSONModel.objects.filter(value__isnull=False)
self.assertQuerysetEqual(
qs.filter(value__isnull=False)
.annotate(
key=KeyTextTransform(
"f", KeyTransform("1", KeyTransform("d", "value"))
),
)
.values("key")
.annotate(count=Count("key"))
.order_by("count"),
[(None, 0), ("g", 1)],
operator.itemgetter("key", "count"),
)
def test_ordering_grouping_by_count(self):
qs = (
NullableJSONModel.objects.filter(
value__isnull=False,
)
.values("value__d__0")
.annotate(count=Count("value__d__0"))
.order_by("count")
)
self.assertQuerysetEqual(qs, [0, 1], operator.itemgetter("count"))
def test_order_grouping_custom_decoder(self):
NullableJSONModel.objects.create(value_custom={"a": "b"})
qs = NullableJSONModel.objects.filter(value_custom__isnull=False)
self.assertSequenceEqual(
qs.values(
"value_custom__a",
)
.annotate(
count=Count("id"),
)
.order_by("value_custom__a"),
[{"value_custom__a": "b", "count": 1}],
)
def test_key_transform_raw_expression(self):
expr = RawSQL(self.raw_sql, ['{"x": "bar"}'])
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__foo=KeyTransform("x", expr)),
[self.objs[7]],
)
def test_nested_key_transform_raw_expression(self):
expr = RawSQL(self.raw_sql, ['{"x": {"y": "bar"}}'])
self.assertSequenceEqual(
NullableJSONModel.objects.filter(
value__foo=KeyTransform("y", KeyTransform("x", expr))
),
[self.objs[7]],
)
def test_key_transform_expression(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=KeyTransform("d", "value"),
chain=KeyTransform("0", "key"),
expr=KeyTransform("0", Cast("key", models.JSONField())),
)
.filter(chain=F("expr")),
[self.objs[4]],
)
def test_key_transform_annotation_expression(self):
obj = NullableJSONModel.objects.create(value={"d": ["e", "e"]})
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=F("value__d"),
chain=F("key__0"),
expr=Cast("key", models.JSONField()),
)
.filter(chain=F("expr__1")),
[obj],
)
def test_nested_key_transform_expression(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=KeyTransform("d", "value"),
chain=KeyTransform("f", KeyTransform("1", "key")),
expr=KeyTransform(
"f", KeyTransform("1", Cast("key", models.JSONField()))
),
)
.filter(chain=F("expr")),
[self.objs[4]],
)
def test_nested_key_transform_annotation_expression(self):
obj = NullableJSONModel.objects.create(
value={"d": ["e", {"f": "g"}, {"f": "g"}]},
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
key=F("value__d"),
chain=F("key__1__f"),
expr=Cast("key", models.JSONField()),
)
.filter(chain=F("expr__2__f")),
[obj],
)
def test_nested_key_transform_on_subquery(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False)
.annotate(
subquery_value=Subquery(
NullableJSONModel.objects.filter(pk=OuterRef("pk")).values("value")
),
key=KeyTransform("d", "subquery_value"),
chain=KeyTransform("f", KeyTransform("1", "key")),
)
.filter(chain="g"),
[self.objs[4]],
)
def test_expression_wrapper_key_transform(self):
self.assertSequenceEqual(
NullableJSONModel.objects.annotate(
expr=ExpressionWrapper(
KeyTransform("c", "value"),
output_field=IntegerField(),
),
).filter(expr__isnull=False),
self.objs[3:5],
)
def test_has_key(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_key="a"),
[self.objs[3], self.objs[4]],
)
def test_has_key_null_value(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_key="j"),
[self.objs[4]],
)
def test_has_key_deep(self):
tests = [
(Q(value__baz__has_key="a"), self.objs[7]),
(
Q(value__has_key=KeyTransform("a", KeyTransform("baz", "value"))),
self.objs[7],
),
(Q(value__has_key=F("value__baz__a")), self.objs[7]),
(
Q(value__has_key=KeyTransform("c", KeyTransform("baz", "value"))),
self.objs[7],
),
(Q(value__has_key=F("value__baz__c")), self.objs[7]),
(Q(value__d__1__has_key="f"), self.objs[4]),
(
Q(
value__has_key=KeyTransform(
"f", KeyTransform("1", KeyTransform("d", "value"))
)
),
self.objs[4],
),
(Q(value__has_key=F("value__d__1__f")), self.objs[4]),
]
for condition, expected in tests:
with self.subTest(condition=condition):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition),
[expected],
)
def test_has_key_list(self):
obj = NullableJSONModel.objects.create(value=[{"a": 1}, {"b": "x"}])
tests = [
Q(value__1__has_key="b"),
Q(value__has_key=KeyTransform("b", KeyTransform(1, "value"))),
Q(value__has_key=KeyTransform("b", KeyTransform("1", "value"))),
Q(value__has_key=F("value__1__b")),
]
for condition in tests:
with self.subTest(condition=condition):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition),
[obj],
)
def test_has_keys(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_keys=["a", "c", "h"]),
[self.objs[4]],
)
def test_has_any_keys(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_any_keys=["c", "l"]),
[self.objs[3], self.objs[4], self.objs[6]],
)
def test_has_key_number(self):
obj = NullableJSONModel.objects.create(
value={
"123": "value",
"nested": {"456": "bar", "lorem": "abc", "999": True},
"array": [{"789": "baz", "777": "def", "ipsum": 200}],
"000": "val",
}
)
tests = [
Q(value__has_key="123"),
Q(value__nested__has_key="456"),
Q(value__array__0__has_key="789"),
Q(value__has_keys=["nested", "123", "array", "000"]),
Q(value__nested__has_keys=["lorem", "999", "456"]),
Q(value__array__0__has_keys=["789", "ipsum", "777"]),
Q(value__has_any_keys=["000", "nonexistent"]),
Q(value__nested__has_any_keys=["999", "nonexistent"]),
Q(value__array__0__has_any_keys=["777", "nonexistent"]),
]
for condition in tests:
with self.subTest(condition=condition):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition),
[obj],
)
@skipUnlessDBFeature("supports_json_field_contains")
def test_contains(self):
tests = [
({}, self.objs[2:5] + self.objs[6:8]),
({"baz": {"a": "b", "c": "d"}}, [self.objs[7]]),
({"baz": {"a": "b"}}, [self.objs[7]]),
({"baz": {"c": "d"}}, [self.objs[7]]),
({"k": True, "l": False}, [self.objs[6]]),
({"d": ["e", {"f": "g"}]}, [self.objs[4]]),
({"d": ["e"]}, [self.objs[4]]),
({"d": [{"f": "g"}]}, [self.objs[4]]),
([1, [2]], [self.objs[5]]),
([1], [self.objs[5]]),
([[2]], [self.objs[5]]),
({"n": [None, True, False]}, [self.objs[4]]),
({"j": None}, [self.objs[4]]),
]
for value, expected in tests:
with self.subTest(value=value):
qs = NullableJSONModel.objects.filter(value__contains=value)
self.assertSequenceEqual(qs, expected)
@skipIfDBFeature("supports_json_field_contains")
def test_contains_unsupported(self):
msg = "contains lookup is not supported on this database backend."
with self.assertRaisesMessage(NotSupportedError, msg):
NullableJSONModel.objects.filter(
value__contains={"baz": {"a": "b", "c": "d"}},
).get()
@skipUnlessDBFeature(
"supports_primitives_in_json_field",
"supports_json_field_contains",
)
def test_contains_primitives(self):
for value in self.primitives:
with self.subTest(value=value):
qs = NullableJSONModel.objects.filter(value__contains=value)
self.assertIs(qs.exists(), True)
@skipUnlessDBFeature("supports_json_field_contains")
def test_contained_by(self):
qs = NullableJSONModel.objects.filter(
value__contained_by={"a": "b", "c": 14, "h": True}
)
self.assertSequenceEqual(qs, self.objs[2:4])
@skipIfDBFeature("supports_json_field_contains")
def test_contained_by_unsupported(self):
msg = "contained_by lookup is not supported on this database backend."
with self.assertRaisesMessage(NotSupportedError, msg):
NullableJSONModel.objects.filter(value__contained_by={"a": "b"}).get()
def test_deep_values(self):
qs = NullableJSONModel.objects.values_list("value__k__l")
expected_objs = [(None,)] * len(self.objs)
expected_objs[4] = ("m",)
self.assertSequenceEqual(qs, expected_objs)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_deep_distinct(self):
query = NullableJSONModel.objects.distinct("value__k__l").values_list(
"value__k__l"
)
self.assertSequenceEqual(query, [("m",), (None,)])
def test_isnull_key(self):
# key__isnull=False works the same as has_key='key'.
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__a__isnull=True),
self.objs[:3] + self.objs[5:],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__j__isnull=True),
self.objs[:4] + self.objs[5:],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__a__isnull=False),
[self.objs[3], self.objs[4]],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__j__isnull=False),
[self.objs[4]],
)
def test_isnull_key_or_none(self):
obj = NullableJSONModel.objects.create(value={"a": None})
self.assertSequenceEqual(
NullableJSONModel.objects.filter(
Q(value__a__isnull=True) | Q(value__a=None)
),
self.objs[:3] + self.objs[5:] + [obj],
)
def test_none_key(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__j=None),
[self.objs[4]],
)
def test_none_key_exclude(self):
obj = NullableJSONModel.objects.create(value={"j": 1})
if connection.vendor == "oracle":
# Oracle supports filtering JSON objects with NULL keys, but the
# current implementation doesn't support it.
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(value__j=None),
self.objs[1:4] + self.objs[5:] + [obj],
)
else:
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(value__j=None), [obj]
)
def test_shallow_list_lookup(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__0=1),
[self.objs[5]],
)
def test_shallow_obj_lookup(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__a="b"),
[self.objs[3], self.objs[4]],
)
def test_obj_subquery_lookup(self):
qs = NullableJSONModel.objects.annotate(
field=Subquery(
NullableJSONModel.objects.filter(pk=OuterRef("pk")).values("value")
),
).filter(field__a="b")
self.assertSequenceEqual(qs, [self.objs[3], self.objs[4]])
def test_deep_lookup_objs(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__k__l="m"),
[self.objs[4]],
)
def test_shallow_lookup_obj_target(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__k={"l": "m"}),
[self.objs[4]],
)
def test_deep_lookup_array(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__1__0=2),
[self.objs[5]],
)
def test_deep_lookup_mixed(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__1__f="g"),
[self.objs[4]],
)
def test_deep_lookup_transform(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__c__gt=2),
[self.objs[3], self.objs[4]],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__c__gt=2.33),
[self.objs[3], self.objs[4]],
)
self.assertIs(NullableJSONModel.objects.filter(value__c__lt=5).exists(), False)
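    # exclude(condition) and filter(~condition) are expected to be
    # equivalent; both forms are asserted against the same result set.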
def test_lookup_exclude(self):
tests = [
(Q(value__a="b"), [self.objs[0]]),
(Q(value__foo="bax"), [self.objs[0], self.objs[7]]),
]
for condition, expected in tests:
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(condition),
expected,
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(~condition),
expected,
)
def test_lookup_exclude_nonexistent_key(self):
# Values without the key are ignored.
condition = Q(value__foo="bax")
objs_with_value = [self.objs[6]]
objs_with_different_value = [self.objs[0], self.objs[7]]
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(condition),
objs_with_different_value,
)
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(~condition),
objs_with_value,
)
self.assertCountEqual(
NullableJSONModel.objects.filter(condition | ~condition),
objs_with_value + objs_with_different_value,
)
self.assertCountEqual(
NullableJSONModel.objects.exclude(condition & ~condition),
objs_with_value + objs_with_different_value,
)
# Add the __isnull lookup to get an exhaustive set.
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(condition & Q(value__foo__isnull=False)),
self.objs[0:6] + self.objs[7:],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition & Q(value__foo__isnull=False)),
objs_with_value,
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(
id__in=NullableJSONModel.objects.filter(value__c=14),
),
self.objs[3:5],
)
@skipUnlessDBFeature("supports_json_field_contains")
def test_array_key_contains(self):
tests = [
([], [self.objs[7]]),
("bar", [self.objs[7]]),
(["bar"], [self.objs[7]]),
("ar", []),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__bar__contains=value),
expected,
)
def test_key_iexact(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iexact="BaR").exists(), True
)
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iexact='"BaR"').exists(), False
)
def test_key_in(self):
tests = [
("value__c__in", [14], self.objs[3:5]),
("value__c__in", [14, 15], self.objs[3:5]),
("value__0__in", [1], [self.objs[5]]),
("value__0__in", [1, 3], [self.objs[5]]),
("value__foo__in", ["bar"], [self.objs[7]]),
(
"value__foo__in",
[KeyTransform("foo", KeyTransform("bax", "value"))],
[self.objs[7]],
),
("value__foo__in", [F("value__bax__foo")], [self.objs[7]]),
(
"value__foo__in",
[KeyTransform("foo", KeyTransform("bax", "value")), "baz"],
[self.objs[7]],
),
("value__foo__in", [F("value__bax__foo"), "baz"], [self.objs[7]]),
("value__foo__in", ["bar", "baz"], [self.objs[7]]),
("value__bar__in", [["foo", "bar"]], [self.objs[7]]),
("value__bar__in", [["foo", "bar"], ["a"]], [self.objs[7]]),
("value__bax__in", [{"foo": "bar"}, {"a": "b"}], [self.objs[7]]),
("value__h__in", [True, "foo"], [self.objs[4]]),
("value__i__in", [False, "foo"], [self.objs[4]]),
]
for lookup, value, expected in tests:
with self.subTest(lookup=lookup, value=value):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(**{lookup: value}),
expected,
)
def test_key_values(self):
qs = NullableJSONModel.objects.filter(value__h=True)
tests = [
("value__a", "b"),
("value__c", 14),
("value__d", ["e", {"f": "g"}]),
("value__h", True),
("value__i", False),
("value__j", None),
("value__k", {"l": "m"}),
("value__n", [None, True, False]),
("value__p", 4.2),
("value__r", {"s": True, "t": False}),
]
for lookup, expected in tests:
with self.subTest(lookup=lookup):
self.assertEqual(qs.values_list(lookup, flat=True).get(), expected)
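    # assertIs (rather than assertEqual) guards against backends returning
    # 0/1 integers instead of real booleans.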
def test_key_values_boolean(self):
qs = NullableJSONModel.objects.filter(value__h=True, value__i=False)
tests = [
("value__h", True),
("value__i", False),
]
for lookup, expected in tests:
with self.subTest(lookup=lookup):
self.assertIs(qs.values_list(lookup, flat=True).get(), expected)
@skipUnlessDBFeature("supports_json_field_contains")
def test_key_contains(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__contains="ar").exists(), False
)
self.assertIs(
NullableJSONModel.objects.filter(value__foo__contains="bar").exists(), True
)
def test_key_icontains(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__icontains="Ar").exists(), True
)
def test_key_startswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__startswith="b").exists(), True
)
def test_key_istartswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__istartswith="B").exists(), True
)
def test_key_endswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__endswith="r").exists(), True
)
def test_key_iendswith(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iendswith="R").exists(), True
)
def test_key_regex(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__regex=r"^bar$").exists(), True
)
def test_key_iregex(self):
self.assertIs(
NullableJSONModel.objects.filter(value__foo__iregex=r"^bAr$").exists(), True
)
def test_key_quoted_string(self):
self.assertEqual(
NullableJSONModel.objects.filter(value__o='"quoted"').get(),
self.objs[4],
)
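    # A hostile key name must be escaped into a quoted JSON path rather than
    # spliced into the SQL; the captured query is inspected for the escaped
    # form.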
@skipUnlessDBFeature("has_json_operators")
def test_key_sql_injection(self):
with CaptureQueriesContext(connection) as queries:
self.assertIs(
NullableJSONModel.objects.filter(
**{
"""value__test' = '"a"') OR 1 = 1 OR ('d""": "x",
}
).exists(),
False,
)
self.assertIn(
"""."value" -> 'test'' = ''"a"'') OR 1 = 1 OR (''d') = '"x"' """,
queries[0]["sql"],
)
@skipIfDBFeature("has_json_operators")
def test_key_sql_injection_escape(self):
query = str(
JSONModel.objects.filter(
**{
"""value__test") = '"a"' OR 1 = 1 OR ("d""": "x",
}
).query
)
self.assertIn('"test\\"', query)
self.assertIn('\\"d', query)
def test_key_escape(self):
obj = NullableJSONModel.objects.create(value={"%total": 10})
self.assertEqual(
NullableJSONModel.objects.filter(**{"value__%total": 10}).get(), obj
)
def test_none_key_and_exact_lookup(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__a="b", value__j=None),
[self.objs[4]],
)
def test_lookups_with_key_transform(self):
tests = (
("value__baz__has_key", "c"),
("value__baz__has_keys", ["a", "c"]),
("value__baz__has_any_keys", ["a", "x"]),
("value__has_key", KeyTextTransform("foo", "value")),
)
for lookup, value in tests:
with self.subTest(lookup=lookup):
self.assertIs(
NullableJSONModel.objects.filter(
**{lookup: value},
).exists(),
True,
)
@skipUnlessDBFeature("supports_json_field_contains")
def test_contains_contained_by_with_key_transform(self):
tests = [
("value__d__contains", "e"),
("value__d__contains", [{"f": "g"}]),
("value__contains", KeyTransform("bax", "value")),
("value__contains", F("value__bax")),
("value__baz__contains", {"a": "b"}),
("value__baz__contained_by", {"a": "b", "c": "d", "e": "f"}),
(
"value__contained_by",
KeyTransform(
"x",
RawSQL(
self.raw_sql,
['{"x": {"a": "b", "c": 1, "d": "e"}}'],
),
),
),
]
# For databases where {'f': 'g'} (without surrounding []) matches
# [{'f': 'g'}].
if not connection.features.json_key_contains_list_matching_requires_list:
tests.append(("value__d__contains", {"f": "g"}))
for lookup, value in tests:
with self.subTest(lookup=lookup, value=value):
self.assertIs(
NullableJSONModel.objects.filter(
**{lookup: value},
).exists(),
True,
)
def test_join_key_transform_annotation_expression(self):
related_obj = RelatedJSONModel.objects.create(
value={"d": ["f", "e"]},
json_model=self.objs[4],
)
RelatedJSONModel.objects.create(
value={"d": ["e", "f"]},
json_model=self.objs[4],
)
self.assertSequenceEqual(
RelatedJSONModel.objects.annotate(
key=F("value__d"),
related_key=F("json_model__value__d"),
chain=F("key__1"),
expr=Cast("key", models.JSONField()),
).filter(chain=F("related_key__0")),
[related_obj],
)
|
02a83d32c27a40cb7750bb679a3fcce9b420dbdd54d73b8c576f98e1d18acb55 | from django import forms
from django.contrib import admin
from django.contrib.admin import AdminSite
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.contenttypes.admin import GenericStackedInline
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.core import checks
from django.test import SimpleTestCase, override_settings
from .models import Album, Author, Book, City, Influence, Song, State, TwoAlbumFKAndAnE
class SongForm(forms.ModelForm):
pass
class ValidFields(admin.ModelAdmin):
form = SongForm
fields = ["title"]
class ValidFormFieldsets(admin.ModelAdmin):
def get_form(self, request, obj=None, **kwargs):
class ExtraFieldForm(SongForm):
name = forms.CharField(max_length=50)
return ExtraFieldForm
fieldsets = (
(
None,
{
"fields": ("name",),
},
),
)
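# A ModelAdmin whose check() returns a sentinel error, used to verify that
# registered admins participate in the system check run.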
class MyAdmin(admin.ModelAdmin):
def check(self, **kwargs):
return ["error!"]
class AuthenticationMiddlewareSubclass(AuthenticationMiddleware):
pass
class MessageMiddlewareSubclass(MessageMiddleware):
pass
class ModelBackendSubclass(ModelBackend):
pass
class SessionMiddlewareSubclass(SessionMiddleware):
pass
@override_settings(
SILENCED_SYSTEM_CHECKS=["fields.W342"], # ForeignKey(unique=True)
INSTALLED_APPS=[
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"admin_checks",
],
)
class SystemChecksTestCase(SimpleTestCase):
databases = "__all__"
def test_checks_are_performed(self):
admin.site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ["error!"]
self.assertEqual(errors, expected)
finally:
admin.site.unregister(Song)
@override_settings(INSTALLED_APPS=["django.contrib.admin"])
def test_apps_dependencies(self):
errors = admin.checks.check_dependencies()
expected = [
checks.Error(
"'django.contrib.contenttypes' must be in "
"INSTALLED_APPS in order to use the admin application.",
id="admin.E401",
),
checks.Error(
"'django.contrib.auth' must be in INSTALLED_APPS in order "
"to use the admin application.",
id="admin.E405",
),
checks.Error(
"'django.contrib.messages' must be in INSTALLED_APPS in order "
"to use the admin application.",
id="admin.E406",
),
]
self.assertEqual(errors, expected)
@override_settings(TEMPLATES=[])
def test_no_template_engines(self):
self.assertEqual(
admin.checks.check_dependencies(),
[
checks.Error(
"A 'django.template.backends.django.DjangoTemplates' "
"instance must be configured in TEMPLATES in order to use "
"the admin application.",
id="admin.E403",
)
],
)
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [],
},
}
],
)
def test_context_processor_dependencies(self):
expected = [
checks.Error(
"'django.contrib.auth.context_processors.auth' must be "
"enabled in DjangoTemplates (TEMPLATES) if using the default "
"auth backend in order to use the admin application.",
id="admin.E402",
),
checks.Error(
"'django.contrib.messages.context_processors.messages' must "
"be enabled in DjangoTemplates (TEMPLATES) in order to use "
"the admin application.",
id="admin.E404",
),
checks.Warning(
"'django.template.context_processors.request' must be enabled "
"in DjangoTemplates (TEMPLATES) in order to use the admin "
"navigation sidebar.",
id="admin.W411",
),
]
self.assertEqual(admin.checks.check_dependencies(), expected)
# The first error doesn't happen if
# 'django.contrib.auth.backends.ModelBackend' isn't in
# AUTHENTICATION_BACKENDS.
with self.settings(AUTHENTICATION_BACKENDS=[]):
self.assertEqual(admin.checks.check_dependencies(), expected[1:])
@override_settings(
AUTHENTICATION_BACKENDS=["admin_checks.tests.ModelBackendSubclass"],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
def test_context_processor_dependencies_model_backend_subclass(self):
self.assertEqual(
admin.checks.check_dependencies(),
[
checks.Error(
"'django.contrib.auth.context_processors.auth' must be "
"enabled in DjangoTemplates (TEMPLATES) if using the default "
"auth backend in order to use the admin application.",
id="admin.E402",
),
],
)
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.dummy.TemplateStrings",
"DIRS": [],
"APP_DIRS": True,
},
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
],
)
def test_several_templates_backends(self):
self.assertEqual(admin.checks.check_dependencies(), [])
@override_settings(MIDDLEWARE=[])
def test_middleware_dependencies(self):
errors = admin.checks.check_dependencies()
expected = [
checks.Error(
"'django.contrib.auth.middleware.AuthenticationMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
id="admin.E408",
),
checks.Error(
"'django.contrib.messages.middleware.MessageMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
id="admin.E409",
),
checks.Error(
"'django.contrib.sessions.middleware.SessionMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
hint=(
"Insert "
"'django.contrib.sessions.middleware.SessionMiddleware' "
"before "
"'django.contrib.auth.middleware.AuthenticationMiddleware'."
),
id="admin.E410",
),
]
self.assertEqual(errors, expected)
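    # Subclasses of the required middleware satisfy the dependency checks
    # just like the stock classes.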
@override_settings(
MIDDLEWARE=[
"admin_checks.tests.AuthenticationMiddlewareSubclass",
"admin_checks.tests.MessageMiddlewareSubclass",
"admin_checks.tests.SessionMiddlewareSubclass",
]
)
def test_middleware_subclasses(self):
self.assertEqual(admin.checks.check_dependencies(), [])
@override_settings(
MIDDLEWARE=[
"django.contrib.does.not.Exist",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
]
)
def test_admin_check_ignores_import_error_in_middleware(self):
self.assertEqual(admin.checks.check_dependencies(), [])
def test_custom_adminsite(self):
class CustomAdminSite(admin.AdminSite):
pass
custom_site = CustomAdminSite()
custom_site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ["error!"]
self.assertEqual(errors, expected)
finally:
custom_site.unregister(Song)
def test_allows_checks_relying_on_other_modeladmins(self):
class MyBookAdmin(admin.ModelAdmin):
def check(self, **kwargs):
errors = super().check(**kwargs)
author_admin = self.admin_site._registry.get(Author)
if author_admin is None:
errors.append("AuthorAdmin missing!")
return errors
class MyAuthorAdmin(admin.ModelAdmin):
pass
admin.site.register(Book, MyBookAdmin)
admin.site.register(Author, MyAuthorAdmin)
try:
self.assertEqual(admin.site.check(None), [])
finally:
admin.site.unregister(Book)
admin.site.unregister(Author)
def test_field_name_not_in_list_display(self):
class SongAdmin(admin.ModelAdmin):
list_editable = ["original_release"]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'original_release', "
"which is not contained in 'list_display'.",
obj=SongAdmin,
id="admin.E122",
)
]
self.assertEqual(errors, expected)
def test_list_editable_not_a_list_or_tuple(self):
class SongAdmin(admin.ModelAdmin):
list_editable = "test"
self.assertEqual(
SongAdmin(Song, AdminSite()).check(),
[
checks.Error(
"The value of 'list_editable' must be a list or tuple.",
obj=SongAdmin,
id="admin.E120",
)
],
)
def test_list_editable_missing_field(self):
class SongAdmin(admin.ModelAdmin):
list_editable = ("test",)
self.assertEqual(
SongAdmin(Song, AdminSite()).check(),
[
checks.Error(
"The value of 'list_editable[0]' refers to 'test', which is "
"not a field of 'admin_checks.Song'.",
obj=SongAdmin,
id="admin.E121",
)
],
)
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
list_display = ["pk", "original_release"]
list_editable = ["original_release"]
fieldsets = [
(
None,
{
"fields": ["title", "original_release"],
},
),
]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'original_release', "
"which is not editable through the admin.",
obj=SongAdmin,
id="admin.E125",
)
]
self.assertEqual(errors, expected)
def test_editable(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(
None,
{
"fields": ["title", "original_release"],
},
),
]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_modelforms_with_fields_fieldsets(self):
"""
# Regression test for #8027: custom ModelForms with fields/fieldsets
"""
errors = ValidFields(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_get_form_with_fieldsets(self):
"""
The fieldsets checks are skipped when the ModelAdmin.get_form() method
is overridden.
"""
errors = ValidFormFieldsets(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_fieldsets_fields_non_tuple(self):
"""
The first fieldset's fields must be a list/tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {"fields": "title"}), # not a tuple
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[0][1]['fields']' must be a list or tuple.",
obj=NotATupleAdmin,
id="admin.E008",
)
]
self.assertEqual(errors, expected)
def test_nonfirst_fieldset(self):
"""
The second fieldset's fields must be a list/tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
fieldsets = [
(None, {"fields": ("title",)}),
("foo", {"fields": "author"}), # not a tuple
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[1][1]['fields']' must be a list or tuple.",
obj=NotATupleAdmin,
id="admin.E008",
)
]
self.assertEqual(errors, expected)
def test_exclude_values(self):
"""
Tests for basic system checks of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = "foo"
errors = ExcludedFields1(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
obj=ExcludedFields1,
id="admin.E014",
)
]
self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ("name", "name")
errors = ExcludedFields2(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
obj=ExcludedFields2,
id="admin.E015",
)
]
self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = "foo"
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
errors = ExcludedFieldsAlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
obj=ExcludedFieldsInline,
id="admin.E014",
)
]
self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
"""
        Regression test for #9932 - exclude in InlineModelAdmin should not
        contain the ForeignKey field pointing to the parent ModelAdmin.model.
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ["album"]
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
errors = AlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"Cannot exclude the field 'album', because it is the foreign key "
"to the parent model 'admin_checks.Album'.",
obj=SongInline,
id="admin.E201",
)
]
self.assertEqual(errors, expected)
def test_valid_generic_inline_model_admin(self):
"""
Regression test for #22034 - check that generic inlines don't look for
normal ForeignKey relations.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_generic_inline_model_admin_non_generic_model(self):
"""
A model without a GenericForeignKey raises problems if it's included
in a GenericInlineModelAdmin definition.
"""
class BookInline(GenericStackedInline):
model = Book
class SongAdmin(admin.ModelAdmin):
inlines = [BookInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Book' has no GenericForeignKey.",
obj=BookInline,
id="admin.E301",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_ct_field(self):
"""
A GenericInlineModelAdmin errors if the ct_field points to a
nonexistent field.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = "nonexistent"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'ct_field' references 'nonexistent', which is not a field on "
"'admin_checks.Influence'.",
obj=InfluenceInline,
id="admin.E302",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_fk_field(self):
"""
A GenericInlineModelAdmin errors if the ct_fk_field points to a
nonexistent field.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = "nonexistent"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'ct_fk_field' references 'nonexistent', which is not a field on "
"'admin_checks.Influence'.",
obj=InfluenceInline,
id="admin.E303",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_ct_field(self):
"""
A GenericInlineModelAdmin raises problems if the ct_field points to a
field that isn't part of a GenericForeignKey.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = "name"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using "
"content type field 'name' and object ID field 'object_id'.",
obj=InfluenceInline,
id="admin.E304",
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_fk_field(self):
"""
A GenericInlineModelAdmin raises problems if the ct_fk_field points to
a field that isn't part of a GenericForeignKey.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = "name"
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using "
"content type field 'content_type' and object ID field 'name'.",
obj=InfluenceInline,
id="admin.E304",
)
]
self.assertEqual(errors, expected)
def test_app_label_in_admin_checks(self):
class RawIdNonexistentAdmin(admin.ModelAdmin):
raw_id_fields = ("nonexistent",)
errors = RawIdNonexistentAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"The value of 'raw_id_fields[0]' refers to 'nonexistent', "
"which is not a field of 'admin_checks.Album'.",
obj=RawIdNonexistentAdmin,
id="admin.E002",
)
]
self.assertEqual(errors, expected)
def test_fk_exclusion(self):
"""
        Regression test for #11709 - when checking excluded fields on an
        inline (i.e. when 'exclude' is given), 'fk_name' must be honored;
        otherwise the check blows up when there is more than one ForeignKey
        to the parent model.
"""
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
self.assertEqual(errors, [])
def test_inline_self_check(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey "
"to 'admin_checks.Album'. You must specify a 'fk_name' "
"attribute.",
obj=TwoAlbumFKAndAnEInline,
id="admin.E202",
)
]
self.assertEqual(errors, expected)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin(Album, AdminSite()).check()
self.assertEqual(errors, [])
def test_inlines_property(self):
class CitiesInline(admin.TabularInline):
model = City
class StateAdmin(admin.ModelAdmin):
@property
def inlines(self):
return [CitiesInline]
errors = StateAdmin(State, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_on_method(self):
@admin.display
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
@admin.display
def readonly_method_on_modeladmin(self, obj):
pass
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_dynamic_attribute_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("dynamic_method",)
def __getattr__(self, item):
if item == "dynamic_method":
@admin.display
def method(obj):
pass
return method
raise AttributeError
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_nonexistent_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistent")
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'readonly_fields[1]' is not a callable, an attribute "
"of 'SongAdmin', or an attribute of 'admin_checks.Song'.",
obj=SongAdmin,
id="admin.E035",
)
]
self.assertEqual(errors, expected)
def test_nonexistent_field_on_inline(self):
class CityInline(admin.TabularInline):
model = City
readonly_fields = ["i_dont_exist"] # Missing attribute
errors = CityInline(State, AdminSite()).check()
expected = [
checks.Error(
"The value of 'readonly_fields[0]' is not a callable, an attribute "
"of 'CityInline', or an attribute of 'admin_checks.City'.",
obj=CityInline,
id="admin.E035",
)
]
self.assertEqual(errors, expected)
def test_readonly_fields_not_list_or_tuple(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = "test"
self.assertEqual(
SongAdmin(Song, AdminSite()).check(),
[
checks.Error(
"The value of 'readonly_fields' must be a list or tuple.",
obj=SongAdmin,
id="admin.E034",
)
],
)
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
@admin.display
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_readonly_lambda(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = (lambda obj: "test",)
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_graceful_m2m_fail(self):
"""
Regression test for #12203/#12237 - Fail more gracefully when a M2M field that
specifies the 'through' option is included in the 'fields' or the 'fieldsets'
ModelAdmin options.
"""
class BookAdmin(admin.ModelAdmin):
fields = ["authors"]
errors = BookAdmin(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fields' cannot include the ManyToManyField 'authors', "
"because that field manually specifies a relationship model.",
obj=BookAdmin,
id="admin.E013",
)
]
self.assertEqual(errors, expected)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
("Header 1", {"fields": ("name",)}),
("Header 2", {"fields": ("authors",)}),
)
errors = FieldsetBookAdmin(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[1][1][\"fields\"]' cannot include the "
"ManyToManyField 'authors', because that field manually specifies a "
"relationship model.",
obj=FieldsetBookAdmin,
id="admin.E013",
)
]
self.assertEqual(errors, expected)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ("price", ("name", "subtitle"))
errors = NestedFieldsAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (("Main", {"fields": ("price", ("name", "subtitle"))}),)
errors = NestedFieldsetAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_explicit_through_override(self):
"""
        Regression test for #12209 -- If the explicitly provided through model
        is specified as a string, the admin should still be able to use
        Model.m2m_field.through.
"""
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
errors = BookAdmin(Book, AdminSite()).check()
self.assertEqual(errors, [])
def test_non_model_fields(self):
"""
        Regression test ensuring ModelAdmin.fields can contain non-model
        fields (support for this broke with r11737).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ["title", "extra_data"]
errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_non_model_first_field(self):
"""
        Regression test ensuring ModelAdmin.fields can handle its first
        element being a non-model field (fix for the UnboundLocalError
        introduced with r16225).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
fields = "__all__"
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ["extra_data", "title"]
errors = FieldsOnFormOnlyAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_check_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fields = ["state", ["state"]]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
obj=MyModelAdmin,
id="admin.E006",
)
]
self.assertEqual(errors, expected)
def test_check_fieldset_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
(None, {"fields": ["title", "album", ("title", "album")]}),
]
errors = MyModelAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[0][1]'.",
obj=MyModelAdmin,
id="admin.E012",
)
]
self.assertEqual(errors, expected)
def test_list_filter_works_on_through_field_even_when_apps_not_ready(self):
"""
Ensure list_filter can access reverse fields even when the app registry
is not ready; refs #24146.
"""
class BookAdminWithListFilter(admin.ModelAdmin):
list_filter = ["authorsbooks__featured"]
# Temporarily pretending apps are not ready yet. This issue can happen
# if the value of 'list_filter' refers to a 'through__field'.
Book._meta.apps.ready = False
try:
errors = BookAdminWithListFilter(Book, AdminSite()).check()
self.assertEqual(errors, [])
finally:
Book._meta.apps.ready = True
|
7d9cbe795455d930c2e5bc298366f8f907d6f43bd3c207dbc132a51ff7af96d9 | import copy
import multiprocessing
import unittest
from unittest import mock
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connection, connections
from django.test import SimpleTestCase
@unittest.skipUnless(connection.vendor == "sqlite", "SQLite tests")
class TestDbSignatureTests(SimpleTestCase):
def test_custom_test_name(self):
test_connection = copy.copy(connections[DEFAULT_DB_ALIAS])
test_connection.settings_dict = copy.deepcopy(
connections[DEFAULT_DB_ALIAS].settings_dict
)
test_connection.settings_dict["NAME"] = None
test_connection.settings_dict["TEST"]["NAME"] = "custom.sqlite.db"
signature = test_connection.creation_class(test_connection).test_db_signature()
self.assertEqual(signature, (None, "custom.sqlite.db"))
def test_get_test_db_clone_settings_name(self):
test_connection = copy.copy(connections[DEFAULT_DB_ALIAS])
test_connection.settings_dict = copy.deepcopy(
connections[DEFAULT_DB_ALIAS].settings_dict,
)
tests = [
("test.sqlite3", "test_1.sqlite3"),
("test", "test_1"),
]
for test_db_name, expected_clone_name in tests:
with self.subTest(test_db_name=test_db_name):
test_connection.settings_dict["NAME"] = test_db_name
test_connection.settings_dict["TEST"]["NAME"] = test_db_name
creation_class = test_connection.creation_class(test_connection)
clone_settings_dict = creation_class.get_test_db_clone_settings("1")
self.assertEqual(clone_settings_dict["NAME"], expected_clone_name)
@mock.patch.object(multiprocessing, "get_start_method", return_value="forkserver")
def test_get_test_db_clone_settings_not_supported(self, *mocked_objects):
msg = "Cloning with start method 'forkserver' is not supported."
with self.assertRaisesMessage(NotSupportedError, msg):
connection.creation.get_test_db_clone_settings(1)
|
c2eeec23e28d8d2db5acd3c8b92339623db3121dc80913d38138d82e1c78472d | import os
import re
from io import StringIO
from django.contrib.gis.gdal import GDAL_VERSION, Driver, GDALException
from django.contrib.gis.utils.ogrinspect import ogrinspect
from django.core.management import call_command
from django.db import connection, connections
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import modify_settings
from ..test_data import TEST_DATA
from .models import AllOGRFields
class InspectDbTests(TestCase):
def test_geom_columns(self):
"""
Test the geo-enabled inspectdb command.
"""
out = StringIO()
call_command(
"inspectdb",
table_name_filter=lambda tn: tn == "inspectapp_allogrfields",
stdout=out,
)
output = out.getvalue()
if connection.features.supports_geometry_field_introspection:
self.assertIn("geom = models.PolygonField()", output)
self.assertIn("point = models.PointField()", output)
else:
self.assertIn("geom = models.GeometryField(", output)
self.assertIn("point = models.GeometryField(", output)
@skipUnlessDBFeature("supports_3d_storage")
def test_3d_columns(self):
out = StringIO()
call_command(
"inspectdb",
table_name_filter=lambda tn: tn == "inspectapp_fields3d",
stdout=out,
)
output = out.getvalue()
if connection.features.supports_geometry_field_introspection:
self.assertIn("point = models.PointField(dim=3)", output)
if connection.features.supports_geography:
self.assertIn(
"pointg = models.PointField(geography=True, dim=3)", output
)
else:
self.assertIn("pointg = models.PointField(dim=3)", output)
self.assertIn("line = models.LineStringField(dim=3)", output)
self.assertIn("poly = models.PolygonField(dim=3)", output)
else:
self.assertIn("point = models.GeometryField(", output)
self.assertIn("pointg = models.GeometryField(", output)
self.assertIn("line = models.GeometryField(", output)
self.assertIn("poly = models.GeometryField(", output)
@modify_settings(
INSTALLED_APPS={"append": "django.contrib.gis"},
)
class OGRInspectTest(SimpleTestCase):
maxDiff = 1024
def test_poly(self):
shp_file = os.path.join(TEST_DATA, "test_poly", "test_poly.shp")
model_def = ogrinspect(shp_file, "MyModel")
expected = [
"# This is an auto-generated Django model module created by ogrinspect.",
"from django.contrib.gis.db import models",
"",
"",
"class MyModel(models.Model):",
" float = models.FloatField()",
" int = models.BigIntegerField()",
" str = models.CharField(max_length=80)",
" geom = models.PolygonField()",
]
self.assertEqual(model_def, "\n".join(expected))
def test_poly_multi(self):
shp_file = os.path.join(TEST_DATA, "test_poly", "test_poly.shp")
model_def = ogrinspect(shp_file, "MyModel", multi_geom=True)
self.assertIn("geom = models.MultiPolygonField()", model_def)
# Same test with a 25D-type geometry field
shp_file = os.path.join(TEST_DATA, "gas_lines", "gas_leitung.shp")
model_def = ogrinspect(shp_file, "MyModel", multi_geom=True)
srid = "-1" if GDAL_VERSION < (2, 3) else "31253"
self.assertIn("geom = models.MultiLineStringField(srid=%s)" % srid, model_def)
def test_date_field(self):
shp_file = os.path.join(TEST_DATA, "cities", "cities.shp")
model_def = ogrinspect(shp_file, "City")
expected = [
"# This is an auto-generated Django model module created by ogrinspect.",
"from django.contrib.gis.db import models",
"",
"",
"class City(models.Model):",
" name = models.CharField(max_length=80)",
" population = models.BigIntegerField()",
" density = models.FloatField()",
" created = models.DateField()",
" geom = models.PointField()",
]
self.assertEqual(model_def, "\n".join(expected))
def test_time_field(self):
        # Get the database identifier used by OGR; if None is returned, GDAL
        # doesn't have the required driver support compiled in.
ogr_db = get_ogr_db_string()
if not ogr_db:
self.skipTest("Unable to setup an OGR connection to your database")
try:
            # GDAL currently does not support writing OGRTime fields to
            # shapefiles, so an actual database connection is needed.
model_def = ogrinspect(
ogr_db,
"Measurement",
layer_key=AllOGRFields._meta.db_table,
decimal=["f_decimal"],
)
except GDALException:
self.skipTest("Unable to setup an OGR connection to your database")
self.assertTrue(
model_def.startswith(
"# This is an auto-generated Django model module created by "
"ogrinspect.\n"
"from django.contrib.gis.db import models\n"
"\n"
"\n"
"class Measurement(models.Model):\n"
)
)
# The ordering of model fields might vary depending on several factors
# (version of GDAL, etc.).
if connection.vendor == "sqlite" and GDAL_VERSION < (3, 4):
# SpatiaLite introspection is somewhat lacking on GDAL < 3.4 (#29461).
self.assertIn(" f_decimal = models.CharField(max_length=0)", model_def)
else:
self.assertIn(
" f_decimal = models.DecimalField(max_digits=0, decimal_places=0)",
model_def,
)
self.assertIn(" f_int = models.IntegerField()", model_def)
if not connection.ops.mariadb:
# Probably a bug between GDAL and MariaDB on time fields.
self.assertIn(" f_datetime = models.DateTimeField()", model_def)
self.assertIn(" f_time = models.TimeField()", model_def)
if connection.vendor == "sqlite" and GDAL_VERSION < (3, 4):
self.assertIn(" f_float = models.CharField(max_length=0)", model_def)
else:
self.assertIn(" f_float = models.FloatField()", model_def)
max_length = 0 if connection.vendor == "sqlite" else 10
self.assertIn(
" f_char = models.CharField(max_length=%s)" % max_length, model_def
)
self.assertIn(" f_date = models.DateField()", model_def)
# Some backends may have srid=-1
self.assertIsNotNone(
re.search(r" geom = models.PolygonField\(([^\)])*\)", model_def)
)
def test_management_command(self):
shp_file = os.path.join(TEST_DATA, "cities", "cities.shp")
out = StringIO()
call_command("ogrinspect", shp_file, "City", stdout=out)
output = out.getvalue()
self.assertIn("class City(models.Model):", output)
def test_mapping_option(self):
expected = (
" geom = models.PointField()\n"
"\n"
"\n"
"# Auto-generated `LayerMapping` dictionary for City model\n"
"city_mapping = {\n"
" 'name': 'Name',\n"
" 'population': 'Population',\n"
" 'density': 'Density',\n"
" 'created': 'Created',\n"
" 'geom': 'POINT',\n"
"}\n"
)
shp_file = os.path.join(TEST_DATA, "cities", "cities.shp")
out = StringIO()
call_command("ogrinspect", shp_file, "--mapping", "City", stdout=out)
self.assertIn(expected, out.getvalue())
def get_ogr_db_string():
"""
Construct the DB string that GDAL will use to inspect the database.
GDAL will create its own connection to the database, so we re-use the
connection settings from the Django test.
"""
db = connections.settings["default"]
    # Map from the Django backend into the OGR driver name and database identifier.
# https://gdal.org/drivers/vector/
#
# TODO: Support Oracle (OCI).
drivers = {
"django.contrib.gis.db.backends.postgis": (
"PostgreSQL",
"PG:dbname='%(db_name)s'",
" ",
),
"django.contrib.gis.db.backends.mysql": ("MySQL", 'MYSQL:"%(db_name)s"', ","),
"django.contrib.gis.db.backends.spatialite": ("SQLite", "%(db_name)s", ""),
}
db_engine = db["ENGINE"]
if db_engine not in drivers:
return None
drv_name, db_str, param_sep = drivers[db_engine]
# Ensure that GDAL library has driver support for the database.
try:
Driver(drv_name)
except GDALException:
return None
    # In-memory SQLite/SpatiaLite databases can't be opened by OGR.
if db["NAME"] == ":memory:":
return None
# Build the params of the OGR database connection string
params = [db_str % {"db_name": db["NAME"]}]
def add(key, template):
value = db.get(key, None)
        # Don't add the parameter if it is not in Django's settings.
if value:
params.append(template % value)
add("HOST", "host='%s'")
add("PORT", "port='%s'")
add("USER", "user='%s'")
add("PASSWORD", "password='%s'")
return param_sep.join(params)
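# For a PostGIS backend, get_ogr_db_string() returns something like
# "PG:dbname='geodjango' host='localhost' port='5432' user='gis'" -- the
# values here are hypothetical; the actual string is built from the test
# database settings above.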
|
1a1f16087dd705bc830df15d2d5a081ca6bccbf311731ee80f67f5bd302ddd63 | import os
import shutil
import struct
import tempfile
import zipfile
from unittest import mock
from django.contrib.gis.gdal import GDALRaster, SpatialReference
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.raster.band import GDALBand
from django.contrib.gis.shortcuts import numpy
from django.test import SimpleTestCase
from ..data.rasters.textrasters import JSON_RASTER
class GDALRasterTests(SimpleTestCase):
"""
Test a GDALRaster instance created from a file (GeoTiff).
"""
def setUp(self):
self.rs_path = os.path.join(
os.path.dirname(__file__), "../data/rasters/raster.tif"
)
self.rs = GDALRaster(self.rs_path)
def test_rs_name_repr(self):
self.assertEqual(self.rs_path, self.rs.name)
self.assertRegex(repr(self.rs), r"<Raster object at 0x\w+>")
def test_rs_driver(self):
self.assertEqual(self.rs.driver.name, "GTiff")
def test_rs_size(self):
self.assertEqual(self.rs.width, 163)
self.assertEqual(self.rs.height, 174)
def test_rs_srs(self):
self.assertEqual(self.rs.srs.srid, 3086)
self.assertEqual(self.rs.srs.units, (1.0, "metre"))
def test_rs_srid(self):
rast = GDALRaster(
{
"width": 16,
"height": 16,
"srid": 4326,
}
)
self.assertEqual(rast.srid, 4326)
rast.srid = 3086
self.assertEqual(rast.srid, 3086)
def test_geotransform_and_friends(self):
        # Assert correct values for the file-based raster.
self.assertEqual(
self.rs.geotransform,
[511700.4680706557, 100.0, 0.0, 435103.3771231986, 0.0, -100.0],
)
self.assertEqual(self.rs.origin, [511700.4680706557, 435103.3771231986])
self.assertEqual(self.rs.origin.x, 511700.4680706557)
self.assertEqual(self.rs.origin.y, 435103.3771231986)
self.assertEqual(self.rs.scale, [100.0, -100.0])
self.assertEqual(self.rs.scale.x, 100.0)
self.assertEqual(self.rs.scale.y, -100.0)
self.assertEqual(self.rs.skew, [0, 0])
self.assertEqual(self.rs.skew.x, 0)
self.assertEqual(self.rs.skew.y, 0)
        # Create an in-memory raster and change its geotransform values.
rsmem = GDALRaster(JSON_RASTER)
# geotransform accepts both floats and ints
rsmem.geotransform = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
self.assertEqual(rsmem.geotransform, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
rsmem.geotransform = range(6)
self.assertEqual(rsmem.geotransform, [float(x) for x in range(6)])
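        # GDAL's affine geotransform [x0, sx, kx, y0, ky, sy] maps pixel to
        # georeferenced coordinates:
        #   x = x0 + col * sx + row * kx
        #   y = y0 + col * ky + row * sy
        # so the origin, scale, and skew checked below are views into it.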
self.assertEqual(rsmem.origin, [0, 3])
self.assertEqual(rsmem.origin.x, 0)
self.assertEqual(rsmem.origin.y, 3)
self.assertEqual(rsmem.scale, [1, 5])
self.assertEqual(rsmem.scale.x, 1)
self.assertEqual(rsmem.scale.y, 5)
self.assertEqual(rsmem.skew, [2, 4])
self.assertEqual(rsmem.skew.x, 2)
self.assertEqual(rsmem.skew.y, 4)
self.assertEqual(rsmem.width, 5)
self.assertEqual(rsmem.height, 5)
def test_geotransform_bad_inputs(self):
rsmem = GDALRaster(JSON_RASTER)
error_geotransforms = [
[1, 2],
[1, 2, 3, 4, 5, "foo"],
[1, 2, 3, 4, 5, 6, "foo"],
]
msg = "Geotransform must consist of 6 numeric values."
for geotransform in error_geotransforms:
with self.subTest(i=geotransform), self.assertRaisesMessage(
ValueError, msg
):
rsmem.geotransform = geotransform
def test_rs_extent(self):
self.assertEqual(
self.rs.extent,
(
511700.4680706557,
417703.3771231986,
528000.4680706557,
435103.3771231986,
),
)
def test_rs_bands(self):
self.assertEqual(len(self.rs.bands), 1)
self.assertIsInstance(self.rs.bands[0], GDALBand)
def test_memory_based_raster_creation(self):
# Create uint8 raster with full pixel data range (0-255)
rast = GDALRaster(
{
"datatype": 1,
"width": 16,
"height": 16,
"srid": 4326,
"bands": [
{
"data": range(256),
"nodata_value": 255,
}
],
}
)
# Get array from raster
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
# Assert data is same as original input
self.assertEqual(result, list(range(256)))
def test_file_based_raster_creation(self):
# Prepare tempfile
rstfile = tempfile.NamedTemporaryFile(suffix=".tif")
# Create file-based raster from scratch
GDALRaster(
{
"datatype": self.rs.bands[0].datatype(),
"driver": "tif",
"name": rstfile.name,
"width": 163,
"height": 174,
"nr_of_bands": 1,
"srid": self.rs.srs.wkt,
"origin": (self.rs.origin.x, self.rs.origin.y),
"scale": (self.rs.scale.x, self.rs.scale.y),
"skew": (self.rs.skew.x, self.rs.skew.y),
"bands": [
{
"data": self.rs.bands[0].data(),
"nodata_value": self.rs.bands[0].nodata_value,
}
],
}
)
# Reload newly created raster from file
restored_raster = GDALRaster(rstfile.name)
        # Presence of TOWGS84 depends on GDAL/Proj versions.
self.assertEqual(
restored_raster.srs.wkt.replace("TOWGS84[0,0,0,0,0,0,0],", ""),
self.rs.srs.wkt.replace("TOWGS84[0,0,0,0,0,0,0],", ""),
)
self.assertEqual(restored_raster.geotransform, self.rs.geotransform)
if numpy:
numpy.testing.assert_equal(
restored_raster.bands[0].data(), self.rs.bands[0].data()
)
else:
self.assertEqual(restored_raster.bands[0].data(), self.rs.bands[0].data())
def test_nonexistent_file(self):
msg = 'Unable to read raster source input "nonexistent.tif".'
with self.assertRaisesMessage(GDALException, msg):
GDALRaster("nonexistent.tif")
def test_vsi_raster_creation(self):
# Open a raster as a file object.
with open(self.rs_path, "rb") as dat:
# Instantiate a raster from the file binary buffer.
vsimem = GDALRaster(dat.read())
# The data of the in-memory file is equal to the source file.
result = vsimem.bands[0].data()
target = self.rs.bands[0].data()
if numpy:
result = result.flatten().tolist()
target = target.flatten().tolist()
self.assertEqual(result, target)
def test_vsi_raster_deletion(self):
path = "/vsimem/raster.tif"
# Create a vsi-based raster from scratch.
vsimem = GDALRaster(
{
"name": path,
"driver": "tif",
"width": 4,
"height": 4,
"srid": 4326,
"bands": [
{
"data": range(16),
}
],
}
)
# The virtual file exists.
rst = GDALRaster(path)
self.assertEqual(rst.width, 4)
# Delete GDALRaster.
del vsimem
del rst
# The virtual file has been removed.
msg = 'Could not open the datasource at "/vsimem/raster.tif"'
with self.assertRaisesMessage(GDALException, msg):
GDALRaster(path)
def test_vsi_invalid_buffer_error(self):
msg = "Failed creating VSI raster from the input buffer."
with self.assertRaisesMessage(GDALException, msg):
GDALRaster(b"not-a-raster-buffer")
def test_vsi_buffer_property(self):
# Create a vsi-based raster from scratch.
rast = GDALRaster(
{
"name": "/vsimem/raster.tif",
"driver": "tif",
"width": 4,
"height": 4,
"srid": 4326,
"bands": [
{
"data": range(16),
}
],
}
)
# Do a round trip from raster to buffer to raster.
result = GDALRaster(rast.vsi_buffer).bands[0].data()
if numpy:
result = result.flatten().tolist()
        # The band data matches the original input after the round trip.
self.assertEqual(result, list(range(16)))
# The vsi buffer is None for rasters that are not vsi based.
self.assertIsNone(self.rs.vsi_buffer)
def test_vsi_vsizip_filesystem(self):
rst_zipfile = tempfile.NamedTemporaryFile(suffix=".zip")
with zipfile.ZipFile(rst_zipfile, mode="w") as zf:
zf.write(self.rs_path, "raster.tif")
rst_path = "/vsizip/" + os.path.join(rst_zipfile.name, "raster.tif")
rst = GDALRaster(rst_path)
self.assertEqual(rst.driver.name, self.rs.driver.name)
self.assertEqual(rst.name, rst_path)
self.assertIs(rst.is_vsi_based, True)
self.assertIsNone(rst.vsi_buffer)
def test_offset_size_and_shape_on_raster_creation(self):
rast = GDALRaster(
{
"datatype": 1,
"width": 4,
"height": 4,
"srid": 4326,
"bands": [
{
"data": (1,),
"offset": (1, 1),
"size": (2, 2),
"shape": (1, 1),
"nodata_value": 2,
}
],
}
)
# Get array from raster.
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
# Band data is equal to nodata value except on input block of ones.
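        # As a 4x4 grid (2 = nodata fill, 1 = the replicated input block):
        #   2 2 2 2
        #   2 1 1 2
        #   2 1 1 2
        #   2 2 2 2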
self.assertEqual(result, [2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 2, 2, 2])
def test_set_nodata_value_on_raster_creation(self):
# Create raster filled with nodata values.
rast = GDALRaster(
{
"datatype": 1,
"width": 2,
"height": 2,
"srid": 4326,
"bands": [{"nodata_value": 23}],
}
)
# Get array from raster.
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
# All band data is equal to nodata value.
self.assertEqual(result, [23] * 4)
def test_set_nodata_none_on_raster_creation(self):
# Create raster without data and without nodata value.
rast = GDALRaster(
{
"datatype": 1,
"width": 2,
"height": 2,
"srid": 4326,
"bands": [{"nodata_value": None}],
}
)
# Get array from raster.
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
# Band data is equal to zero because no nodata value has been specified.
self.assertEqual(result, [0] * 4)
def test_raster_metadata_property(self):
data = self.rs.metadata
self.assertEqual(data["DEFAULT"], {"AREA_OR_POINT": "Area"})
self.assertEqual(data["IMAGE_STRUCTURE"], {"INTERLEAVE": "BAND"})
# Create file-based raster from scratch
source = GDALRaster(
{
"datatype": 1,
"width": 2,
"height": 2,
"srid": 4326,
"bands": [{"data": range(4), "nodata_value": 99}],
}
)
# Set metadata on raster and on a band.
metadata = {
"DEFAULT": {"OWNER": "Django", "VERSION": "1.0", "AREA_OR_POINT": "Point"},
}
source.metadata = metadata
source.bands[0].metadata = metadata
self.assertEqual(source.metadata["DEFAULT"], metadata["DEFAULT"])
self.assertEqual(source.bands[0].metadata["DEFAULT"], metadata["DEFAULT"])
# Update metadata on raster.
metadata = {
"DEFAULT": {"VERSION": "2.0"},
}
source.metadata = metadata
self.assertEqual(source.metadata["DEFAULT"]["VERSION"], "2.0")
# Remove metadata on raster.
metadata = {
"DEFAULT": {"OWNER": None},
}
source.metadata = metadata
self.assertNotIn("OWNER", source.metadata["DEFAULT"])
def test_raster_info_accessor(self):
infos = self.rs.info
# Data
info_lines = [line.strip() for line in infos.split("\n") if line.strip() != ""]
for line in [
"Driver: GTiff/GeoTIFF",
"Files: {}".format(self.rs_path),
"Size is 163, 174",
"Origin = (511700.468070655711927,435103.377123198588379)",
"Pixel Size = (100.000000000000000,-100.000000000000000)",
"Metadata:",
"AREA_OR_POINT=Area",
"Image Structure Metadata:",
"INTERLEAVE=BAND",
"Band 1 Block=163x50 Type=Byte, ColorInterp=Gray",
"NoData Value=15",
]:
self.assertIn(line, info_lines)
for line in [
r"Upper Left \( 511700.468, 435103.377\) "
r'\( 82d51\'46.1\d"W, 27d55\' 1.5\d"N\)',
r"Lower Left \( 511700.468, 417703.377\) "
r'\( 82d51\'52.0\d"W, 27d45\'37.5\d"N\)',
r"Upper Right \( 528000.468, 435103.377\) "
r'\( 82d41\'48.8\d"W, 27d54\'56.3\d"N\)',
r"Lower Right \( 528000.468, 417703.377\) "
r'\( 82d41\'55.5\d"W, 27d45\'32.2\d"N\)',
r"Center \( 519850.468, 426403.377\) "
r'\( 82d46\'50.6\d"W, 27d50\'16.9\d"N\)',
]:
self.assertRegex(infos, line)
# CRS (skip the name because string depends on the GDAL/Proj versions).
self.assertIn("NAD83 / Florida GDL Albers", infos)
def test_compressed_file_based_raster_creation(self):
rstfile = tempfile.NamedTemporaryFile(suffix=".tif")
# Make a compressed copy of an existing raster.
compressed = self.rs.warp(
{"papsz_options": {"compress": "packbits"}, "name": rstfile.name}
)
# Check physically if compression worked.
        self.assertLess(
            os.path.getsize(compressed.name), os.path.getsize(self.rs.name)
        )
# Create file-based raster with options from scratch.
compressed = GDALRaster(
{
"datatype": 1,
"driver": "tif",
"name": rstfile.name,
"width": 40,
"height": 40,
"srid": 3086,
"origin": (500000, 400000),
"scale": (100, -100),
"skew": (0, 0),
"bands": [
{
"data": range(40 ^ 2),
"nodata_value": 255,
}
],
"papsz_options": {
"compress": "packbits",
"pixeltype": "signedbyte",
"blockxsize": 23,
"blockysize": 23,
},
}
)
# Check if options used on creation are stored in metadata.
# Reopening the raster ensures that all metadata has been written
# to the file.
compressed = GDALRaster(compressed.name)
self.assertEqual(
compressed.metadata["IMAGE_STRUCTURE"]["COMPRESSION"],
"PACKBITS",
)
self.assertEqual(
compressed.bands[0].metadata["IMAGE_STRUCTURE"]["PIXELTYPE"], "SIGNEDBYTE"
)
self.assertIn("Block=40x23", compressed.info)
def test_raster_warp(self):
# Create in memory raster
source = GDALRaster(
{
"datatype": 1,
"driver": "MEM",
"name": "sourceraster",
"width": 4,
"height": 4,
"nr_of_bands": 1,
"srid": 3086,
"origin": (500000, 400000),
"scale": (100, -100),
"skew": (0, 0),
"bands": [
{
"data": range(16),
"nodata_value": 255,
}
],
}
)
# Test altering the scale, width, and height of a raster
data = {
"scale": [200, -200],
"width": 2,
"height": 2,
}
target = source.warp(data)
self.assertEqual(target.width, data["width"])
self.assertEqual(target.height, data["height"])
self.assertEqual(target.scale, data["scale"])
self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
self.assertEqual(target.name, "sourceraster_copy.MEM")
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
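        # With the default nearest-neighbour resampling, each 200x200 target
        # pixel takes the value of a single source pixel -- here the ones at
        # (row, col) positions (1, 1), (1, 3), (3, 1), and (3, 3) of range(16).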
self.assertEqual(result, [5, 7, 13, 15])
# Test altering the name and datatype (to float)
data = {
"name": "/path/to/targetraster.tif",
"datatype": 6,
}
target = source.warp(data)
self.assertEqual(target.bands[0].datatype(), 6)
self.assertEqual(target.name, "/path/to/targetraster.tif")
self.assertEqual(target.driver.name, "MEM")
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
self.assertEqual(
result,
[
0.0,
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
],
)
def test_raster_warp_nodata_zone(self):
# Create in memory raster.
source = GDALRaster(
{
"datatype": 1,
"driver": "MEM",
"width": 4,
"height": 4,
"srid": 3086,
"origin": (500000, 400000),
"scale": (100, -100),
"skew": (0, 0),
"bands": [
{
"data": range(16),
"nodata_value": 23,
}
],
}
)
# Warp raster onto a location that does not cover any pixels of the original.
result = source.warp({"origin": (200000, 200000)}).bands[0].data()
if numpy:
result = result.flatten().tolist()
# The result is an empty raster filled with the correct nodata value.
self.assertEqual(result, [23] * 16)
def test_raster_clone(self):
rstfile = tempfile.NamedTemporaryFile(suffix=".tif")
tests = [
("MEM", "", 23), # In memory raster.
("tif", rstfile.name, 99), # In file based raster.
]
for driver, name, nodata_value in tests:
with self.subTest(driver=driver):
source = GDALRaster(
{
"datatype": 1,
"driver": driver,
"name": name,
"width": 4,
"height": 4,
"srid": 3086,
"origin": (500000, 400000),
"scale": (100, -100),
"skew": (0, 0),
"bands": [
{
"data": range(16),
"nodata_value": nodata_value,
}
],
}
)
clone = source.clone()
self.assertNotEqual(clone.name, source.name)
self.assertEqual(clone._write, source._write)
self.assertEqual(clone.srs.srid, source.srs.srid)
self.assertEqual(clone.width, source.width)
self.assertEqual(clone.height, source.height)
self.assertEqual(clone.origin, source.origin)
self.assertEqual(clone.scale, source.scale)
self.assertEqual(clone.skew, source.skew)
self.assertIsNot(clone, source)
def test_raster_transform(self):
tests = [
3086,
"3086",
SpatialReference(3086),
]
for srs in tests:
with self.subTest(srs=srs):
# Prepare tempfile and nodata value.
rstfile = tempfile.NamedTemporaryFile(suffix=".tif")
ndv = 99
                # Create a file-based raster.
source = GDALRaster(
{
"datatype": 1,
"driver": "tif",
"name": rstfile.name,
"width": 5,
"height": 5,
"nr_of_bands": 1,
"srid": 4326,
"origin": (-5, 5),
"scale": (2, -2),
"skew": (0, 0),
"bands": [
{
"data": range(25),
"nodata_value": ndv,
}
],
}
)
target = source.transform(srs)
# Reload data from disk.
target = GDALRaster(target.name)
self.assertEqual(target.srs.srid, 3086)
self.assertEqual(target.width, 7)
self.assertEqual(target.height, 7)
self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
self.assertAlmostEqual(target.origin[0], 9124842.791079799, 3)
self.assertAlmostEqual(target.origin[1], 1589911.6476407414, 3)
self.assertAlmostEqual(target.scale[0], 223824.82664250192, 3)
self.assertAlmostEqual(target.scale[1], -223824.82664250192, 3)
self.assertEqual(target.skew, [0, 0])
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
# The reprojection of a raster that spans over a large area
# skews the data matrix and might introduce nodata values.
self.assertEqual(
result,
[
ndv,
ndv,
ndv,
ndv,
4,
ndv,
ndv,
ndv,
ndv,
2,
3,
9,
ndv,
ndv,
ndv,
1,
2,
8,
13,
19,
ndv,
0,
6,
6,
12,
18,
18,
24,
ndv,
10,
11,
16,
22,
23,
ndv,
ndv,
ndv,
15,
21,
22,
ndv,
ndv,
ndv,
ndv,
20,
ndv,
ndv,
ndv,
ndv,
],
)
def test_raster_transform_clone(self):
with mock.patch.object(GDALRaster, "clone") as mocked_clone:
            # Create a file-based raster.
rstfile = tempfile.NamedTemporaryFile(suffix=".tif")
source = GDALRaster(
{
"datatype": 1,
"driver": "tif",
"name": rstfile.name,
"width": 5,
"height": 5,
"nr_of_bands": 1,
"srid": 4326,
"origin": (-5, 5),
"scale": (2, -2),
"skew": (0, 0),
"bands": [
{
"data": range(25),
"nodata_value": 99,
}
],
}
)
# transform() returns a clone because it is the same SRID and
# driver.
source.transform(4326)
self.assertEqual(mocked_clone.call_count, 1)
def test_raster_transform_clone_name(self):
        # Create a file-based raster.
rstfile = tempfile.NamedTemporaryFile(suffix=".tif")
source = GDALRaster(
{
"datatype": 1,
"driver": "tif",
"name": rstfile.name,
"width": 5,
"height": 5,
"nr_of_bands": 1,
"srid": 4326,
"origin": (-5, 5),
"scale": (2, -2),
"skew": (0, 0),
"bands": [
{
"data": range(25),
"nodata_value": 99,
}
],
}
)
clone_name = rstfile.name + "_respect_name.GTiff"
target = source.transform(4326, name=clone_name)
self.assertEqual(target.name, clone_name)
class GDALBandTests(SimpleTestCase):
rs_path = os.path.join(os.path.dirname(__file__), "../data/rasters/raster.tif")
def test_band_data(self):
rs = GDALRaster(self.rs_path)
band = rs.bands[0]
self.assertEqual(band.width, 163)
self.assertEqual(band.height, 174)
self.assertEqual(band.description, "")
self.assertEqual(band.datatype(), 1)
self.assertEqual(band.datatype(as_string=True), "GDT_Byte")
self.assertEqual(band.color_interp(), 1)
self.assertEqual(band.color_interp(as_string=True), "GCI_GrayIndex")
self.assertEqual(band.nodata_value, 15)
if numpy:
data = band.data()
assert_array = numpy.loadtxt(
os.path.join(
os.path.dirname(__file__), "../data/rasters/raster.numpy.txt"
)
)
numpy.testing.assert_equal(data, assert_array)
self.assertEqual(data.shape, (band.height, band.width))
def test_band_statistics(self):
with tempfile.TemporaryDirectory() as tmp_dir:
rs_path = os.path.join(tmp_dir, "raster.tif")
shutil.copyfile(self.rs_path, rs_path)
rs = GDALRaster(rs_path)
band = rs.bands[0]
pam_file = rs_path + ".aux.xml"
smin, smax, smean, sstd = band.statistics(approximate=True)
self.assertEqual(smin, 0)
self.assertEqual(smax, 9)
self.assertAlmostEqual(smean, 2.842331288343558)
self.assertAlmostEqual(sstd, 2.3965567248965356)
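            # approximate=True lets GDAL estimate statistics from a subsample
            # (hence the slightly different values than the exact computation
            # below), while refresh=True forces a full recomputation.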
smin, smax, smean, sstd = band.statistics(approximate=False, refresh=True)
self.assertEqual(smin, 0)
self.assertEqual(smax, 9)
self.assertAlmostEqual(smean, 2.828326634228898)
self.assertAlmostEqual(sstd, 2.4260526986669095)
self.assertEqual(band.min, 0)
self.assertEqual(band.max, 9)
self.assertAlmostEqual(band.mean, 2.828326634228898)
self.assertAlmostEqual(band.std, 2.4260526986669095)
            # Statistics are persisted into the PAM file when the band closes.
rs = band = None
self.assertTrue(os.path.isfile(pam_file))
def test_read_mode_error(self):
# Open raster in read mode
rs = GDALRaster(self.rs_path, write=False)
band = rs.bands[0]
        # Setting attributes on a read-only raster raises an exception in the
        # _flush method.
try:
with self.assertRaises(GDALException):
setattr(band, "nodata_value", 10)
finally:
pam_file = self.rs_path + ".aux.xml"
if os.path.isfile(pam_file):
os.remove(pam_file)
def test_band_data_setters(self):
# Create in-memory raster and get band
rsmem = GDALRaster(
{
"datatype": 1,
"driver": "MEM",
"name": "mem_rst",
"width": 10,
"height": 10,
"nr_of_bands": 1,
"srid": 4326,
}
)
bandmem = rsmem.bands[0]
# Set nodata value
bandmem.nodata_value = 99
self.assertEqual(bandmem.nodata_value, 99)
# Set data for entire dataset
bandmem.data(range(100))
if numpy:
numpy.testing.assert_equal(
bandmem.data(), numpy.arange(100).reshape(10, 10)
)
else:
self.assertEqual(bandmem.data(), list(range(100)))
# Prepare data for setting values in subsequent tests
block = list(range(100, 104))
packed_block = struct.pack("<" + "B B B B", *block)
# Set data from list
bandmem.data(block, (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from packed block
bandmem.data(packed_block, (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from bytes
bandmem.data(bytes(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from bytearray
bandmem.data(bytearray(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from memoryview
bandmem.data(memoryview(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from numpy array
if numpy:
bandmem.data(numpy.array(block, dtype="int8").reshape(2, 2), (1, 1), (2, 2))
numpy.testing.assert_equal(
bandmem.data(offset=(1, 1), size=(2, 2)),
numpy.array(block).reshape(2, 2),
)
# Test json input data
rsmemjson = GDALRaster(JSON_RASTER)
bandmemjson = rsmemjson.bands[0]
if numpy:
numpy.testing.assert_equal(
bandmemjson.data(), numpy.array(range(25)).reshape(5, 5)
)
else:
self.assertEqual(bandmemjson.data(), list(range(25)))
def test_band_statistics_automatic_refresh(self):
rsmem = GDALRaster(
{
"srid": 4326,
"width": 2,
"height": 2,
"bands": [{"data": [0] * 4, "nodata_value": 99}],
}
)
band = rsmem.bands[0]
# Populate statistics cache
self.assertEqual(band.statistics(), (0, 0, 0, 0))
# Change data
band.data([1, 1, 0, 0])
# Statistics are properly updated
self.assertEqual(band.statistics(), (0.0, 1.0, 0.5, 0.5))
# Change nodata_value
band.nodata_value = 0
# Statistics are properly updated
self.assertEqual(band.statistics(), (1.0, 1.0, 1.0, 0.0))
def test_band_statistics_empty_band(self):
rsmem = GDALRaster(
{
"srid": 4326,
"width": 1,
"height": 1,
"bands": [{"data": [0], "nodata_value": 0}],
}
)
self.assertEqual(rsmem.bands[0].statistics(), (None, None, None, None))
def test_band_delete_nodata(self):
rsmem = GDALRaster(
{
"srid": 4326,
"width": 1,
"height": 1,
"bands": [{"data": [0], "nodata_value": 1}],
}
)
rsmem.bands[0].nodata_value = None
self.assertIsNone(rsmem.bands[0].nodata_value)
def test_band_data_replication(self):
band = GDALRaster(
{
"srid": 4326,
"width": 3,
"height": 3,
"bands": [{"data": range(10, 19), "nodata_value": 0}],
}
).bands[0]
# Variations for input (data, shape, expected result).
combos = (
([1], (1, 1), [1] * 9),
(range(3), (1, 3), [0, 0, 0, 1, 1, 1, 2, 2, 2]),
(range(3), (3, 1), [0, 1, 2, 0, 1, 2, 0, 1, 2]),
)
for combo in combos:
band.data(combo[0], shape=combo[1])
if numpy:
numpy.testing.assert_equal(
band.data(), numpy.array(combo[2]).reshape(3, 3)
)
else:
self.assertEqual(band.data(), list(combo[2]))
|