0254599ec2af6a129363d51e921c0b8258cb393d43a7d983ee75d3f50a2bf5a4
import multiprocessing
import os
import shutil
import sqlite3
import sys
from pathlib import Path

from django.db import NotSupportedError
from django.db.backends.base.creation import BaseDatabaseCreation


class DatabaseCreation(BaseDatabaseCreation):
    @staticmethod
    def is_in_memory_db(database_name):
        return not isinstance(database_name, Path) and (
            database_name == ":memory:" or "mode=memory" in database_name
        )

    def _get_test_db_name(self):
        test_database_name = self.connection.settings_dict["TEST"]["NAME"] or ":memory:"
        if test_database_name == ":memory:":
            return "file:memorydb_%s?mode=memory&cache=shared" % self.connection.alias
        return test_database_name

    def _create_test_db(self, verbosity, autoclobber, keepdb=False):
        test_database_name = self._get_test_db_name()
        if keepdb:
            return test_database_name
        if not self.is_in_memory_db(test_database_name):
            # Erase the old test database
            if verbosity >= 1:
                self.log(
                    "Destroying old test database for alias %s..."
                    % (self._get_database_display_str(verbosity, test_database_name),)
                )
            if os.access(test_database_name, os.F_OK):
                if not autoclobber:
                    confirm = input(
                        "Type 'yes' if you would like to try deleting the test "
                        "database '%s', or 'no' to cancel: " % test_database_name
                    )
                if autoclobber or confirm == "yes":
                    try:
                        os.remove(test_database_name)
                    except Exception as e:
                        self.log("Got an error deleting the old test database: %s" % e)
                        sys.exit(2)
                else:
                    self.log("Tests cancelled.")
                    sys.exit(1)
        return test_database_name

    def get_test_db_clone_settings(self, suffix):
        orig_settings_dict = self.connection.settings_dict
        source_database_name = orig_settings_dict["NAME"]

        if not self.is_in_memory_db(source_database_name):
            root, ext = os.path.splitext(source_database_name)
            return {**orig_settings_dict, "NAME": f"{root}_{suffix}{ext}"}

        start_method = multiprocessing.get_start_method()
        if start_method == "fork":
            return orig_settings_dict
        if start_method == "spawn":
            return {
                **orig_settings_dict,
                "NAME": f"{self.connection.alias}_{suffix}.sqlite3",
            }
        raise NotSupportedError(
            f"Cloning with start method {start_method!r} is not supported."
        )

    def _clone_test_db(self, suffix, verbosity, keepdb=False):
        source_database_name = self.connection.settings_dict["NAME"]
        target_database_name = self.get_test_db_clone_settings(suffix)["NAME"]
        if not self.is_in_memory_db(source_database_name):
            # Erase the old test database
            if os.access(target_database_name, os.F_OK):
                if keepdb:
                    return
                if verbosity >= 1:
                    self.log(
                        "Destroying old test database for alias %s..."
                        % (
                            self._get_database_display_str(
                                verbosity, target_database_name
                            ),
                        )
                    )
                try:
                    os.remove(target_database_name)
                except Exception as e:
                    self.log("Got an error deleting the old test database: %s" % e)
                    sys.exit(2)
            try:
                shutil.copy(source_database_name, target_database_name)
            except Exception as e:
                self.log("Got an error cloning the test database: %s" % e)
                sys.exit(2)
        # Forking automatically makes a copy of an in-memory database.
        # Spawn requires migrating to disk which will be re-opened in
        # setup_worker_connection.
        elif multiprocessing.get_start_method() == "spawn":
            ondisk_db = sqlite3.connect(target_database_name, uri=True)
            self.connection.connection.backup(ondisk_db)
            ondisk_db.close()

    def _destroy_test_db(self, test_database_name, verbosity):
        if test_database_name and not self.is_in_memory_db(test_database_name):
            # Remove the SQLite database file
            os.remove(test_database_name)

    def test_db_signature(self):
        """
        Return a tuple that uniquely identifies a test database.

        This takes into account the special cases of ":memory:" and "" for
        SQLite since the databases will be distinct despite having the same
        TEST NAME. See https://www.sqlite.org/inmemorydb.html
        """
        test_database_name = self._get_test_db_name()
        sig = [self.connection.settings_dict["NAME"]]
        if self.is_in_memory_db(test_database_name):
            sig.append(self.connection.alias)
        else:
            sig.append(test_database_name)
        return tuple(sig)

    def setup_worker_connection(self, _worker_id):
        settings_dict = self.get_test_db_clone_settings(_worker_id)
        # connection.settings_dict must be updated in place for changes to be
        # reflected in django.db.connections. Otherwise new threads would
        # connect to the default database instead of the appropriate clone.
        start_method = multiprocessing.get_start_method()
        if start_method == "fork":
            # Update settings_dict in place.
            self.connection.settings_dict.update(settings_dict)
            self.connection.close()
        elif start_method == "spawn":
            alias = self.connection.alias
            connection_str = (
                f"file:memorydb_{alias}_{_worker_id}?mode=memory&cache=shared"
            )
            source_db = self.connection.Database.connect(
                f"file:{alias}_{_worker_id}.sqlite3", uri=True
            )
            target_db = sqlite3.connect(connection_str, uri=True)
            source_db.backup(target_db)
            source_db.close()
            # Update settings_dict in place.
            self.connection.settings_dict.update(settings_dict)
            self.connection.settings_dict["NAME"] = connection_str
            # Re-open connection to in-memory database before closing copy
            # connection.
            self.connection.connect()
            target_db.close()

        if os.environ.get("RUNNING_DJANGOS_TEST_SUITE") == "true":
            self.mark_expected_failures_and_skips()
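The file above relies on SQLite's named, shared-cache in-memory databases (the file:memorydb_<alias>?mode=memory&cache=shared URI built by _get_test_db_name()). A minimal standalone sketch of that mechanism, using only the stdlib sqlite3 module and a hypothetical "default" alias, not part of the file itself:

# Sketch: two connections opened against the same shared-cache URI see one
# in-memory database, which stays alive as long as any connection holds it.
import sqlite3

uri = "file:memorydb_default?mode=memory&cache=shared"
conn_a = sqlite3.connect(uri, uri=True)
conn_b = sqlite3.connect(uri, uri=True)  # same database, kept alive by conn_a
conn_a.execute("CREATE TABLE t (x)")
conn_a.execute("INSERT INTO t VALUES (1)")
conn_a.commit()
print(conn_b.execute("SELECT x FROM t").fetchall())  # [(1,)]
conn_b.close()
conn_a.close()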
5cd6a6c209db9dcc80765a37ac4a33e317cec1d61d3878cd81a8e02247baebb0
""" Move a file in the safest way possible:: >>> from django.core.files.move import file_move_safe >>> file_move_safe("/tmp/old_file", "/tmp/new_file") """ import os from shutil import copymode, copystat from django.core.files import locks __all__ = ["file_move_safe"] def _samefile(src, dst): # Macintosh, Unix. if hasattr(os.path, "samefile"): try: return os.path.samefile(src, dst) except OSError: return False # All other platforms: check for same pathname. return os.path.normcase(os.path.abspath(src)) == os.path.normcase( os.path.abspath(dst) ) def file_move_safe( old_file_name, new_file_name, chunk_size=1024 * 64, allow_overwrite=False ): """ Move a file from one location to another in the safest way possible. First, try ``os.rename``, which is simple but will break across filesystems. If that fails, stream manually from one file to another in pure Python. If the destination file exists and ``allow_overwrite`` is ``False``, raise ``FileExistsError``. """ # There's no reason to move if we don't have to. if _samefile(old_file_name, new_file_name): return try: if not allow_overwrite and os.access(new_file_name, os.F_OK): raise FileExistsError( "Destination file %s exists and allow_overwrite is False." % new_file_name ) os.rename(old_file_name, new_file_name) return except OSError: # OSError happens with os.rename() if moving to another filesystem or # when moving opened files on certain operating systems. pass # first open the old file, so that it won't go away with open(old_file_name, "rb") as old_file: # now open the new file, not forgetting allow_overwrite fd = os.open( new_file_name, ( os.O_WRONLY | os.O_CREAT | getattr(os, "O_BINARY", 0) | (os.O_EXCL if not allow_overwrite else 0) ), ) try: locks.lock(fd, locks.LOCK_EX) current_chunk = None while current_chunk != b"": current_chunk = old_file.read(chunk_size) os.write(fd, current_chunk) finally: locks.unlock(fd) os.close(fd) try: copystat(old_file_name, new_file_name) except PermissionError: # Certain filesystems (e.g. CIFS) fail to copy the file's metadata if # the type of the destination filesystem isn't the same as the source # filesystem. This also happens with some SELinux-enabled systems. # Ignore that, but try to set basic permissions. try: copymode(old_file_name, new_file_name) except PermissionError: pass try: os.remove(old_file_name) except PermissionError as e: # Certain operating systems (Cygwin and Windows) # fail when deleting opened files, ignore it. (For the # systems where this happens, temporary files will be auto-deleted # on close anyway.) if getattr(e, "winerror", 0) != 32: raise
6c29cefce8f753eb66f6cebb7de4219c316f5027fdbf0f0dcb64f88441cd3921
import argparse
import mimetypes
import os
import posixpath
import shutil
import stat
import tempfile
from importlib import import_module
from urllib.request import build_opener

import django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import (
    find_formatters,
    handle_extensions,
    run_formatters,
)
from django.template import Context, Engine
from django.utils import archive
from django.utils.http import parse_header_parameters
from django.utils.version import get_docs_version


class TemplateCommand(BaseCommand):
    """
    Copy either a Django application layout template or a Django project
    layout template into the specified directory.

    :param style: A color style object (see django.core.management.color).
    :param app_or_project: The string 'app' or 'project'.
    :param name: The name of the application or project.
    :param directory: The directory to which the template should be copied.
    :param options: The additional variables passed to project or app templates.
    """

    requires_system_checks = []
    # The supported URL schemes
    url_schemes = ["http", "https", "ftp"]
    # Rewrite the following suffixes when determining the target filename.
    rewrite_template_suffixes = (
        # Allow shipping invalid .py files without byte-compilation.
        (".py-tpl", ".py"),
    )

    def add_arguments(self, parser):
        parser.add_argument("name", help="Name of the application or project.")
        parser.add_argument(
            "directory", nargs="?", help="Optional destination directory"
        )
        parser.add_argument(
            "--template", help="The path or URL to load the template from."
        )
        parser.add_argument(
            "--extension",
            "-e",
            dest="extensions",
            action="append",
            default=["py"],
            help='The file extension(s) to render (default: "py"). '
            "Separate multiple extensions with commas, or use "
            "-e multiple times.",
        )
        parser.add_argument(
            "--name",
            "-n",
            dest="files",
            action="append",
            default=[],
            help="The file name(s) to render. Separate multiple file names "
            "with commas, or use -n multiple times.",
        )
        parser.add_argument(
            "--exclude",
            "-x",
            action="append",
            default=argparse.SUPPRESS,
            nargs="?",
            const="",
            help=(
                "The directory name(s) to exclude, in addition to .git and "
                "__pycache__. Can be used multiple times."
            ),
        )

    def handle(self, app_or_project, name, target=None, **options):
        self.app_or_project = app_or_project
        self.a_or_an = "an" if app_or_project == "app" else "a"
        self.paths_to_remove = []
        self.verbosity = options["verbosity"]

        self.validate_name(name)

        # if some directory is given, make sure it's nicely expanded
        if target is None:
            top_dir = os.path.join(os.getcwd(), name)
            try:
                os.makedirs(top_dir)
            except FileExistsError:
                raise CommandError("'%s' already exists" % top_dir)
            except OSError as e:
                raise CommandError(e)
        else:
            top_dir = os.path.abspath(os.path.expanduser(target))
            if app_or_project == "app":
                self.validate_name(os.path.basename(top_dir), "directory")
            if not os.path.exists(top_dir):
                raise CommandError(
                    "Destination directory '%s' does not "
                    "exist, please create it first." % top_dir
                )

        # Find formatters, which are external executables, before input
        # from the templates can sneak into the path.
        formatter_paths = find_formatters()

        extensions = tuple(handle_extensions(options["extensions"]))
        extra_files = []
        excluded_directories = [".git", "__pycache__"]
        for file in options["files"]:
            extra_files.extend(map(lambda x: x.strip(), file.split(",")))
        if exclude := options.get("exclude"):
            for directory in exclude:
                excluded_directories.append(directory.strip())
        if self.verbosity >= 2:
            self.stdout.write(
                "Rendering %s template files with extensions: %s"
                % (app_or_project, ", ".join(extensions))
            )
            self.stdout.write(
                "Rendering %s template files with filenames: %s"
                % (app_or_project, ", ".join(extra_files))
            )
        base_name = "%s_name" % app_or_project
        base_subdir = "%s_template" % app_or_project
        base_directory = "%s_directory" % app_or_project
        camel_case_name = "camel_case_%s_name" % app_or_project
        camel_case_value = "".join(x for x in name.title() if x != "_")

        context = Context(
            {
                **options,
                base_name: name,
                base_directory: top_dir,
                camel_case_name: camel_case_value,
                "docs_version": get_docs_version(),
                "django_version": django.__version__,
            },
            autoescape=False,
        )

        # Set up a stub settings environment for template rendering
        if not settings.configured:
            settings.configure()
            django.setup()

        template_dir = self.handle_template(options["template"], base_subdir)
        prefix_length = len(template_dir) + 1

        for root, dirs, files in os.walk(template_dir):
            path_rest = root[prefix_length:]
            relative_dir = path_rest.replace(base_name, name)
            if relative_dir:
                target_dir = os.path.join(top_dir, relative_dir)
                os.makedirs(target_dir, exist_ok=True)

            for dirname in dirs[:]:
                if "exclude" not in options:
                    if dirname.startswith(".") or dirname == "__pycache__":
                        dirs.remove(dirname)
                elif dirname in excluded_directories:
                    dirs.remove(dirname)

            for filename in files:
                if filename.endswith((".pyo", ".pyc", ".py.class")):
                    # Ignore some files as they cause various breakages.
                    continue
                old_path = os.path.join(root, filename)
                new_path = os.path.join(
                    top_dir, relative_dir, filename.replace(base_name, name)
                )
                for old_suffix, new_suffix in self.rewrite_template_suffixes:
                    if new_path.endswith(old_suffix):
                        new_path = new_path[: -len(old_suffix)] + new_suffix
                        break  # Only rewrite once

                if os.path.exists(new_path):
                    raise CommandError(
                        "%s already exists. Overlaying %s %s into an existing "
                        "directory won't replace conflicting files."
                        % (
                            new_path,
                            self.a_or_an,
                            app_or_project,
                        )
                    )

                # Only render the Python files, as we don't want to
                # accidentally render Django templates files
                if new_path.endswith(extensions) or filename in extra_files:
                    with open(old_path, encoding="utf-8") as template_file:
                        content = template_file.read()
                    template = Engine().from_string(content)
                    content = template.render(context)
                    with open(new_path, "w", encoding="utf-8") as new_file:
                        new_file.write(content)
                else:
                    shutil.copyfile(old_path, new_path)

                if self.verbosity >= 2:
                    self.stdout.write("Creating %s" % new_path)
                try:
                    self.apply_umask(old_path, new_path)
                    self.make_writeable(new_path)
                except OSError:
                    self.stderr.write(
                        "Notice: Couldn't set permission bits on %s. You're "
                        "probably using an uncommon filesystem setup. No "
                        "problem." % new_path,
                        self.style.NOTICE,
                    )

        if self.paths_to_remove:
            if self.verbosity >= 2:
                self.stdout.write("Cleaning up temporary files.")
            for path_to_remove in self.paths_to_remove:
                if os.path.isfile(path_to_remove):
                    os.remove(path_to_remove)
                else:
                    shutil.rmtree(path_to_remove)

        run_formatters([top_dir], **formatter_paths)

    def handle_template(self, template, subdir):
        """
        Determine where the app or project templates are.

        Use django.__path__[0] as the default because the Django install
        directory isn't known.
        """
        if template is None:
            return os.path.join(django.__path__[0], "conf", subdir)
        else:
            if template.startswith("file://"):
                template = template[7:]
            expanded_template = os.path.expanduser(template)
            expanded_template = os.path.normpath(expanded_template)
            if os.path.isdir(expanded_template):
                return expanded_template
            if self.is_url(template):
                # downloads the file and returns the path
                absolute_path = self.download(template)
            else:
                absolute_path = os.path.abspath(expanded_template)
            if os.path.exists(absolute_path):
                return self.extract(absolute_path)

        raise CommandError(
            "couldn't handle %s template %s." % (self.app_or_project, template)
        )

    def validate_name(self, name, name_or_dir="name"):
        if name is None:
            raise CommandError(
                "you must provide {an} {app} name".format(
                    an=self.a_or_an,
                    app=self.app_or_project,
                )
            )
        # Check it's a valid directory name.
        if not name.isidentifier():
            raise CommandError(
                "'{name}' is not a valid {app} {type}. Please make sure the "
                "{type} is a valid identifier.".format(
                    name=name,
                    app=self.app_or_project,
                    type=name_or_dir,
                )
            )
        # Check it cannot be imported.
        try:
            import_module(name)
        except ImportError:
            pass
        else:
            raise CommandError(
                "'{name}' conflicts with the name of an existing Python "
                "module and cannot be used as {an} {app} {type}. Please try "
                "another {type}.".format(
                    name=name,
                    an=self.a_or_an,
                    app=self.app_or_project,
                    type=name_or_dir,
                )
            )

    def download(self, url):
        """
        Download the given URL and return the file name.
        """

        def cleanup_url(url):
            tmp = url.rstrip("/")
            filename = tmp.split("/")[-1]
            if url.endswith("/"):
                display_url = tmp + "/"
            else:
                display_url = url
            return filename, display_url

        prefix = "django_%s_template_" % self.app_or_project
        tempdir = tempfile.mkdtemp(prefix=prefix, suffix="_download")
        self.paths_to_remove.append(tempdir)
        filename, display_url = cleanup_url(url)

        if self.verbosity >= 2:
            self.stdout.write("Downloading %s" % display_url)

        the_path = os.path.join(tempdir, filename)
        opener = build_opener()
        opener.addheaders = [("User-Agent", f"Django/{django.__version__}")]
        try:
            with opener.open(url) as source, open(the_path, "wb") as target:
                headers = source.info()
                target.write(source.read())
        except OSError as e:
            raise CommandError(
                "couldn't download URL %s to %s: %s" % (url, filename, e)
            )

        used_name = the_path.split("/")[-1]

        # Trying to get better name from response headers
        content_disposition = headers["content-disposition"]
        if content_disposition:
            _, params = parse_header_parameters(content_disposition)
            guessed_filename = params.get("filename") or used_name
        else:
            guessed_filename = used_name

        # Falling back to content type guessing
        ext = self.splitext(guessed_filename)[1]
        content_type = headers["content-type"]
        if not ext and content_type:
            ext = mimetypes.guess_extension(content_type)
            if ext:
                guessed_filename += ext

        # Move the temporary file to a filename that has better
        # chances of being recognized by the archive utils
        if used_name != guessed_filename:
            guessed_path = os.path.join(tempdir, guessed_filename)
            shutil.move(the_path, guessed_path)
            return guessed_path

        # Giving up
        return the_path

    def splitext(self, the_path):
        """
        Like os.path.splitext, but takes off .tar, too
        """
        base, ext = posixpath.splitext(the_path)
        if base.lower().endswith(".tar"):
            ext = base[-4:] + ext
            base = base[:-4]
        return base, ext

    def extract(self, filename):
        """
        Extract the given file to a temporary directory and return
        the path of the directory with the extracted content.
        """
        prefix = "django_%s_template_" % self.app_or_project
        tempdir = tempfile.mkdtemp(prefix=prefix, suffix="_extract")
        self.paths_to_remove.append(tempdir)
        if self.verbosity >= 2:
            self.stdout.write("Extracting %s" % filename)
        try:
            archive.extract(filename, tempdir)
            return tempdir
        except (archive.ArchiveException, OSError) as e:
            raise CommandError(
                "couldn't extract file %s to %s: %s" % (filename, tempdir, e)
            )

    def is_url(self, template):
        """Return True if the name looks like a URL."""
        if ":" not in template:
            return False
        scheme = template.split(":", 1)[0].lower()
        return scheme in self.url_schemes

    def apply_umask(self, old_path, new_path):
        current_umask = os.umask(0)
        os.umask(current_umask)
        current_mode = stat.S_IMODE(os.stat(old_path).st_mode)
        os.chmod(new_path, current_mode & ~current_umask)

    def make_writeable(self, filename):
        """
        Make sure that the file is writeable.
        Useful if our source is read-only.
        """
        if not os.access(filename, os.W_OK):
            st = os.stat(filename)
            new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
            os.chmod(filename, new_permissions)
0a6d9cdee5dd9292f2336b24d3f44c22e9f2925152bbd836f64375292fd77dd8
import unicodedata

from django import forms
from django.contrib.auth import authenticate, get_user_model, password_validation
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX, identify_hasher
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.text import capfirst
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _

UserModel = get_user_model()


def _unicode_ci_compare(s1, s2):
    """
    Perform case-insensitive comparison of two identifiers, using the
    recommended algorithm from Unicode Technical Report 36, section
    2.11.2(B)(2).
    """
    return (
        unicodedata.normalize("NFKC", s1).casefold()
        == unicodedata.normalize("NFKC", s2).casefold()
    )


class ReadOnlyPasswordHashWidget(forms.Widget):
    template_name = "auth/widgets/read_only_password_hash.html"
    read_only = True

    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        summary = []
        if not value or value.startswith(UNUSABLE_PASSWORD_PREFIX):
            summary.append({"label": gettext("No password set.")})
        else:
            try:
                hasher = identify_hasher(value)
            except ValueError:
                summary.append(
                    {
                        "label": gettext(
                            "Invalid password format or unknown hashing algorithm."
                        )
                    }
                )
            else:
                for key, value_ in hasher.safe_summary(value).items():
                    summary.append({"label": gettext(key), "value": value_})
        context["summary"] = summary
        return context

    def id_for_label(self, id_):
        return None


class ReadOnlyPasswordHashField(forms.Field):
    widget = ReadOnlyPasswordHashWidget

    def __init__(self, *args, **kwargs):
        kwargs.setdefault("required", False)
        kwargs.setdefault("disabled", True)
        super().__init__(*args, **kwargs)


class UsernameField(forms.CharField):
    def to_python(self, value):
        return unicodedata.normalize("NFKC", super().to_python(value))

    def widget_attrs(self, widget):
        return {
            **super().widget_attrs(widget),
            "autocapitalize": "none",
            "autocomplete": "username",
        }


class UserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username
    and password.
    """

    error_messages = {
        "password_mismatch": _("The two password fields didn’t match."),
    }
    password1 = forms.CharField(
        label=_("Password"),
        strip=False,
        widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
        help_text=password_validation.password_validators_help_text_html(),
    )
    password2 = forms.CharField(
        label=_("Password confirmation"),
        widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
        strip=False,
        help_text=_("Enter the same password as before, for verification."),
    )

    class Meta:
        model = User
        fields = ("username",)
        field_classes = {"username": UsernameField}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self._meta.model.USERNAME_FIELD in self.fields:
            self.fields[self._meta.model.USERNAME_FIELD].widget.attrs[
                "autofocus"
            ] = True

    def clean_password2(self):
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise ValidationError(
                self.error_messages["password_mismatch"],
                code="password_mismatch",
            )
        return password2

    def _post_clean(self):
        super()._post_clean()
        # Validate the password after self.instance is updated with form data
        # by super().
        password = self.cleaned_data.get("password2")
        if password:
            try:
                password_validation.validate_password(password, self.instance)
            except ValidationError as error:
                self.add_error("password2", error)

    def save(self, commit=True):
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user


class UserChangeForm(forms.ModelForm):
    password = ReadOnlyPasswordHashField(
        label=_("Password"),
        help_text=_(
            "Raw passwords are not stored, so there is no way to see this "
            "user’s password, but you can change the password using "
            '<a href="{}">this form</a>.'
        ),
    )

    class Meta:
        model = User
        fields = "__all__"
        field_classes = {"username": UsernameField}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        password = self.fields.get("password")
        if password:
            password.help_text = password.help_text.format("../password/")
        user_permissions = self.fields.get("user_permissions")
        if user_permissions:
            user_permissions.queryset = user_permissions.queryset.select_related(
                "content_type"
            )


class AuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that
    accepts username/password logins.
    """

    username = UsernameField(widget=forms.TextInput(attrs={"autofocus": True}))
    password = forms.CharField(
        label=_("Password"),
        strip=False,
        widget=forms.PasswordInput(attrs={"autocomplete": "current-password"}),
    )

    error_messages = {
        "invalid_login": _(
            "Please enter a correct %(username)s and password. Note that both "
            "fields may be case-sensitive."
        ),
        "inactive": _("This account is inactive."),
    }

    def __init__(self, request=None, *args, **kwargs):
        """
        The 'request' parameter is set for custom auth use by subclasses.
        The form data comes in via the standard 'data' kwarg.
        """
        self.request = request
        self.user_cache = None
        super().__init__(*args, **kwargs)

        # Set the max length and label for the "username" field.
        self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
        username_max_length = self.username_field.max_length or 254
        self.fields["username"].max_length = username_max_length
        self.fields["username"].widget.attrs["maxlength"] = username_max_length
        if self.fields["username"].label is None:
            self.fields["username"].label = capfirst(self.username_field.verbose_name)

    def clean(self):
        username = self.cleaned_data.get("username")
        password = self.cleaned_data.get("password")

        if username is not None and password:
            self.user_cache = authenticate(
                self.request, username=username, password=password
            )
            if self.user_cache is None:
                raise self.get_invalid_login_error()
            else:
                self.confirm_login_allowed(self.user_cache)

        return self.cleaned_data

    def confirm_login_allowed(self, user):
        """
        Controls whether the given User may log in. This is a policy setting,
        independent of end-user authentication. The default behavior is to
        allow login by active users, and reject login by inactive users.

        If the given user cannot log in, this method should raise a
        ``ValidationError``.

        If the given user may log in, this method should return None.
        """
        if not user.is_active:
            raise ValidationError(
                self.error_messages["inactive"],
                code="inactive",
            )

    def get_user(self):
        return self.user_cache

    def get_invalid_login_error(self):
        return ValidationError(
            self.error_messages["invalid_login"],
            code="invalid_login",
            params={"username": self.username_field.verbose_name},
        )


class PasswordResetForm(forms.Form):
    email = forms.EmailField(
        label=_("Email"),
        max_length=254,
        widget=forms.EmailInput(attrs={"autocomplete": "email"}),
    )

    def send_mail(
        self,
        subject_template_name,
        email_template_name,
        context,
        from_email,
        to_email,
        html_email_template_name=None,
    ):
        """
        Send a django.core.mail.EmailMultiAlternatives to `to_email`.
        """
        subject = loader.render_to_string(subject_template_name, context)
        # Email subject *must not* contain newlines
        subject = "".join(subject.splitlines())
        body = loader.render_to_string(email_template_name, context)

        email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
        if html_email_template_name is not None:
            html_email = loader.render_to_string(html_email_template_name, context)
            email_message.attach_alternative(html_email, "text/html")

        email_message.send()

    def get_users(self, email):
        """Given an email, return matching user(s) who should receive a reset.

        This allows subclasses to more easily customize the default policies
        that prevent inactive users and users with unusable passwords from
        resetting their password.
        """
        email_field_name = UserModel.get_email_field_name()
        active_users = UserModel._default_manager.filter(
            **{
                "%s__iexact" % email_field_name: email,
                "is_active": True,
            }
        )
        return (
            u
            for u in active_users
            if u.has_usable_password()
            and _unicode_ci_compare(email, getattr(u, email_field_name))
        )

    def save(
        self,
        domain_override=None,
        subject_template_name="registration/password_reset_subject.txt",
        email_template_name="registration/password_reset_email.html",
        use_https=False,
        token_generator=default_token_generator,
        from_email=None,
        request=None,
        html_email_template_name=None,
        extra_email_context=None,
    ):
        """
        Generate a one-use only link for resetting password and send it to the
        user.
        """
        email = self.cleaned_data["email"]
        if not domain_override:
            current_site = get_current_site(request)
            site_name = current_site.name
            domain = current_site.domain
        else:
            site_name = domain = domain_override
        email_field_name = UserModel.get_email_field_name()
        for user in self.get_users(email):
            user_email = getattr(user, email_field_name)
            context = {
                "email": user_email,
                "domain": domain,
                "site_name": site_name,
                "uid": urlsafe_base64_encode(force_bytes(user.pk)),
                "user": user,
                "token": token_generator.make_token(user),
                "protocol": "https" if use_https else "http",
                **(extra_email_context or {}),
            }
            self.send_mail(
                subject_template_name,
                email_template_name,
                context,
                from_email,
                user_email,
                html_email_template_name=html_email_template_name,
            )


class SetPasswordForm(forms.Form):
    """
    A form that lets a user set their password without entering the old
    password.
    """

    error_messages = {
        "password_mismatch": _("The two password fields didn’t match."),
    }
    new_password1 = forms.CharField(
        label=_("New password"),
        widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
        strip=False,
        help_text=password_validation.password_validators_help_text_html(),
    )
    new_password2 = forms.CharField(
        label=_("New password confirmation"),
        strip=False,
        widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
    )

    def __init__(self, user, *args, **kwargs):
        self.user = user
        super().__init__(*args, **kwargs)

    def clean_new_password2(self):
        password1 = self.cleaned_data.get("new_password1")
        password2 = self.cleaned_data.get("new_password2")
        if password1 and password2 and password1 != password2:
            raise ValidationError(
                self.error_messages["password_mismatch"],
                code="password_mismatch",
            )
        password_validation.validate_password(password2, self.user)
        return password2

    def save(self, commit=True):
        password = self.cleaned_data["new_password1"]
        self.user.set_password(password)
        if commit:
            self.user.save()
        return self.user


class PasswordChangeForm(SetPasswordForm):
    """
    A form that lets a user change their password by entering their old
    password.
    """

    error_messages = {
        **SetPasswordForm.error_messages,
        "password_incorrect": _(
            "Your old password was entered incorrectly. Please enter it again."
        ),
    }
    old_password = forms.CharField(
        label=_("Old password"),
        strip=False,
        widget=forms.PasswordInput(
            attrs={"autocomplete": "current-password", "autofocus": True}
        ),
    )

    field_order = ["old_password", "new_password1", "new_password2"]

    def clean_old_password(self):
        """
        Validate that the old_password field is correct.
        """
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise ValidationError(
                self.error_messages["password_incorrect"],
                code="password_incorrect",
            )
        return old_password


class AdminPasswordChangeForm(forms.Form):
    """
    A form used to change the password of a user in the admin interface.
    """

    error_messages = {
        "password_mismatch": _("The two password fields didn’t match."),
    }
    required_css_class = "required"
    password1 = forms.CharField(
        label=_("Password"),
        widget=forms.PasswordInput(
            attrs={"autocomplete": "new-password", "autofocus": True}
        ),
        strip=False,
        help_text=password_validation.password_validators_help_text_html(),
    )
    password2 = forms.CharField(
        label=_("Password (again)"),
        widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
        strip=False,
        help_text=_("Enter the same password as before, for verification."),
    )

    def __init__(self, user, *args, **kwargs):
        self.user = user
        super().__init__(*args, **kwargs)

    def clean_password2(self):
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise ValidationError(
                self.error_messages["password_mismatch"],
                code="password_mismatch",
            )
        password_validation.validate_password(password2, self.user)
        return password2

    def save(self, commit=True):
        """Save the new password."""
        password = self.cleaned_data["password1"]
        self.user.set_password(password)
        if commit:
            self.user.save()
        return self.user

    @property
    def changed_data(self):
        data = super().changed_data
        for name in self.fields:
            if name not in data:
                return []
        return ["password"]
e3cf59647b4c46badb2fe54603c526f608e3e54becde10cf5a483b4eb1c31c85
import datetime
import re
from unittest import mock

from django.contrib.auth.forms import (
    AdminPasswordChangeForm,
    AuthenticationForm,
    PasswordChangeForm,
    PasswordResetForm,
    ReadOnlyPasswordHashField,
    ReadOnlyPasswordHashWidget,
    SetPasswordForm,
    UserChangeForm,
    UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_login_failed
from django.contrib.sites.models import Site
from django.core import mail
from django.core.exceptions import ValidationError
from django.core.mail import EmailMultiAlternatives
from django.forms import forms
from django.forms.fields import CharField, Field, IntegerField
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import gettext as _

from .models.custom_user import (
    CustomUser,
    CustomUserWithoutIsActiveField,
    ExtensionUser,
)
from .models.with_custom_email_field import CustomEmailField
from .models.with_integer_username import IntegerUsernameUser
from .settings import AUTH_TEMPLATES


class TestDataMixin:
    @classmethod
    def setUpTestData(cls):
        cls.u1 = User.objects.create_user(
            username="testclient", password="password", email="[email protected]"
        )
        cls.u2 = User.objects.create_user(
            username="inactive", password="password", is_active=False
        )
        cls.u3 = User.objects.create_user(username="staff", password="password")
        cls.u4 = User.objects.create(username="empty_password", password="")
        cls.u5 = User.objects.create(username="unmanageable_password", password="$")
        cls.u6 = User.objects.create(username="unknown_password", password="foo$bar")


class UserCreationFormTest(TestDataMixin, TestCase):
    def test_user_already_exists(self):
        data = {
            "username": "testclient",
            "password1": "test123",
            "password2": "test123",
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form["username"].errors,
            [str(User._meta.get_field("username").error_messages["unique"])],
        )

    def test_invalid_data(self):
        data = {
            "username": "jsmith!",
            "password1": "test123",
            "password2": "test123",
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        validator = next(
            v
            for v in User._meta.get_field("username").validators
            if v.code == "invalid"
        )
        self.assertEqual(form["username"].errors, [str(validator.message)])

    def test_password_verification(self):
        # The verification password is incorrect.
        data = {
            "username": "jsmith",
            "password1": "test123",
            "password2": "test",
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form["password2"].errors, [str(form.error_messages["password_mismatch"])]
        )

    def test_both_passwords(self):
        # One (or both) passwords weren't given
        data = {"username": "jsmith"}
        form = UserCreationForm(data)
        required_error = [str(Field.default_error_messages["required"])]
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password1"].errors, required_error)
        self.assertEqual(form["password2"].errors, required_error)
        data["password2"] = "test123"
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password1"].errors, required_error)
        self.assertEqual(form["password2"].errors, [])

    @mock.patch("django.contrib.auth.password_validation.password_changed")
    def test_success(self, password_changed):
        # The success case.
        data = {
            "username": "[email protected]",
            "password1": "test123",
            "password2": "test123",
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        u = form.save()
        self.assertEqual(password_changed.call_count, 1)
        self.assertEqual(repr(u), "<User: [email protected]>")

    def test_unicode_username(self):
        data = {
            "username": "宝",
            "password1": "test123",
            "password2": "test123",
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(u.username, "宝")

    def test_normalize_username(self):
        # The normalization happens in AbstractBaseUser.clean() and ModelForm
        # validation calls Model.clean().
        ohm_username = "testΩ"  # U+2126 OHM SIGN
        data = {
            "username": ohm_username,
            "password1": "pwd2",
            "password2": "pwd2",
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        user = form.save()
        self.assertNotEqual(user.username, ohm_username)
        self.assertEqual(user.username, "testΩ")  # U+03A9 GREEK CAPITAL LETTER OMEGA

    def test_duplicate_normalized_unicode(self):
        """
        To prevent almost identical usernames, visually identical but differing
        by their unicode code points only, Unicode NFKC normalization should
        make them appear equal to Django.
        """
        omega_username = "iamtheΩ"  # U+03A9 GREEK CAPITAL LETTER OMEGA
        ohm_username = "iamtheΩ"  # U+2126 OHM SIGN
        self.assertNotEqual(omega_username, ohm_username)
        User.objects.create_user(username=omega_username, password="pwd")
        data = {
            "username": ohm_username,
            "password1": "pwd2",
            "password2": "pwd2",
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors["username"], ["A user with that username already exists."]
        )

    @override_settings(
        AUTH_PASSWORD_VALIDATORS=[
            {
                "NAME": (
                    "django.contrib.auth.password_validation."
                    "UserAttributeSimilarityValidator"
                )
            },
            {
                "NAME": (
                    "django.contrib.auth.password_validation.MinimumLengthValidator"
                ),
                "OPTIONS": {
                    "min_length": 12,
                },
            },
        ]
    )
    def test_validates_password(self):
        data = {
            "username": "testclient",
            "password1": "testclient",
            "password2": "testclient",
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form["password2"].errors), 2)
        self.assertIn(
            "The password is too similar to the username.", form["password2"].errors
        )
        self.assertIn(
            "This password is too short. It must contain at least 12 characters.",
            form["password2"].errors,
        )

    def test_custom_form(self):
        class CustomUserCreationForm(UserCreationForm):
            class Meta(UserCreationForm.Meta):
                model = ExtensionUser
                fields = UserCreationForm.Meta.fields + ("date_of_birth",)

        data = {
            "username": "testclient",
            "password1": "testclient",
            "password2": "testclient",
            "date_of_birth": "1988-02-24",
        }
        form = CustomUserCreationForm(data)
        self.assertTrue(form.is_valid())

    def test_custom_form_with_different_username_field(self):
        class CustomUserCreationForm(UserCreationForm):
            class Meta(UserCreationForm.Meta):
                model = CustomUser
                fields = ("email", "date_of_birth")

        data = {
            "email": "[email protected]",
            "password1": "testclient",
            "password2": "testclient",
            "date_of_birth": "1988-02-24",
        }
        form = CustomUserCreationForm(data)
        self.assertTrue(form.is_valid())

    def test_custom_form_hidden_username_field(self):
        class CustomUserCreationForm(UserCreationForm):
            class Meta(UserCreationForm.Meta):
                model = CustomUserWithoutIsActiveField
                fields = ("email",)  # without USERNAME_FIELD

        data = {
            "email": "[email protected]",
            "password1": "testclient",
            "password2": "testclient",
        }
        form = CustomUserCreationForm(data)
        self.assertTrue(form.is_valid())

    def test_password_whitespace_not_stripped(self):
        data = {
            "username": "testuser",
            "password1": " testpassword ",
            "password2": " testpassword ",
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data["password1"], data["password1"])
        self.assertEqual(form.cleaned_data["password2"], data["password2"])

    @override_settings(
        AUTH_PASSWORD_VALIDATORS=[
            {
                "NAME": (
                    "django.contrib.auth.password_validation."
                    "UserAttributeSimilarityValidator"
                )
            },
        ]
    )
    def test_password_help_text(self):
        form = UserCreationForm()
        self.assertEqual(
            form.fields["password1"].help_text,
            "<ul><li>"
            "Your password can’t be too similar to your other personal information."
            "</li></ul>",
        )

    @override_settings(
        AUTH_PASSWORD_VALIDATORS=[
            {
                "NAME": (
                    "django.contrib.auth.password_validation."
                    "UserAttributeSimilarityValidator"
                )
            },
        ]
    )
    def test_user_create_form_validates_password_with_all_data(self):
        """UserCreationForm password validation uses all of the form's data."""

        class CustomUserCreationForm(UserCreationForm):
            class Meta(UserCreationForm.Meta):
                model = User
                fields = ("username", "email", "first_name", "last_name")

        form = CustomUserCreationForm(
            {
                "username": "testuser",
                "password1": "testpassword",
                "password2": "testpassword",
                "first_name": "testpassword",
                "last_name": "lastname",
            }
        )
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors["password2"],
            ["The password is too similar to the first name."],
        )

    def test_username_field_autocapitalize_none(self):
        form = UserCreationForm()
        self.assertEqual(
            form.fields["username"].widget.attrs.get("autocapitalize"), "none"
        )

    def test_html_autocomplete_attributes(self):
        form = UserCreationForm()
        tests = (
            ("username", "username"),
            ("password1", "new-password"),
            ("password2", "new-password"),
        )
        for field_name, autocomplete in tests:
            with self.subTest(field_name=field_name, autocomplete=autocomplete):
                self.assertEqual(
                    form.fields[field_name].widget.attrs["autocomplete"], autocomplete
                )


# To verify that the login form rejects inactive users, use an authentication
# backend that allows them.
@override_settings(
    AUTHENTICATION_BACKENDS=["django.contrib.auth.backends.AllowAllUsersModelBackend"]
)
class AuthenticationFormTest(TestDataMixin, TestCase):
    def test_invalid_username(self):
        # The user submits an invalid username.
        data = {
            "username": "jsmith_does_not_exist",
            "password": "test123",
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.non_field_errors(),
            [
                form.error_messages["invalid_login"]
                % {"username": User._meta.get_field("username").verbose_name}
            ],
        )

    def test_inactive_user(self):
        # The user is inactive.
        data = {
            "username": "inactive",
            "password": "password",
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.non_field_errors(), [str(form.error_messages["inactive"])]
        )

    # Use an authentication backend that rejects inactive users.
    @override_settings(
        AUTHENTICATION_BACKENDS=["django.contrib.auth.backends.ModelBackend"]
    )
    def test_inactive_user_incorrect_password(self):
        """An invalid login doesn't leak the inactive status of a user."""
        data = {
            "username": "inactive",
            "password": "incorrect",
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.non_field_errors(),
            [
                form.error_messages["invalid_login"]
                % {"username": User._meta.get_field("username").verbose_name}
            ],
        )

    def test_login_failed(self):
        signal_calls = []

        def signal_handler(**kwargs):
            signal_calls.append(kwargs)

        user_login_failed.connect(signal_handler)
        fake_request = object()
        try:
            form = AuthenticationForm(
                fake_request,
                {
                    "username": "testclient",
                    "password": "incorrect",
                },
            )
            self.assertFalse(form.is_valid())
            self.assertIs(signal_calls[0]["request"], fake_request)
        finally:
            user_login_failed.disconnect(signal_handler)

    def test_inactive_user_i18n(self):
        with self.settings(USE_I18N=True), translation.override(
            "pt-br", deactivate=True
        ):
            # The user is inactive.
            data = {
                "username": "inactive",
                "password": "password",
            }
            form = AuthenticationForm(None, data)
            self.assertFalse(form.is_valid())
            self.assertEqual(
                form.non_field_errors(), [str(form.error_messages["inactive"])]
            )

    # Use an authentication backend that allows inactive users.
    @override_settings(
        AUTHENTICATION_BACKENDS=[
            "django.contrib.auth.backends.AllowAllUsersModelBackend"
        ]
    )
    def test_custom_login_allowed_policy(self):
        # The user is inactive, but our custom form policy allows them to log in.
        data = {
            "username": "inactive",
            "password": "password",
        }

        class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
            def confirm_login_allowed(self, user):
                pass

        form = AuthenticationFormWithInactiveUsersOkay(None, data)
        self.assertTrue(form.is_valid())

        # Raise a ValidationError in the form to disallow some logins according
        # to custom logic.
        class PickyAuthenticationForm(AuthenticationForm):
            def confirm_login_allowed(self, user):
                if user.username == "inactive":
                    raise ValidationError("This user is disallowed.")
                raise ValidationError("Sorry, nobody's allowed in.")

        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ["This user is disallowed."])

        data = {
            "username": "testclient",
            "password": "password",
        }
        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])

    def test_success(self):
        # The success case
        data = {
            "username": "testclient",
            "password": "password",
        }
        form = AuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])

    def test_unicode_username(self):
        User.objects.create_user(username="Σαρα", password="pwd")
        data = {
            "username": "Σαρα",
            "password": "pwd",
        }
        form = AuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])

    @override_settings(AUTH_USER_MODEL="auth_tests.CustomEmailField")
    def test_username_field_max_length_matches_user_model(self):
        self.assertEqual(CustomEmailField._meta.get_field("username").max_length, 255)
        data = {
            "username": "u" * 255,
            "password": "pwd",
            "email": "[email protected]",
        }
        CustomEmailField.objects.create_user(**data)
        form = AuthenticationForm(None, data)
        self.assertEqual(form.fields["username"].max_length, 255)
        self.assertEqual(form.fields["username"].widget.attrs.get("maxlength"), 255)
        self.assertEqual(form.errors, {})

    @override_settings(AUTH_USER_MODEL="auth_tests.IntegerUsernameUser")
    def test_username_field_max_length_defaults_to_254(self):
        self.assertIsNone(IntegerUsernameUser._meta.get_field("username").max_length)
        data = {
            "username": "0123456",
            "password": "password",
        }
        IntegerUsernameUser.objects.create_user(**data)
        form = AuthenticationForm(None, data)
        self.assertEqual(form.fields["username"].max_length, 254)
        self.assertEqual(form.fields["username"].widget.attrs.get("maxlength"), 254)
        self.assertEqual(form.errors, {})

    def test_username_field_label(self):
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label="Name", max_length=75)

        form = CustomAuthenticationForm()
        self.assertEqual(form["username"].label, "Name")

    def test_username_field_label_not_set(self):
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField()

        form = CustomAuthenticationForm()
        username_field = User._meta.get_field(User.USERNAME_FIELD)
        self.assertEqual(
            form.fields["username"].label, capfirst(username_field.verbose_name)
        )

    def test_username_field_autocapitalize_none(self):
        form = AuthenticationForm()
        self.assertEqual(
            form.fields["username"].widget.attrs.get("autocapitalize"), "none"
        )

    def test_username_field_label_empty_string(self):
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label="")

        form = CustomAuthenticationForm()
        self.assertEqual(form.fields["username"].label, "")

    def test_password_whitespace_not_stripped(self):
        data = {
            "username": "testuser",
            "password": " pass ",
        }
        form = AuthenticationForm(None, data)
        form.is_valid()  # Not necessary to have valid credentials for the test.
        self.assertEqual(form.cleaned_data["password"], data["password"])

    @override_settings(AUTH_USER_MODEL="auth_tests.IntegerUsernameUser")
    def test_integer_username(self):
        class CustomAuthenticationForm(AuthenticationForm):
            username = IntegerField()

        user = IntegerUsernameUser.objects.create_user(username=0, password="pwd")
        data = {
            "username": 0,
            "password": "pwd",
        }
        form = CustomAuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data["username"], data["username"])
        self.assertEqual(form.cleaned_data["password"], data["password"])
        self.assertEqual(form.errors, {})
        self.assertEqual(form.user_cache, user)

    def test_get_invalid_login_error(self):
        error = AuthenticationForm().get_invalid_login_error()
        self.assertIsInstance(error, ValidationError)
        self.assertEqual(
            error.message,
            "Please enter a correct %(username)s and password. Note that both "
            "fields may be case-sensitive.",
        )
        self.assertEqual(error.code, "invalid_login")
        self.assertEqual(error.params, {"username": "username"})

    def test_html_autocomplete_attributes(self):
        form = AuthenticationForm()
        tests = (
            ("username", "username"),
            ("password", "current-password"),
        )
        for field_name, autocomplete in tests:
            with self.subTest(field_name=field_name, autocomplete=autocomplete):
                self.assertEqual(
                    form.fields[field_name].widget.attrs["autocomplete"], autocomplete
                )

    def test_no_password(self):
        data = {"username": "username"}
        form = AuthenticationForm(None, data)
        self.assertIs(form.is_valid(), False)
        self.assertEqual(
            form["password"].errors, [Field.default_error_messages["required"]]
        )


class SetPasswordFormTest(TestDataMixin, TestCase):
    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username="testclient")
        data = {
            "new_password1": "abc123",
            "new_password2": "abc",
        }
        form = SetPasswordForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form["new_password2"].errors,
            [str(form.error_messages["password_mismatch"])],
        )

    @mock.patch("django.contrib.auth.password_validation.password_changed")
    def test_success(self, password_changed):
        user = User.objects.get(username="testclient")
        data = {
            "new_password1": "abc123",
            "new_password2": "abc123",
        }
        form = SetPasswordForm(user, data)
        self.assertTrue(form.is_valid())
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        form.save()
        self.assertEqual(password_changed.call_count, 1)

    @override_settings(
        AUTH_PASSWORD_VALIDATORS=[
            {
                "NAME": (
                    "django.contrib.auth.password_validation."
                    "UserAttributeSimilarityValidator"
                )
            },
            {
                "NAME": (
                    "django.contrib.auth.password_validation.MinimumLengthValidator"
                ),
                "OPTIONS": {
                    "min_length": 12,
                },
            },
        ]
    )
    def test_validates_password(self):
        user = User.objects.get(username="testclient")
        data = {
            "new_password1": "testclient",
            "new_password2": "testclient",
        }
        form = SetPasswordForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form["new_password2"].errors), 2)
        self.assertIn(
            "The password is too similar to the username.",
            form["new_password2"].errors,
        )
        self.assertIn(
            "This password is too short. It must contain at least 12 characters.",
            form["new_password2"].errors,
        )

    def test_no_password(self):
        user = User.objects.get(username="testclient")
        data = {"new_password1": "new-password"}
        form = SetPasswordForm(user, data)
        self.assertIs(form.is_valid(), False)
        self.assertEqual(
            form["new_password2"].errors, [Field.default_error_messages["required"]]
        )
        form = SetPasswordForm(user, {})
        self.assertIs(form.is_valid(), False)
        self.assertEqual(
            form["new_password1"].errors, [Field.default_error_messages["required"]]
        )
        self.assertEqual(
            form["new_password2"].errors, [Field.default_error_messages["required"]]
        )

    def test_password_whitespace_not_stripped(self):
        user = User.objects.get(username="testclient")
        data = {
            "new_password1": " password ",
            "new_password2": " password ",
        }
        form = SetPasswordForm(user, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data["new_password1"], data["new_password1"])
        self.assertEqual(form.cleaned_data["new_password2"], data["new_password2"])

    @override_settings(
        AUTH_PASSWORD_VALIDATORS=[
            {
                "NAME": (
                    "django.contrib.auth.password_validation."
                    "UserAttributeSimilarityValidator"
                )
            },
            {
                "NAME": (
                    "django.contrib.auth.password_validation.MinimumLengthValidator"
                ),
                "OPTIONS": {
                    "min_length": 12,
                },
            },
        ]
    )
    def test_help_text_translation(self):
        french_help_texts = [
            "Votre mot de passe ne peut pas trop ressembler à vos autres "
            "informations personnelles.",
            "Votre mot de passe doit contenir au minimum 12 caractères.",
        ]
        form = SetPasswordForm(self.u1)
        with translation.override("fr"):
            html = form.as_p()
            for french_text in french_help_texts:
                self.assertIn(french_text, html)

    def test_html_autocomplete_attributes(self):
        form = SetPasswordForm(self.u1)
        tests = (
            ("new_password1", "new-password"),
            ("new_password2", "new-password"),
        )
        for field_name, autocomplete in tests:
            with self.subTest(field_name=field_name, autocomplete=autocomplete):
                self.assertEqual(
                    form.fields[field_name].widget.attrs["autocomplete"], autocomplete
                )


class PasswordChangeFormTest(TestDataMixin, TestCase):
    def test_incorrect_password(self):
        user = User.objects.get(username="testclient")
        data = {
            "old_password": "test",
            "new_password1": "abc123",
            "new_password2": "abc123",
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form["old_password"].errors,
            [str(form.error_messages["password_incorrect"])],
        )

    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username="testclient")
        data = {
            "old_password": "password",
            "new_password1": "abc123",
            "new_password2": "abc",
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form["new_password2"].errors,
            [str(form.error_messages["password_mismatch"])],
        )

    @mock.patch("django.contrib.auth.password_validation.password_changed")
    def test_success(self, password_changed):
        # The success case.
        user = User.objects.get(username="testclient")
        data = {
            "old_password": "password",
            "new_password1": "abc123",
            "new_password2": "abc123",
        }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        form.save()
        self.assertEqual(password_changed.call_count, 1)

    def test_field_order(self):
        # Regression test - check the order of fields:
        user = User.objects.get(username="testclient")
        self.assertEqual(
            list(PasswordChangeForm(user, {}).fields),
            ["old_password", "new_password1", "new_password2"],
        )

    def test_password_whitespace_not_stripped(self):
        user = User.objects.get(username="testclient")
        user.set_password(" oldpassword ")
        data = {
            "old_password": " oldpassword ",
            "new_password1": " pass ",
            "new_password2": " pass ",
        }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data["old_password"], data["old_password"])
        self.assertEqual(form.cleaned_data["new_password1"], data["new_password1"])
        self.assertEqual(form.cleaned_data["new_password2"], data["new_password2"])

    def test_html_autocomplete_attributes(self):
        user = User.objects.get(username="testclient")
        form = PasswordChangeForm(user)
        self.assertEqual(
            form.fields["old_password"].widget.attrs["autocomplete"],
            "current-password",
        )


class UserChangeFormTest(TestDataMixin, TestCase):
    def test_username_validity(self):
        user = User.objects.get(username="testclient")
        data = {"username": "not valid"}
        form = UserChangeForm(data, instance=user)
        self.assertFalse(form.is_valid())
        validator = next(
            v
            for v in User._meta.get_field("username").validators
            if v.code == "invalid"
        )
        self.assertEqual(form["username"].errors, [str(validator.message)])

    def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
        # UserChangeForm.

        class MyUserForm(UserChangeForm):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.fields[
                    "groups"
                ].help_text = "These groups give users different permissions"

            class Meta(UserChangeForm.Meta):
                fields = ("groups",)

        # Just check we can create it
        MyUserForm({})

    def test_unusable_password(self):
        user = User.objects.get(username="empty_password")
        user.set_unusable_password()
        user.save()
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_empty_password(self):
        user = User.objects.get(username="empty_password")
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_unmanageable_password(self):
        user = User.objects.get(username="unmanageable_password")
        form = UserChangeForm(instance=user)
        self.assertIn(
            _("Invalid password format or unknown hashing algorithm."), form.as_table()
        )

    def test_bug_17944_unknown_password_algorithm(self):
        user = User.objects.get(username="unknown_password")
        form = UserChangeForm(instance=user)
        self.assertIn(
            _("Invalid password format or unknown hashing algorithm."), form.as_table()
        )

    def test_bug_19133(self):
        "The change form does not return the password value"
        # Use the form to construct the POST data
        user = User.objects.get(username="testclient")
        form_for_data = UserChangeForm(instance=user)
        post_data = form_for_data.initial

        # The password field should be readonly, so anything
        # posted here should be ignored; the form will be
        # valid, and give back the 'initial' value for the
        # password field.
        post_data["password"] = "new password"
        form = UserChangeForm(instance=user, data=post_data)

        self.assertTrue(form.is_valid())
        # original hashed password contains $
        self.assertIn("$", form.cleaned_data["password"])

    def test_bug_19349_bound_password_field(self):
        user = User.objects.get(username="testclient")
        form = UserChangeForm(data={}, instance=user)
        # When rendering the bound password field,
        # ReadOnlyPasswordHashWidget needs the initial
        # value to render correctly
        self.assertEqual(form.initial["password"], form["password"].value())

    def test_custom_form(self):
        class CustomUserChangeForm(UserChangeForm):
            class Meta(UserChangeForm.Meta):
                model = ExtensionUser
                fields = (
                    "username",
                    "password",
                    "date_of_birth",
                )

        user = User.objects.get(username="testclient")
        data = {
            "username": "testclient",
            "password": "testclient",
            "date_of_birth": "1998-02-24",
        }
        form = CustomUserChangeForm(data, instance=user)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(form.cleaned_data["username"], "testclient")
        self.assertEqual(form.cleaned_data["date_of_birth"], datetime.date(1998, 2, 24))

    def test_password_excluded(self):
        class UserChangeFormWithoutPassword(UserChangeForm):
            password = None

            class Meta:
                model = User
                exclude = ["password"]

        form = UserChangeFormWithoutPassword()
        self.assertNotIn("password", form.fields)

    def test_username_field_autocapitalize_none(self):
        form = UserChangeForm()
        self.assertEqual(
            form.fields["username"].widget.attrs.get("autocapitalize"), "none"
        )


@override_settings(TEMPLATES=AUTH_TEMPLATES)
class PasswordResetFormTest(TestDataMixin, TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # This cleanup is necessary because contrib.sites cache
        # makes tests interfere with each other, see #11505
        Site.objects.clear_cache()

    def create_dummy_user(self):
        """
        Create a user and return a tuple (user_object, username, email).
        """
        username = "jsmith"
        email = "[email protected]"
        user = User.objects.create_user(username, email, "test123")
        return (user, username, email)

    def test_invalid_email(self):
        data = {"email": "not valid"}
        form = PasswordResetForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["email"].errors, [_("Enter a valid email address.")])

    def test_user_email_unicode_collision(self):
        User.objects.create_user("mike123", "[email protected]", "test123")
        User.objects.create_user("mike456", "mı[email protected]", "test123")
        data = {"email": "mı[email protected]"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ["mı[email protected]"])

    def test_user_email_domain_unicode_collision(self):
        User.objects.create_user("mike123", "[email protected]", "test123")
        User.objects.create_user("mike456", "mike@ıxample.org", "test123")
        data = {"email": "mike@ıxample.org"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ["mike@ıxample.org"])

    def test_user_email_unicode_collision_nonexistent(self):
        User.objects.create_user("mike123", "[email protected]", "test123")
        data = {"email": "mı[email protected]"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_user_email_domain_unicode_collision_nonexistent(self):
        User.objects.create_user("mike123", "[email protected]", "test123")
        data = {"email": "mike@ıxample.org"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_nonexistent_email(self):
        """
        Test nonexistent email address. This should not fail because it would
        expose information about registered users.
        """
        data = {"email": "[email protected]"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(len(mail.outbox), 0)

    def test_cleaned_data(self):
        (user, username, email) = self.create_dummy_user()
        data = {"email": email}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        form.save(domain_override="example.com")
        self.assertEqual(form.cleaned_data["email"], email)
        self.assertEqual(len(mail.outbox), 1)

    def test_custom_email_subject(self):
        data = {"email": "[email protected]"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
        # potential case where contrib.sites is not installed. Refs #16412.
        form.save(domain_override="example.com")
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, "Custom password reset on example.com")

    def test_custom_email_constructor(self):
        data = {"email": "[email protected]"}

        class CustomEmailPasswordResetForm(PasswordResetForm):
            def send_mail(
                self,
                subject_template_name,
                email_template_name,
                context,
                from_email,
                to_email,
                html_email_template_name=None,
            ):
                EmailMultiAlternatives(
                    "Forgot your password?",
                    "Sorry to hear you forgot your password.",
                    None,
                    [to_email],
                    ["[email protected]"],
                    headers={"Reply-To": "[email protected]"},
                    alternatives=[
                        ("Really sorry to hear you forgot your password.", "text/html")
                    ],
                ).send()

        form = CustomEmailPasswordResetForm(data)
        self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
        # potential case where contrib.sites is not installed. Refs #16412.
        form.save(domain_override="example.com")
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, "Forgot your password?")
        self.assertEqual(mail.outbox[0].bcc, ["[email protected]"])
        self.assertEqual(mail.outbox[0].content_subtype, "plain")

    def test_preserve_username_case(self):
        """
        Preserve the case of the user name (before the @ in the email address)
        when creating a user (#5605).
        """
        user = User.objects.create_user("forms_test2", "[email protected]", "test")
        self.assertEqual(user.email, "[email protected]")
        user = User.objects.create_user("forms_test3", "tesT", "test")
        self.assertEqual(user.email, "tesT")

    def test_inactive_user(self):
        """
        Inactive user cannot receive password reset email.
        """
        (user, username, email) = self.create_dummy_user()
        user.is_active = False
        user.save()
        form = PasswordResetForm({"email": email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_unusable_password(self):
        user = User.objects.create_user("testuser", "[email protected]", "test")
        data = {"email": "[email protected]"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        user.set_unusable_password()
        user.save()
        form = PasswordResetForm(data)
        # The form itself is valid, but no email is sent
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_save_plaintext_email(self):
        """
        Test the PasswordResetForm.save() method with no
        html_email_template_name parameter passed in.
        Test to ensure original behavior is unchanged after the parameter was
        added.
        """
        (user, username, email) = self.create_dummy_user()
        form = PasswordResetForm({"email": email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0].message()
        self.assertFalse(message.is_multipart())
        self.assertEqual(message.get_content_type(), "text/plain")
        self.assertEqual(message.get("subject"), "Custom password reset on example.com")
        self.assertEqual(len(mail.outbox[0].alternatives), 0)
        self.assertEqual(message.get_all("to"), [email])
        self.assertTrue(
            re.match(r"^http://example.com/reset/[\w+/-]", message.get_payload())
        )

    def test_save_html_email_template_name(self):
        """
        Test the PasswordResetForm.save() method with html_email_template_name
        parameter specified.
        Test to ensure that a multipart email is sent with both text/plain
        and text/html parts.
""" (user, username, email) = self.create_dummy_user() form = PasswordResetForm({"email": email}) self.assertTrue(form.is_valid()) form.save( html_email_template_name="registration/html_password_reset_email.html" ) self.assertEqual(len(mail.outbox), 1) self.assertEqual(len(mail.outbox[0].alternatives), 1) message = mail.outbox[0].message() self.assertEqual(message.get("subject"), "Custom password reset on example.com") self.assertEqual(len(message.get_payload()), 2) self.assertTrue(message.is_multipart()) self.assertEqual(message.get_payload(0).get_content_type(), "text/plain") self.assertEqual(message.get_payload(1).get_content_type(), "text/html") self.assertEqual(message.get_all("to"), [email]) self.assertTrue( re.match( r"^http://example.com/reset/[\w/-]+", message.get_payload(0).get_payload(), ) ) self.assertTrue( re.match( r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$', message.get_payload(1).get_payload(), ) ) @override_settings(AUTH_USER_MODEL="auth_tests.CustomEmailField") def test_custom_email_field(self): email = "[email protected]" CustomEmailField.objects.create_user("test name", "test password", email) form = PasswordResetForm({"email": email}) self.assertTrue(form.is_valid()) form.save() self.assertEqual(form.cleaned_data["email"], email) self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].to, [email]) def test_html_autocomplete_attributes(self): form = PasswordResetForm() self.assertEqual(form.fields["email"].widget.attrs["autocomplete"], "email") class ReadOnlyPasswordHashTest(SimpleTestCase): def test_bug_19349_render_with_none_value(self): # Rendering the widget with value set to None # mustn't raise an exception. widget = ReadOnlyPasswordHashWidget() html = widget.render(name="password", value=None, attrs={}) self.assertIn(_("No password set."), html) @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.PBKDF2PasswordHasher"] ) def test_render(self): widget = ReadOnlyPasswordHashWidget() value = ( "pbkdf2_sha256$100000$a6Pucb1qSFcD$WmCkn9Hqidj48NVe5x0FEM6A9YiOqQcl/83m2Z5u" "dm0=" ) self.assertHTMLEqual( widget.render("name", value, {"id": "id_password"}), '<div id="id_password">' " <strong>algorithm</strong>: <bdi>pbkdf2_sha256</bdi>" " <strong>iterations</strong>: <bdi>100000</bdi>" " <strong>salt</strong>: <bdi>a6Pucb******</bdi>" " <strong>hash</strong>: " " <bdi>WmCkn9**************************************</bdi>" "</div>", ) def test_readonly_field_has_changed(self): field = ReadOnlyPasswordHashField() self.assertIs(field.disabled, True) self.assertFalse(field.has_changed("aaa", "bbb")) def test_label(self): """ ReadOnlyPasswordHashWidget doesn't contain a for attribute in the <label> because it doesn't have any labelable elements. 
""" class TestForm(forms.Form): hash_field = ReadOnlyPasswordHashField() bound_field = TestForm()["hash_field"] self.assertIsNone(bound_field.field.widget.id_for_label("id")) self.assertEqual(bound_field.label_tag(), "<label>Hash field:</label>") class AdminPasswordChangeFormTest(TestDataMixin, TestCase): @mock.patch("django.contrib.auth.password_validation.password_changed") def test_success(self, password_changed): user = User.objects.get(username="testclient") data = { "password1": "test123", "password2": "test123", } form = AdminPasswordChangeForm(user, data) self.assertTrue(form.is_valid()) form.save(commit=False) self.assertEqual(password_changed.call_count, 0) form.save() self.assertEqual(password_changed.call_count, 1) self.assertEqual(form.changed_data, ["password"]) def test_password_whitespace_not_stripped(self): user = User.objects.get(username="testclient") data = { "password1": " pass ", "password2": " pass ", } form = AdminPasswordChangeForm(user, data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data["password1"], data["password1"]) self.assertEqual(form.cleaned_data["password2"], data["password2"]) self.assertEqual(form.changed_data, ["password"]) def test_non_matching_passwords(self): user = User.objects.get(username="testclient") data = {"password1": "password1", "password2": "password2"} form = AdminPasswordChangeForm(user, data) self.assertEqual( form.errors["password2"], [form.error_messages["password_mismatch"]] ) self.assertEqual(form.changed_data, ["password"]) def test_missing_passwords(self): user = User.objects.get(username="testclient") data = {"password1": "", "password2": ""} form = AdminPasswordChangeForm(user, data) required_error = [Field.default_error_messages["required"]] self.assertEqual(form.errors["password1"], required_error) self.assertEqual(form.errors["password2"], required_error) self.assertEqual(form.changed_data, []) def test_one_password(self): user = User.objects.get(username="testclient") form1 = AdminPasswordChangeForm(user, {"password1": "", "password2": "test"}) required_error = [Field.default_error_messages["required"]] self.assertEqual(form1.errors["password1"], required_error) self.assertNotIn("password2", form1.errors) self.assertEqual(form1.changed_data, []) form2 = AdminPasswordChangeForm(user, {"password1": "test", "password2": ""}) self.assertEqual(form2.errors["password2"], required_error) self.assertNotIn("password1", form2.errors) self.assertEqual(form2.changed_data, []) def test_html_autocomplete_attributes(self): user = User.objects.get(username="testclient") form = AdminPasswordChangeForm(user) tests = ( ("password1", "new-password"), ("password2", "new-password"), ) for field_name, autocomplete in tests: with self.subTest(field_name=field_name, autocomplete=autocomplete): self.assertEqual( form.fields[field_name].widget.attrs["autocomplete"], autocomplete )
from unittest import mock, skipUnless

from django.conf.global_settings import PASSWORD_HASHERS
from django.contrib.auth.hashers import (
    UNUSABLE_PASSWORD_PREFIX,
    UNUSABLE_PASSWORD_SUFFIX_LENGTH,
    BasePasswordHasher,
    BCryptPasswordHasher,
    BCryptSHA256PasswordHasher,
    MD5PasswordHasher,
    PBKDF2PasswordHasher,
    PBKDF2SHA1PasswordHasher,
    ScryptPasswordHasher,
    check_password,
    get_hasher,
    identify_hasher,
    is_password_usable,
    make_password,
)
from django.test import SimpleTestCase, ignore_warnings
from django.test.utils import override_settings
from django.utils.deprecation import RemovedInDjango50Warning, RemovedInDjango51Warning

# RemovedInDjango50Warning.
try:
    import crypt
except ImportError:
    crypt = None
else:
    # On some platforms (e.g. OpenBSD), crypt.crypt() always returns None.
    if crypt.crypt("") is None:
        crypt = None

try:
    import bcrypt
except ImportError:
    bcrypt = None

try:
    import argon2
except ImportError:
    argon2 = None

# scrypt requires OpenSSL 1.1+
try:
    import hashlib

    scrypt = hashlib.scrypt
except ImportError:
    scrypt = None


class PBKDF2SingleIterationHasher(PBKDF2PasswordHasher):
    iterations = 1


@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPass(SimpleTestCase):
    def test_simple(self):
        encoded = make_password("lètmein")
        self.assertTrue(encoded.startswith("pbkdf2_sha256$"))
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password("lètmein", encoded))
        self.assertFalse(check_password("lètmeinz", encoded))
        # Blank passwords
        blank_encoded = make_password("")
        self.assertTrue(blank_encoded.startswith("pbkdf2_sha256$"))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password("", blank_encoded))
        self.assertFalse(check_password(" ", blank_encoded))

    def test_bytes(self):
        encoded = make_password(b"bytes_password")
        self.assertTrue(encoded.startswith("pbkdf2_sha256$"))
        self.assertIs(is_password_usable(encoded), True)
        self.assertIs(check_password(b"bytes_password", encoded), True)

    def test_invalid_password(self):
        msg = "Password must be a string or bytes, got int."
        with self.assertRaisesMessage(TypeError, msg):
            make_password(1)

    def test_pbkdf2(self):
        encoded = make_password("lètmein", "seasalt", "pbkdf2_sha256")
        self.assertEqual(
            encoded,
            "pbkdf2_sha256$480000$seasalt$G4ja8YRtfnNyEx4Ii2pbFMp/l8s4nnbMdJ+Fob/qNK8=",
        )
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password("lètmein", encoded))
        self.assertFalse(check_password("lètmeinz", encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "pbkdf2_sha256")
        # Blank passwords
        blank_encoded = make_password("", "seasalt", "pbkdf2_sha256")
        self.assertTrue(blank_encoded.startswith("pbkdf2_sha256$"))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password("", blank_encoded))
        self.assertFalse(check_password(" ", blank_encoded))
        # Salt entropy check.
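        # (Background, summarized from the hasher internals: must_update()
        # flags hashes whose salt appears to carry fewer than 128 bits of
        # entropy -- roughly len(salt) * log2(62) for Django's alphanumeric
        # salts -- so the 11-character "iodizedsalt" is flagged for an update
        # while a freshly generated hasher.salt() is not.)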
hasher = get_hasher("pbkdf2_sha256") encoded_weak_salt = make_password("lètmein", "iodizedsalt", "pbkdf2_sha256") encoded_strong_salt = make_password("lètmein", hasher.salt(), "pbkdf2_sha256") self.assertIs(hasher.must_update(encoded_weak_salt), True) self.assertIs(hasher.must_update(encoded_strong_salt), False) @ignore_warnings(category=RemovedInDjango51Warning) @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.SHA1PasswordHasher"] ) def test_sha1(self): encoded = make_password("lètmein", "seasalt", "sha1") self.assertEqual( encoded, "sha1$seasalt$cff36ea83f5706ce9aa7454e63e431fc726b2dc8" ) self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password("lètmein", encoded)) self.assertFalse(check_password("lètmeinz", encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "sha1") # Blank passwords blank_encoded = make_password("", "seasalt", "sha1") self.assertTrue(blank_encoded.startswith("sha1$")) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password("", blank_encoded)) self.assertFalse(check_password(" ", blank_encoded)) # Salt entropy check. hasher = get_hasher("sha1") encoded_weak_salt = make_password("lètmein", "iodizedsalt", "sha1") encoded_strong_salt = make_password("lètmein", hasher.salt(), "sha1") self.assertIs(hasher.must_update(encoded_weak_salt), True) self.assertIs(hasher.must_update(encoded_strong_salt), False) @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.SHA1PasswordHasher"] ) def test_sha1_deprecation_warning(self): msg = "django.contrib.auth.hashers.SHA1PasswordHasher is deprecated." with self.assertRaisesMessage(RemovedInDjango51Warning, msg): get_hasher("sha1") @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.MD5PasswordHasher"] ) def test_md5(self): encoded = make_password("lètmein", "seasalt", "md5") self.assertEqual(encoded, "md5$seasalt$3f86d0d3d465b7b458c231bf3555c0e3") self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password("lètmein", encoded)) self.assertFalse(check_password("lètmeinz", encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "md5") # Blank passwords blank_encoded = make_password("", "seasalt", "md5") self.assertTrue(blank_encoded.startswith("md5$")) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password("", blank_encoded)) self.assertFalse(check_password(" ", blank_encoded)) # Salt entropy check. 
hasher = get_hasher("md5") encoded_weak_salt = make_password("lètmein", "iodizedsalt", "md5") encoded_strong_salt = make_password("lètmein", hasher.salt(), "md5") self.assertIs(hasher.must_update(encoded_weak_salt), True) self.assertIs(hasher.must_update(encoded_strong_salt), False) @ignore_warnings(category=RemovedInDjango51Warning) @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.UnsaltedMD5PasswordHasher"] ) def test_unsalted_md5(self): encoded = make_password("lètmein", "", "unsalted_md5") self.assertEqual(encoded, "88a434c88cca4e900f7874cd98123f43") self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password("lètmein", encoded)) self.assertFalse(check_password("lètmeinz", encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_md5") # Alternate unsalted syntax alt_encoded = "md5$$%s" % encoded self.assertTrue(is_password_usable(alt_encoded)) self.assertTrue(check_password("lètmein", alt_encoded)) self.assertFalse(check_password("lètmeinz", alt_encoded)) # Blank passwords blank_encoded = make_password("", "", "unsalted_md5") self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password("", blank_encoded)) self.assertFalse(check_password(" ", blank_encoded)) @ignore_warnings(category=RemovedInDjango51Warning) @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.UnsaltedMD5PasswordHasher"] ) def test_unsalted_md5_encode_invalid_salt(self): hasher = get_hasher("unsalted_md5") msg = "salt must be empty." with self.assertRaisesMessage(ValueError, msg): hasher.encode("password", salt="salt") @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.UnsaltedMD5PasswordHasher"] ) def test_unsalted_md5_deprecation_warning(self): msg = "django.contrib.auth.hashers.UnsaltedMD5PasswordHasher is deprecated." with self.assertRaisesMessage(RemovedInDjango51Warning, msg): get_hasher("unsalted_md5") @ignore_warnings(category=RemovedInDjango51Warning) @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher"] ) def test_unsalted_sha1(self): encoded = make_password("lètmein", "", "unsalted_sha1") self.assertEqual(encoded, "sha1$$6d138ca3ae545631b3abd71a4f076ce759c5700b") self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password("lètmein", encoded)) self.assertFalse(check_password("lètmeinz", encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_sha1") # Raw SHA1 isn't acceptable alt_encoded = encoded[6:] self.assertFalse(check_password("lètmein", alt_encoded)) # Blank passwords blank_encoded = make_password("", "", "unsalted_sha1") self.assertTrue(blank_encoded.startswith("sha1$")) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password("", blank_encoded)) self.assertFalse(check_password(" ", blank_encoded)) @ignore_warnings(category=RemovedInDjango51Warning) @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher"] ) def test_unsalted_sha1_encode_invalid_salt(self): hasher = get_hasher("unsalted_sha1") msg = "salt must be empty." with self.assertRaisesMessage(ValueError, msg): hasher.encode("password", salt="salt") @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher"] ) def test_unsalted_sha1_deprecation_warning(self): msg = "django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher is deprecated." 
with self.assertRaisesMessage(RemovedInDjango51Warning, msg): get_hasher("unsalted_sha1") @ignore_warnings(category=RemovedInDjango50Warning) @skipUnless(crypt, "no crypt module to generate password.") @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.CryptPasswordHasher"] ) def test_crypt(self): encoded = make_password("lètmei", "ab", "crypt") self.assertEqual(encoded, "crypt$$ab1Hv2Lg7ltQo") self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password("lètmei", encoded)) self.assertFalse(check_password("lètmeiz", encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "crypt") # Blank passwords blank_encoded = make_password("", "ab", "crypt") self.assertTrue(blank_encoded.startswith("crypt$")) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password("", blank_encoded)) self.assertFalse(check_password(" ", blank_encoded)) @ignore_warnings(category=RemovedInDjango50Warning) @skipUnless(crypt, "no crypt module to generate password.") @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.CryptPasswordHasher"] ) def test_crypt_encode_invalid_salt(self): hasher = get_hasher("crypt") msg = "salt must be of length 2." with self.assertRaisesMessage(ValueError, msg): hasher.encode("password", salt="a") @ignore_warnings(category=RemovedInDjango50Warning) @skipUnless(crypt, "no crypt module to generate password.") @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.CryptPasswordHasher"] ) def test_crypt_encode_invalid_hash(self): hasher = get_hasher("crypt") msg = "hash must be provided." with mock.patch("crypt.crypt", return_value=None): with self.assertRaisesMessage(TypeError, msg): hasher.encode("password", salt="ab") @skipUnless(crypt, "no crypt module to generate password.") @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.CryptPasswordHasher"] ) def test_crypt_deprecation_warning(self): msg = "django.contrib.auth.hashers.CryptPasswordHasher is deprecated." 
with self.assertRaisesMessage(RemovedInDjango50Warning, msg): get_hasher("crypt") @skipUnless(bcrypt, "bcrypt not installed") def test_bcrypt_sha256(self): encoded = make_password("lètmein", hasher="bcrypt_sha256") self.assertTrue(is_password_usable(encoded)) self.assertTrue(encoded.startswith("bcrypt_sha256$")) self.assertTrue(check_password("lètmein", encoded)) self.assertFalse(check_password("lètmeinz", encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt_sha256") # password truncation no longer works password = ( "VSK0UYV6FFQVZ0KG88DYN9WADAADZO1CTSIVDJUNZSUML6IBX7LN7ZS3R5" "JGB3RGZ7VI7G7DJQ9NI8BQFSRPTG6UWTTVESA5ZPUN" ) encoded = make_password(password, hasher="bcrypt_sha256") self.assertTrue(check_password(password, encoded)) self.assertFalse(check_password(password[:72], encoded)) # Blank passwords blank_encoded = make_password("", hasher="bcrypt_sha256") self.assertTrue(blank_encoded.startswith("bcrypt_sha256$")) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password("", blank_encoded)) self.assertFalse(check_password(" ", blank_encoded)) @skipUnless(bcrypt, "bcrypt not installed") @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.BCryptPasswordHasher"] ) def test_bcrypt(self): encoded = make_password("lètmein", hasher="bcrypt") self.assertTrue(is_password_usable(encoded)) self.assertTrue(encoded.startswith("bcrypt$")) self.assertTrue(check_password("lètmein", encoded)) self.assertFalse(check_password("lètmeinz", encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt") # Blank passwords blank_encoded = make_password("", hasher="bcrypt") self.assertTrue(blank_encoded.startswith("bcrypt$")) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password("", blank_encoded)) self.assertFalse(check_password(" ", blank_encoded)) @skipUnless(bcrypt, "bcrypt not installed") @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.BCryptPasswordHasher"] ) def test_bcrypt_upgrade(self): hasher = get_hasher("bcrypt") self.assertEqual("bcrypt", hasher.algorithm) self.assertNotEqual(hasher.rounds, 4) old_rounds = hasher.rounds try: # Generate a password with 4 rounds. hasher.rounds = 4 encoded = make_password("letmein", hasher="bcrypt") rounds = hasher.safe_summary(encoded)["work factor"] self.assertEqual(rounds, 4) state = {"upgraded": False} def setter(password): state["upgraded"] = True # No upgrade is triggered. self.assertTrue(check_password("letmein", encoded, setter, "bcrypt")) self.assertFalse(state["upgraded"]) # Revert to the old rounds count and ... hasher.rounds = old_rounds # ... check if the password would get updated to the new count. self.assertTrue(check_password("letmein", encoded, setter, "bcrypt")) self.assertTrue(state["upgraded"]) finally: hasher.rounds = old_rounds @skipUnless(bcrypt, "bcrypt not installed") @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.BCryptPasswordHasher"] ) def test_bcrypt_harden_runtime(self): hasher = get_hasher("bcrypt") self.assertEqual("bcrypt", hasher.algorithm) with mock.patch.object(hasher, "rounds", 4): encoded = make_password("letmein", hasher="bcrypt") with mock.patch.object(hasher, "rounds", 6), mock.patch.object( hasher, "encode", side_effect=hasher.encode ): hasher.harden_runtime("wrong_password", encoded) # Increasing rounds from 4 to 6 means an increase of 4 in workload, # therefore hardening should run 3 times to make the timing the # same (the original encode() call already ran once). 
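            # (That is: bcrypt work scales as 2**rounds, so going from 2**4 to
            # 2**6 is a 4x increase; the original encode() already spent one
            # unit of work, leaving three more encode() runs to match.)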
self.assertEqual(hasher.encode.call_count, 3) # Get the original salt (includes the original workload factor) algorithm, data = encoded.split("$", 1) expected_call = (("wrong_password", data[:29].encode()),) self.assertEqual(hasher.encode.call_args_list, [expected_call] * 3) def test_unusable(self): encoded = make_password(None) self.assertEqual( len(encoded), len(UNUSABLE_PASSWORD_PREFIX) + UNUSABLE_PASSWORD_SUFFIX_LENGTH, ) self.assertFalse(is_password_usable(encoded)) self.assertFalse(check_password(None, encoded)) self.assertFalse(check_password(encoded, encoded)) self.assertFalse(check_password(UNUSABLE_PASSWORD_PREFIX, encoded)) self.assertFalse(check_password("", encoded)) self.assertFalse(check_password("lètmein", encoded)) self.assertFalse(check_password("lètmeinz", encoded)) with self.assertRaisesMessage(ValueError, "Unknown password hashing algorith"): identify_hasher(encoded) # Assert that the unusable passwords actually contain a random part. # This might fail one day due to a hash collision. self.assertNotEqual(encoded, make_password(None), "Random password collision?") def test_unspecified_password(self): """ Makes sure specifying no plain password with a valid encoded password returns `False`. """ self.assertFalse(check_password(None, make_password("lètmein"))) def test_bad_algorithm(self): msg = ( "Unknown password hashing algorithm '%s'. Did you specify it in " "the PASSWORD_HASHERS setting?" ) with self.assertRaisesMessage(ValueError, msg % "lolcat"): make_password("lètmein", hasher="lolcat") with self.assertRaisesMessage(ValueError, msg % "lolcat"): identify_hasher("lolcat$salt$hash") def test_is_password_usable(self): passwords = ("lètmein_badencoded", "", None) for password in passwords: with self.subTest(password=password): self.assertIs(is_password_usable(password), True) def test_low_level_pbkdf2(self): hasher = PBKDF2PasswordHasher() encoded = hasher.encode("lètmein", "seasalt2") self.assertEqual( encoded, "pbkdf2_sha256$480000$seasalt2$WlORJKPl5w3Lubr7rYLOwSQCEOm4Or/NCA" "aECnB1PE0=", ) self.assertTrue(hasher.verify("lètmein", encoded)) def test_low_level_pbkdf2_sha1(self): hasher = PBKDF2SHA1PasswordHasher() encoded = hasher.encode("lètmein", "seasalt2") self.assertEqual( encoded, "pbkdf2_sha1$480000$seasalt2$qyT+EkK5g82hk2r+fRecFeoe28E=" ) self.assertTrue(hasher.verify("lètmein", encoded)) @skipUnless(bcrypt, "bcrypt not installed") def test_bcrypt_salt_check(self): hasher = BCryptPasswordHasher() encoded = hasher.encode("lètmein", hasher.salt()) self.assertIs(hasher.must_update(encoded), False) @skipUnless(bcrypt, "bcrypt not installed") def test_bcryptsha256_salt_check(self): hasher = BCryptSHA256PasswordHasher() encoded = hasher.encode("lètmein", hasher.salt()) self.assertIs(hasher.must_update(encoded), False) @override_settings( PASSWORD_HASHERS=[ "django.contrib.auth.hashers.PBKDF2PasswordHasher", "django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher", "django.contrib.auth.hashers.MD5PasswordHasher", ], ) def test_upgrade(self): self.assertEqual("pbkdf2_sha256", get_hasher("default").algorithm) for algo in ("pbkdf2_sha1", "md5"): with self.subTest(algo=algo): encoded = make_password("lètmein", hasher=algo) state = {"upgraded": False} def setter(password): state["upgraded"] = True self.assertTrue(check_password("lètmein", encoded, setter)) self.assertTrue(state["upgraded"]) def test_no_upgrade(self): encoded = make_password("lètmein") state = {"upgraded": False} def setter(): state["upgraded"] = True self.assertFalse(check_password("WRONG", encoded, 
setter)) self.assertFalse(state["upgraded"]) @override_settings( PASSWORD_HASHERS=[ "django.contrib.auth.hashers.PBKDF2PasswordHasher", "django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher", "django.contrib.auth.hashers.MD5PasswordHasher", ], ) def test_no_upgrade_on_incorrect_pass(self): self.assertEqual("pbkdf2_sha256", get_hasher("default").algorithm) for algo in ("pbkdf2_sha1", "md5"): with self.subTest(algo=algo): encoded = make_password("lètmein", hasher=algo) state = {"upgraded": False} def setter(): state["upgraded"] = True self.assertFalse(check_password("WRONG", encoded, setter)) self.assertFalse(state["upgraded"]) def test_pbkdf2_upgrade(self): hasher = get_hasher("default") self.assertEqual("pbkdf2_sha256", hasher.algorithm) self.assertNotEqual(hasher.iterations, 1) old_iterations = hasher.iterations try: # Generate a password with 1 iteration. hasher.iterations = 1 encoded = make_password("letmein") algo, iterations, salt, hash = encoded.split("$", 3) self.assertEqual(iterations, "1") state = {"upgraded": False} def setter(password): state["upgraded"] = True # No upgrade is triggered self.assertTrue(check_password("letmein", encoded, setter)) self.assertFalse(state["upgraded"]) # Revert to the old iteration count and ... hasher.iterations = old_iterations # ... check if the password would get updated to the new iteration count. self.assertTrue(check_password("letmein", encoded, setter)) self.assertTrue(state["upgraded"]) finally: hasher.iterations = old_iterations def test_pbkdf2_harden_runtime(self): hasher = get_hasher("default") self.assertEqual("pbkdf2_sha256", hasher.algorithm) with mock.patch.object(hasher, "iterations", 1): encoded = make_password("letmein") with mock.patch.object(hasher, "iterations", 6), mock.patch.object( hasher, "encode", side_effect=hasher.encode ): hasher.harden_runtime("wrong_password", encoded) # Encode should get called once ... self.assertEqual(hasher.encode.call_count, 1) # ... with the original salt and 5 iterations. algorithm, iterations, salt, hash = encoded.split("$", 3) expected_call = (("wrong_password", salt, 5),) self.assertEqual(hasher.encode.call_args, expected_call) def test_pbkdf2_upgrade_new_hasher(self): hasher = get_hasher("default") self.assertEqual("pbkdf2_sha256", hasher.algorithm) self.assertNotEqual(hasher.iterations, 1) state = {"upgraded": False} def setter(password): state["upgraded"] = True with self.settings( PASSWORD_HASHERS=["auth_tests.test_hashers.PBKDF2SingleIterationHasher"] ): encoded = make_password("letmein") algo, iterations, salt, hash = encoded.split("$", 3) self.assertEqual(iterations, "1") # No upgrade is triggered self.assertTrue(check_password("letmein", encoded, setter)) self.assertFalse(state["upgraded"]) # Revert to the old iteration count and check if the password would get # updated to the new iteration count. 
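        # (Once PBKDF2PasswordHasher is listed first again it becomes the
        # preferred hasher for the "pbkdf2_sha256" algorithm, and its default
        # iteration count no longer matches the stored hash's single
        # iteration, so must_update() reports True and the setter runs.)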
with self.settings( PASSWORD_HASHERS=[ "django.contrib.auth.hashers.PBKDF2PasswordHasher", "auth_tests.test_hashers.PBKDF2SingleIterationHasher", ] ): self.assertTrue(check_password("letmein", encoded, setter)) self.assertTrue(state["upgraded"]) def test_check_password_calls_harden_runtime(self): hasher = get_hasher("default") encoded = make_password("letmein") with mock.patch.object(hasher, "harden_runtime"), mock.patch.object( hasher, "must_update", return_value=True ): # Correct password supplied, no hardening needed check_password("letmein", encoded) self.assertEqual(hasher.harden_runtime.call_count, 0) # Wrong password supplied, hardening needed check_password("wrong_password", encoded) self.assertEqual(hasher.harden_runtime.call_count, 1) def test_encode_invalid_salt(self): hasher_classes = [ MD5PasswordHasher, PBKDF2PasswordHasher, PBKDF2SHA1PasswordHasher, ScryptPasswordHasher, ] msg = "salt must be provided and cannot contain $." for hasher_class in hasher_classes: hasher = hasher_class() for salt in [None, "", "sea$salt"]: with self.subTest(hasher_class.__name__, salt=salt): with self.assertRaisesMessage(ValueError, msg): hasher.encode("password", salt) def test_encode_password_required(self): hasher_classes = [ MD5PasswordHasher, PBKDF2PasswordHasher, PBKDF2SHA1PasswordHasher, ScryptPasswordHasher, ] msg = "password must be provided." for hasher_class in hasher_classes: hasher = hasher_class() with self.subTest(hasher_class.__name__): with self.assertRaisesMessage(TypeError, msg): hasher.encode(None, "seasalt") class BasePasswordHasherTests(SimpleTestCase): not_implemented_msg = "subclasses of BasePasswordHasher must provide %s() method" def setUp(self): self.hasher = BasePasswordHasher() def test_load_library_no_algorithm(self): msg = "Hasher 'BasePasswordHasher' doesn't specify a library attribute" with self.assertRaisesMessage(ValueError, msg): self.hasher._load_library() def test_load_library_importerror(self): PlainHasher = type( "PlainHasher", (BasePasswordHasher,), {"algorithm": "plain", "library": "plain"}, ) msg = "Couldn't load 'PlainHasher' algorithm library: No module named 'plain'" with self.assertRaisesMessage(ValueError, msg): PlainHasher()._load_library() def test_attributes(self): self.assertIsNone(self.hasher.algorithm) self.assertIsNone(self.hasher.library) def test_encode(self): msg = self.not_implemented_msg % "an encode" with self.assertRaisesMessage(NotImplementedError, msg): self.hasher.encode("password", "salt") def test_decode(self): msg = self.not_implemented_msg % "a decode" with self.assertRaisesMessage(NotImplementedError, msg): self.hasher.decode("encoded") def test_harden_runtime(self): msg = ( "subclasses of BasePasswordHasher should provide a harden_runtime() method" ) with self.assertWarnsMessage(Warning, msg): self.hasher.harden_runtime("password", "encoded") def test_must_update(self): self.assertIs(self.hasher.must_update("encoded"), False) def test_safe_summary(self): msg = self.not_implemented_msg % "a safe_summary" with self.assertRaisesMessage(NotImplementedError, msg): self.hasher.safe_summary("encoded") def test_verify(self): msg = self.not_implemented_msg % "a verify" with self.assertRaisesMessage(NotImplementedError, msg): self.hasher.verify("password", "encoded") @skipUnless(argon2, "argon2-cffi not installed") @override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS) class TestUtilsHashPassArgon2(SimpleTestCase): def test_argon2(self): encoded = make_password("lètmein", hasher="argon2") self.assertTrue(is_password_usable(encoded)) 
self.assertTrue(encoded.startswith("argon2$argon2id$")) self.assertTrue(check_password("lètmein", encoded)) self.assertFalse(check_password("lètmeinz", encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "argon2") # Blank passwords blank_encoded = make_password("", hasher="argon2") self.assertTrue(blank_encoded.startswith("argon2$argon2id$")) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password("", blank_encoded)) self.assertFalse(check_password(" ", blank_encoded)) # Old hashes without version attribute encoded = ( "argon2$argon2i$m=8,t=1,p=1$c29tZXNhbHQ$gwQOXSNhxiOxPOA0+PY10P9QFO" "4NAYysnqRt1GSQLE55m+2GYDt9FEjPMHhP2Cuf0nOEXXMocVrsJAtNSsKyfg" ) self.assertTrue(check_password("secret", encoded)) self.assertFalse(check_password("wrong", encoded)) # Old hashes with version attribute. encoded = "argon2$argon2i$v=19$m=8,t=1,p=1$c2FsdHNhbHQ$YC9+jJCrQhs5R6db7LlN8Q" self.assertIs(check_password("secret", encoded), True) self.assertIs(check_password("wrong", encoded), False) # Salt entropy check. hasher = get_hasher("argon2") encoded_weak_salt = make_password("lètmein", "iodizedsalt", "argon2") encoded_strong_salt = make_password("lètmein", hasher.salt(), "argon2") self.assertIs(hasher.must_update(encoded_weak_salt), True) self.assertIs(hasher.must_update(encoded_strong_salt), False) def test_argon2_decode(self): salt = "abcdefghijk" encoded = make_password("lètmein", salt=salt, hasher="argon2") hasher = get_hasher("argon2") decoded = hasher.decode(encoded) self.assertEqual(decoded["memory_cost"], hasher.memory_cost) self.assertEqual(decoded["parallelism"], hasher.parallelism) self.assertEqual(decoded["salt"], salt) self.assertEqual(decoded["time_cost"], hasher.time_cost) def test_argon2_upgrade(self): self._test_argon2_upgrade("time_cost", "time cost", 1) self._test_argon2_upgrade("memory_cost", "memory cost", 64) self._test_argon2_upgrade("parallelism", "parallelism", 1) def test_argon2_version_upgrade(self): hasher = get_hasher("argon2") state = {"upgraded": False} encoded = ( "argon2$argon2id$v=19$m=102400,t=2,p=8$Y041dExhNkljRUUy$TMa6A8fPJh" "CAUXRhJXCXdw" ) def setter(password): state["upgraded"] = True old_m = hasher.memory_cost old_t = hasher.time_cost old_p = hasher.parallelism try: hasher.memory_cost = 8 hasher.time_cost = 1 hasher.parallelism = 1 self.assertTrue(check_password("secret", encoded, setter, "argon2")) self.assertTrue(state["upgraded"]) finally: hasher.memory_cost = old_m hasher.time_cost = old_t hasher.parallelism = old_p def _test_argon2_upgrade(self, attr, summary_key, new_value): hasher = get_hasher("argon2") self.assertEqual("argon2", hasher.algorithm) self.assertNotEqual(getattr(hasher, attr), new_value) old_value = getattr(hasher, attr) try: # Generate hash with attr set to 1 setattr(hasher, attr, new_value) encoded = make_password("letmein", hasher="argon2") attr_value = hasher.safe_summary(encoded)[summary_key] self.assertEqual(attr_value, new_value) state = {"upgraded": False} def setter(password): state["upgraded"] = True # No upgrade is triggered. self.assertTrue(check_password("letmein", encoded, setter, "argon2")) self.assertFalse(state["upgraded"]) # Revert to the old rounds count and ... setattr(hasher, attr, old_value) # ... check if the password would get updated to the new count. 
            self.assertTrue(check_password("letmein", encoded, setter, "argon2"))
            self.assertTrue(state["upgraded"])
        finally:
            setattr(hasher, attr, old_value)


@skipUnless(scrypt, "scrypt not available")
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPassScrypt(SimpleTestCase):
    def test_scrypt(self):
        encoded = make_password("lètmein", "seasalt", "scrypt")
        self.assertEqual(
            encoded,
            "scrypt$16384$seasalt$8$1$Qj3+9PPyRjSJIebHnG81TMjsqtaIGxNQG/aEB/NY"
            "afTJ7tibgfYz71m0ldQESkXFRkdVCBhhY8mx7rQwite/Pw==",
        )
        self.assertIs(is_password_usable(encoded), True)
        self.assertIs(check_password("lètmein", encoded), True)
        self.assertIs(check_password("lètmeinz", encoded), False)
        self.assertEqual(identify_hasher(encoded).algorithm, "scrypt")
        # Blank passwords.
        blank_encoded = make_password("", "seasalt", "scrypt")
        self.assertIs(blank_encoded.startswith("scrypt$"), True)
        self.assertIs(is_password_usable(blank_encoded), True)
        self.assertIs(check_password("", blank_encoded), True)
        self.assertIs(check_password(" ", blank_encoded), False)

    def test_scrypt_decode(self):
        encoded = make_password("lètmein", "seasalt", "scrypt")
        hasher = get_hasher("scrypt")
        decoded = hasher.decode(encoded)
        tests = [
            ("block_size", hasher.block_size),
            ("parallelism", hasher.parallelism),
            ("salt", "seasalt"),
            ("work_factor", hasher.work_factor),
        ]
        for key, expected in tests:
            with self.subTest(key=key):
                self.assertEqual(decoded[key], expected)

    def _test_scrypt_upgrade(self, attr, summary_key, new_value):
        hasher = get_hasher("scrypt")
        self.assertEqual(hasher.algorithm, "scrypt")
        self.assertNotEqual(getattr(hasher, attr), new_value)

        old_value = getattr(hasher, attr)
        try:
            # Generate hash with attr set to the new value.
            setattr(hasher, attr, new_value)
            encoded = make_password("lètmein", "seasalt", "scrypt")
            attr_value = hasher.safe_summary(encoded)[summary_key]
            self.assertEqual(attr_value, new_value)

            state = {"upgraded": False}

            def setter(password):
                state["upgraded"] = True

            # No update is triggered.
            self.assertIs(check_password("lètmein", encoded, setter, "scrypt"), True)
            self.assertIs(state["upgraded"], False)
            # Revert to the old value.
            setattr(hasher, attr, old_value)
            # Password is updated.
            self.assertIs(check_password("lètmein", encoded, setter, "scrypt"), True)
            self.assertIs(state["upgraded"], True)
        finally:
            setattr(hasher, attr, old_value)

    def test_scrypt_upgrade(self):
        tests = [
            ("work_factor", "work factor", 2**11),
            ("block_size", "block size", 10),
            ("parallelism", "parallelism", 2),
        ]
        for attr, summary_key, new_value in tests:
            with self.subTest(attr=attr):
                self._test_scrypt_upgrade(attr, summary_key, new_value)
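
# A minimal sketch (the helper name is invented and `stored` is a plain dict
# standing in for a user model; this is not part of the test suite) of the
# upgrade flow the tests above exercise: check_password() invokes `setter`
# with the raw password whenever the stored hash should be re-encoded, e.g.
# after the preferred hasher or its work factor changes.
def _demo_verify_and_upgrade(stored, raw_password):
    def setter(raw):
        # Re-hash with the currently preferred hasher and parameters.
        stored["password"] = make_password(raw)

    return check_password(raw_password, stored["password"], setter)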
import errno import gzip import os import struct import tempfile import unittest from io import BytesIO, StringIO, TextIOWrapper from pathlib import Path from unittest import mock from django.core.files import File, locks from django.core.files.base import ContentFile from django.core.files.move import file_move_safe from django.core.files.temp import NamedTemporaryFile from django.core.files.uploadedfile import ( InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile, UploadedFile, ) from django.test import override_settings try: from PIL import Image, features HAS_WEBP = features.check("webp") except ImportError: Image = None HAS_WEBP = False else: from django.core.files import images class FileTests(unittest.TestCase): def test_unicode_uploadedfile_name(self): uf = UploadedFile(name="¿Cómo?", content_type="text") self.assertIs(type(repr(uf)), str) def test_unicode_file_name(self): f = File(None, "djángö") self.assertIs(type(repr(f)), str) def test_context_manager(self): orig_file = tempfile.TemporaryFile() base_file = File(orig_file) with base_file as f: self.assertIs(base_file, f) self.assertFalse(f.closed) self.assertTrue(f.closed) self.assertTrue(orig_file.closed) def test_open_resets_opened_file_to_start_and_returns_context_manager(self): file = File(BytesIO(b"content")) file.read() with file.open() as f: self.assertEqual(f.read(), b"content") def test_open_reopens_closed_file_and_returns_context_manager(self): temporary_file = tempfile.NamedTemporaryFile(delete=False) file = File(temporary_file) try: file.close() with file.open() as f: self.assertFalse(f.closed) finally: # remove temporary file os.unlink(file.name) def test_namedtemporaryfile_closes(self): """ The symbol django.core.files.NamedTemporaryFile is assigned as a different class on different operating systems. In any case, the result should minimally mock some of the API of tempfile.NamedTemporaryFile from the Python standard library. """ tempfile = NamedTemporaryFile() self.assertTrue(hasattr(tempfile, "closed")) self.assertFalse(tempfile.closed) tempfile.close() self.assertTrue(tempfile.closed) def test_file_mode(self): # Should not set mode to None if it is not present. # See #14681, stdlib gzip module crashes if mode is set to None file = SimpleUploadedFile("mode_test.txt", b"content") self.assertFalse(hasattr(file, "mode")) gzip.GzipFile(fileobj=file) def test_file_iteration(self): """ File objects should yield lines when iterated over. Refs #22107. """ file = File(BytesIO(b"one\ntwo\nthree")) self.assertEqual(list(file), [b"one\n", b"two\n", b"three"]) def test_file_iteration_windows_newlines(self): """ #8149 - File objects with \r\n line endings should yield lines when iterated over. """ f = File(BytesIO(b"one\r\ntwo\r\nthree")) self.assertEqual(list(f), [b"one\r\n", b"two\r\n", b"three"]) def test_file_iteration_mac_newlines(self): """ #8149 - File objects with \r line endings should yield lines when iterated over. """ f = File(BytesIO(b"one\rtwo\rthree")) self.assertEqual(list(f), [b"one\r", b"two\r", b"three"]) def test_file_iteration_mixed_newlines(self): f = File(BytesIO(b"one\rtwo\nthree\r\nfour")) self.assertEqual(list(f), [b"one\r", b"two\n", b"three\r\n", b"four"]) def test_file_iteration_with_unix_newline_at_chunk_boundary(self): f = File(BytesIO(b"one\ntwo\nthree")) # Set chunk size to create a boundary after \n: # b'one\n... 
# ^ f.DEFAULT_CHUNK_SIZE = 4 self.assertEqual(list(f), [b"one\n", b"two\n", b"three"]) def test_file_iteration_with_windows_newline_at_chunk_boundary(self): f = File(BytesIO(b"one\r\ntwo\r\nthree")) # Set chunk size to create a boundary between \r and \n: # b'one\r\n... # ^ f.DEFAULT_CHUNK_SIZE = 4 self.assertEqual(list(f), [b"one\r\n", b"two\r\n", b"three"]) def test_file_iteration_with_mac_newline_at_chunk_boundary(self): f = File(BytesIO(b"one\rtwo\rthree")) # Set chunk size to create a boundary after \r: # b'one\r... # ^ f.DEFAULT_CHUNK_SIZE = 4 self.assertEqual(list(f), [b"one\r", b"two\r", b"three"]) def test_file_iteration_with_text(self): f = File(StringIO("one\ntwo\nthree")) self.assertEqual(list(f), ["one\n", "two\n", "three"]) def test_readable(self): with tempfile.TemporaryFile() as temp, File( temp, name="something.txt" ) as test_file: self.assertTrue(test_file.readable()) self.assertFalse(test_file.readable()) def test_writable(self): with tempfile.TemporaryFile() as temp, File( temp, name="something.txt" ) as test_file: self.assertTrue(test_file.writable()) self.assertFalse(test_file.writable()) with tempfile.TemporaryFile("rb") as temp, File( temp, name="something.txt" ) as test_file: self.assertFalse(test_file.writable()) def test_seekable(self): with tempfile.TemporaryFile() as temp, File( temp, name="something.txt" ) as test_file: self.assertTrue(test_file.seekable()) self.assertFalse(test_file.seekable()) def test_io_wrapper(self): content = "vive l'été\n" with tempfile.TemporaryFile() as temp, File( temp, name="something.txt" ) as test_file: test_file.write(content.encode()) test_file.seek(0) wrapper = TextIOWrapper(test_file, "utf-8", newline="\n") self.assertEqual(wrapper.read(), content) wrapper.write(content) wrapper.seek(0) self.assertEqual(wrapper.read(), content * 2) test_file = wrapper.detach() test_file.seek(0) self.assertEqual(test_file.read(), (content * 2).encode()) def test_exclusive_lock(self): file_path = Path(__file__).parent / "test.png" with open(file_path) as f1, open(file_path) as f2: self.assertIs(locks.lock(f1, locks.LOCK_EX), True) self.assertIs(locks.lock(f2, locks.LOCK_EX | locks.LOCK_NB), False) self.assertIs(locks.lock(f2, locks.LOCK_SH | locks.LOCK_NB), False) self.assertIs(locks.unlock(f1), True) def test_shared_lock(self): file_path = Path(__file__).parent / "test.png" with open(file_path) as f1, open(file_path) as f2: self.assertIs(locks.lock(f1, locks.LOCK_SH), True) self.assertIs(locks.lock(f2, locks.LOCK_SH | locks.LOCK_NB), True) self.assertIs(locks.unlock(f1), True) self.assertIs(locks.unlock(f2), True) class NoNameFileTestCase(unittest.TestCase): """ Other examples of unnamed files may be tempfile.SpooledTemporaryFile or urllib.urlopen() """ def test_noname_file_default_name(self): self.assertIsNone(File(BytesIO(b"A file with no name")).name) def test_noname_file_get_size(self): self.assertEqual(File(BytesIO(b"A file with no name")).size, 19) class ContentFileTestCase(unittest.TestCase): def test_content_file_default_name(self): self.assertIsNone(ContentFile(b"content").name) def test_content_file_custom_name(self): """ The constructor of ContentFile accepts 'name' (#16590). """ name = "I can have a name too!" self.assertEqual(ContentFile(b"content", name=name).name, name) def test_content_file_input_type(self): """ ContentFile can accept both bytes and strings and the retrieved content is of the same type. 
""" self.assertIsInstance(ContentFile(b"content").read(), bytes) self.assertIsInstance(ContentFile("español").read(), str) def test_open_resets_file_to_start_and_returns_context_manager(self): file = ContentFile(b"content") with file.open() as f: self.assertEqual(f.read(), b"content") with file.open() as f: self.assertEqual(f.read(), b"content") def test_size_changing_after_writing(self): """ContentFile.size changes after a write().""" f = ContentFile("") self.assertEqual(f.size, 0) f.write("Test ") f.write("string") self.assertEqual(f.size, 11) with f.open() as fh: self.assertEqual(fh.read(), "Test string") class InMemoryUploadedFileTests(unittest.TestCase): def test_open_resets_file_to_start_and_returns_context_manager(self): uf = InMemoryUploadedFile(StringIO("1"), "", "test", "text/plain", 1, "utf8") uf.read() with uf.open() as f: self.assertEqual(f.read(), "1") class TemporaryUploadedFileTests(unittest.TestCase): def test_extension_kept(self): """The temporary file name has the same suffix as the original file.""" with TemporaryUploadedFile("test.txt", "text/plain", 1, "utf8") as temp_file: self.assertTrue(temp_file.file.name.endswith(".upload.txt")) def test_file_upload_temp_dir_pathlib(self): with tempfile.TemporaryDirectory() as tmp_dir: with override_settings(FILE_UPLOAD_TEMP_DIR=Path(tmp_dir)): with TemporaryUploadedFile( "test.txt", "text/plain", 1, "utf-8" ) as temp_file: self.assertTrue(os.path.exists(temp_file.file.name)) class DimensionClosingBug(unittest.TestCase): """ get_image_dimensions() properly closes files (#8817) """ @unittest.skipUnless(Image, "Pillow not installed") def test_not_closing_of_files(self): """ Open files passed into get_image_dimensions() should stay opened. """ empty_io = BytesIO() try: images.get_image_dimensions(empty_io) finally: self.assertTrue(not empty_io.closed) @unittest.skipUnless(Image, "Pillow not installed") def test_closing_of_filenames(self): """ get_image_dimensions() called with a filename should closed the file. """ # We need to inject a modified open() builtin into the images module # that checks if the file was closed properly if the function is # called with a filename instead of a file object. # get_image_dimensions will call our catching_open instead of the # regular builtin one. class FileWrapper: _closed = [] def __init__(self, f): self.f = f def __getattr__(self, name): return getattr(self.f, name) def close(self): self._closed.append(True) self.f.close() def catching_open(*args): return FileWrapper(open(*args)) images.open = catching_open try: images.get_image_dimensions( os.path.join(os.path.dirname(__file__), "test1.png") ) finally: del images.open self.assertTrue(FileWrapper._closed) class InconsistentGetImageDimensionsBug(unittest.TestCase): """ get_image_dimensions() works properly after various calls using a file handler (#11158) """ @unittest.skipUnless(Image, "Pillow not installed") def test_multiple_calls(self): """ Multiple calls of get_image_dimensions() should return the same size. """ img_path = os.path.join(os.path.dirname(__file__), "test.png") with open(img_path, "rb") as fh: image = images.ImageFile(fh) image_pil = Image.open(fh) size_1 = images.get_image_dimensions(image) size_2 = images.get_image_dimensions(image) self.assertEqual(image_pil.size, size_1) self.assertEqual(size_1, size_2) @unittest.skipUnless(Image, "Pillow not installed") def test_bug_19457(self): """ Regression test for #19457 get_image_dimensions() fails on some PNGs, while Image.size is working good on them. 
""" img_path = os.path.join(os.path.dirname(__file__), "magic.png") size = images.get_image_dimensions(img_path) with open(img_path, "rb") as fh: self.assertEqual(size, Image.open(fh).size) @unittest.skipUnless(Image, "Pillow not installed") class GetImageDimensionsTests(unittest.TestCase): def test_invalid_image(self): """ get_image_dimensions() should return (None, None) for the dimensions of invalid images (#24441). brokenimg.png is not a valid image and it has been generated by: $ echo "123" > brokenimg.png """ img_path = os.path.join(os.path.dirname(__file__), "brokenimg.png") with open(img_path, "rb") as fh: size = images.get_image_dimensions(fh) self.assertEqual(size, (None, None)) def test_valid_image(self): """ get_image_dimensions() should catch struct.error while feeding the PIL Image parser (#24544). Emulates the Parser feed error. Since the error is raised on every feed attempt, the resulting image size should be invalid: (None, None). """ img_path = os.path.join(os.path.dirname(__file__), "test.png") with mock.patch("PIL.ImageFile.Parser.feed", side_effect=struct.error): with open(img_path, "rb") as fh: size = images.get_image_dimensions(fh) self.assertEqual(size, (None, None)) def test_missing_file(self): size = images.get_image_dimensions("missing.png") self.assertEqual(size, (None, None)) @unittest.skipUnless(HAS_WEBP, "WEBP not installed") def test_webp(self): img_path = os.path.join(os.path.dirname(__file__), "test.webp") with open(img_path, "rb") as fh: size = images.get_image_dimensions(fh) self.assertEqual(size, (540, 405)) class FileMoveSafeTests(unittest.TestCase): def test_file_move_overwrite(self): handle_a, self.file_a = tempfile.mkstemp() handle_b, self.file_b = tempfile.mkstemp() # file_move_safe() raises OSError if the destination file exists and # allow_overwrite is False. with self.assertRaises(FileExistsError): file_move_safe(self.file_a, self.file_b, allow_overwrite=False) # should allow it and continue on if allow_overwrite is True self.assertIsNone( file_move_safe(self.file_a, self.file_b, allow_overwrite=True) ) os.close(handle_a) os.close(handle_b) def test_file_move_permissionerror(self): """ file_move_safe() ignores PermissionError thrown by copystat() and copymode(). For example, PermissionError can be raised when the destination filesystem is CIFS, or when relabel is disabled by SELinux across filesystems. """ permission_error = PermissionError(errno.EPERM, "msg") os_error = OSError("msg") handle_a, self.file_a = tempfile.mkstemp() handle_b, self.file_b = tempfile.mkstemp() handle_c, self.file_c = tempfile.mkstemp() try: # This exception is required to reach the copystat() call in # file_safe_move(). with mock.patch("django.core.files.move.os.rename", side_effect=OSError()): # An error besides PermissionError isn't ignored. with mock.patch( "django.core.files.move.copystat", side_effect=os_error ): with self.assertRaises(OSError): file_move_safe(self.file_a, self.file_b, allow_overwrite=True) # When copystat() throws PermissionError, copymode() error besides # PermissionError isn't ignored. with mock.patch( "django.core.files.move.copystat", side_effect=permission_error ): with mock.patch( "django.core.files.move.copymode", side_effect=os_error ): with self.assertRaises(OSError): file_move_safe( self.file_a, self.file_b, allow_overwrite=True ) # PermissionError raised by copystat() is ignored. 
with mock.patch( "django.core.files.move.copystat", side_effect=permission_error ): self.assertIsNone( file_move_safe(self.file_a, self.file_b, allow_overwrite=True) ) # PermissionError raised by copymode() is ignored too. with mock.patch( "django.core.files.move.copymode", side_effect=permission_error ): self.assertIsNone( file_move_safe( self.file_c, self.file_b, allow_overwrite=True ) ) finally: os.close(handle_a) os.close(handle_b) os.close(handle_c) class SpooledTempTests(unittest.TestCase): def test_in_memory_spooled_temp(self): with tempfile.SpooledTemporaryFile() as temp: temp.write(b"foo bar baz quux\n") django_file = File(temp, name="something.txt") self.assertEqual(django_file.size, 17) def test_written_spooled_temp(self): with tempfile.SpooledTemporaryFile(max_size=4) as temp: temp.write(b"foo bar baz quux\n") django_file = File(temp, name="something.txt") self.assertEqual(django_file.size, 17)
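
# A minimal usage sketch (the helper name is invented; this is not part of
# the test suite) of the bool-returning lock API exercised in FileTests
# above: with LOCK_NB, locks.lock() returns False instead of blocking when
# another handle already holds the lock.
import contextlib


@contextlib.contextmanager
def _try_exclusive_lock(fh):
    acquired = locks.lock(fh, locks.LOCK_EX | locks.LOCK_NB)
    try:
        yield acquired  # True if we got the lock, False otherwise
    finally:
        if acquired:
            locks.unlock(fh)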
import unittest
from io import StringIO

from django.db import connection
from django.test import TestCase
from django.test.runner import DiscoverRunner
from django.utils.version import PY311

from .models import Person


@unittest.skipUnless(
    connection.vendor == "sqlite", "Only run on sqlite so we can check output SQL."
)
class TestDebugSQL(unittest.TestCase):
    class PassingTest(TestCase):
        def runTest(self):
            Person.objects.filter(first_name="pass").count()

    class FailingTest(TestCase):
        def runTest(self):
            Person.objects.filter(first_name="fail").count()
            self.fail()

    class ErrorTest(TestCase):
        def runTest(self):
            Person.objects.filter(first_name="error").count()
            raise Exception

    class ErrorSetUpTestDataTest(TestCase):
        @classmethod
        def setUpTestData(cls):
            raise Exception

        def runTest(self):
            pass

    class PassingSubTest(TestCase):
        def runTest(self):
            with self.subTest():
                Person.objects.filter(first_name="subtest-pass").count()

    class FailingSubTest(TestCase):
        def runTest(self):
            with self.subTest():
                Person.objects.filter(first_name="subtest-fail").count()
                self.fail()

    class ErrorSubTest(TestCase):
        def runTest(self):
            with self.subTest():
                Person.objects.filter(first_name="subtest-error").count()
                raise Exception

    def _test_output(self, verbosity):
        runner = DiscoverRunner(debug_sql=True, verbosity=0)
        suite = runner.test_suite()
        suite.addTest(self.FailingTest())
        suite.addTest(self.ErrorTest())
        suite.addTest(self.PassingTest())
        suite.addTest(self.PassingSubTest())
        suite.addTest(self.FailingSubTest())
        suite.addTest(self.ErrorSubTest())
        old_config = runner.setup_databases()
        stream = StringIO()
        resultclass = runner.get_resultclass()
        runner.test_runner(
            verbosity=verbosity,
            stream=stream,
            resultclass=resultclass,
        ).run(suite)
        runner.teardown_databases(old_config)

        return stream.getvalue()

    def test_output_normal(self):
        full_output = self._test_output(1)
        for output in self.expected_outputs:
            self.assertIn(output, full_output)
        for output in self.verbose_expected_outputs:
            self.assertNotIn(output, full_output)

    def test_output_verbose(self):
        full_output = self._test_output(2)
        for output in self.expected_outputs:
            self.assertIn(output, full_output)
        for output in self.verbose_expected_outputs:
            self.assertIn(output, full_output)

    expected_outputs = [
        (
            """SELECT COUNT(*) AS "__count"\n"""
            """FROM "test_runner_person"\n"""
            """WHERE "test_runner_person"."first_name" = 'error';"""
        ),
        (
            """SELECT COUNT(*) AS "__count"\n"""
            """FROM "test_runner_person"\n"""
            """WHERE "test_runner_person"."first_name" = 'fail';"""
        ),
        (
            """SELECT COUNT(*) AS "__count"\n"""
            """FROM "test_runner_person"\n"""
            """WHERE "test_runner_person"."first_name" = 'subtest-error';"""
        ),
        (
            """SELECT COUNT(*) AS "__count"\n"""
            """FROM "test_runner_person"\n"""
            """WHERE "test_runner_person"."first_name" = 'subtest-fail';"""
        ),
    ]

    # Python 3.11 uses the fully qualified test name in the output.
    method_name = ".runTest" if PY311 else ""
    test_class_path = "test_runner.test_debug_sql.TestDebugSQL"
    verbose_expected_outputs = [
        f"runTest ({test_class_path}.FailingTest{method_name}) ... FAIL",
        f"runTest ({test_class_path}.ErrorTest{method_name}) ... ERROR",
        f"runTest ({test_class_path}.PassingTest{method_name}) ... ok",
        # If there are errors/failures in subtests but not in the test itself,
        # the status is not written. That behavior comes from Python.
f"runTest ({test_class_path}.FailingSubTest{method_name}) ...", f"runTest ({test_class_path}.ErrorSubTest{method_name}) ...", ( """SELECT COUNT(*) AS "__count" """ """FROM "test_runner_person" WHERE """ """"test_runner_person"."first_name" = 'pass';""" ), ( """SELECT COUNT(*) AS "__count" """ """FROM "test_runner_person" WHERE """ """"test_runner_person"."first_name" = 'subtest-pass';""" ), ] def test_setupclass_exception(self): runner = DiscoverRunner(debug_sql=True, verbosity=0) suite = runner.test_suite() suite.addTest(self.ErrorSetUpTestDataTest()) old_config = runner.setup_databases() stream = StringIO() runner.test_runner( verbosity=0, stream=stream, resultclass=runner.get_resultclass(), ).run(suite) runner.teardown_databases(old_config) output = stream.getvalue() self.assertIn( "ERROR: setUpClass " "(test_runner.test_debug_sql.TestDebugSQL.ErrorSetUpTestDataTest)", output, )
01c19eab74e78c27c695dacc0d889b1cb612b42000d09b2c2ffd2460b30163df
""" A series of tests to establish that the command-line management tools work as advertised - especially with regards to the handling of the DJANGO_SETTINGS_MODULE and default settings.py files. """ import os import re import shutil import socket import stat import subprocess import sys import tempfile import unittest from io import StringIO from unittest import mock from django import conf, get_version from django.conf import settings from django.core.management import ( BaseCommand, CommandError, call_command, color, execute_from_command_line, ) from django.core.management.commands.loaddata import Command as LoaddataCommand from django.core.management.commands.runserver import Command as RunserverCommand from django.core.management.commands.testserver import Command as TestserverCommand from django.db import ConnectionHandler, connection from django.db.migrations.recorder import MigrationRecorder from django.test import LiveServerTestCase, SimpleTestCase, TestCase, override_settings from django.test.utils import captured_stderr, captured_stdout from django.urls import path from django.utils.version import PY39 from django.views.static import serve from . import urls custom_templates_dir = os.path.join(os.path.dirname(__file__), "custom_templates") SYSTEM_CHECK_MSG = "System check identified no issues" HAS_BLACK = shutil.which("black") class AdminScriptTestCase(SimpleTestCase): def setUp(self): tmpdir = tempfile.TemporaryDirectory() self.addCleanup(tmpdir.cleanup) # os.path.realpath() is required for temporary directories on macOS, # where `/var` is a symlink to `/private/var`. self.test_dir = os.path.realpath(os.path.join(tmpdir.name, "test_project")) os.mkdir(self.test_dir) def write_settings(self, filename, apps=None, is_dir=False, sdict=None, extra=None): if is_dir: settings_dir = os.path.join(self.test_dir, filename) os.mkdir(settings_dir) settings_file_path = os.path.join(settings_dir, "__init__.py") else: settings_file_path = os.path.join(self.test_dir, filename) with open(settings_file_path, "w") as settings_file: settings_file.write( "# Settings file automatically generated by admin_scripts test case\n" ) if extra: settings_file.write("%s\n" % extra) exports = [ "DATABASES", "DEFAULT_AUTO_FIELD", "ROOT_URLCONF", "SECRET_KEY", "USE_TZ", ] for s in exports: if hasattr(settings, s): o = getattr(settings, s) if not isinstance(o, (dict, tuple, list)): o = "'%s'" % o settings_file.write("%s = %s\n" % (s, o)) if apps is None: apps = [ "django.contrib.auth", "django.contrib.contenttypes", "admin_scripts", ] settings_file.write("INSTALLED_APPS = %s\n" % apps) if sdict: for k, v in sdict.items(): settings_file.write("%s = %s\n" % (k, v)) def _ext_backend_paths(self): """ Returns the paths for any external backend packages. """ paths = [] for backend in settings.DATABASES.values(): package = backend["ENGINE"].split(".")[0] if package != "django": backend_pkg = __import__(package) backend_dir = os.path.dirname(backend_pkg.__file__) paths.append(os.path.dirname(backend_dir)) return paths def run_test(self, args, settings_file=None, apps=None, umask=None): base_dir = os.path.dirname(self.test_dir) # The base dir for Django's tests is one level up. tests_dir = os.path.dirname(os.path.dirname(__file__)) # The base dir for Django is one level above the test dir. We don't use # `import django` to figure that out, so we don't pick up a Django # from site-packages or similar. 
django_dir = os.path.dirname(tests_dir) ext_backend_base_dirs = self._ext_backend_paths() # Define a temporary environment for the subprocess test_environ = os.environ.copy() # Set the test environment if settings_file: test_environ["DJANGO_SETTINGS_MODULE"] = settings_file elif "DJANGO_SETTINGS_MODULE" in test_environ: del test_environ["DJANGO_SETTINGS_MODULE"] python_path = [base_dir, django_dir, tests_dir] python_path.extend(ext_backend_base_dirs) test_environ["PYTHONPATH"] = os.pathsep.join(python_path) test_environ["PYTHONWARNINGS"] = "" p = subprocess.run( [sys.executable, *args], capture_output=True, cwd=self.test_dir, env=test_environ, text=True, # subprocess.run()'s umask was added in Python 3.9. **({"umask": umask} if umask and PY39 else {}), ) return p.stdout, p.stderr def run_django_admin(self, args, settings_file=None, umask=None): return self.run_test(["-m", "django", *args], settings_file, umask=umask) def run_manage(self, args, settings_file=None, manage_py=None): template_manage_py = ( os.path.join(os.path.dirname(__file__), manage_py) if manage_py else os.path.join( os.path.dirname(conf.__file__), "project_template", "manage.py-tpl" ) ) test_manage_py = os.path.join(self.test_dir, "manage.py") shutil.copyfile(template_manage_py, test_manage_py) with open(test_manage_py) as fp: manage_py_contents = fp.read() manage_py_contents = manage_py_contents.replace( "{{ project_name }}", "test_project" ) with open(test_manage_py, "w") as fp: fp.write(manage_py_contents) return self.run_test(["./manage.py", *args], settings_file) def assertNoOutput(self, stream): "Utility assertion: assert that the given stream is empty" self.assertEqual( len(stream), 0, "Stream should be empty: actually contains '%s'" % stream ) def assertOutput(self, stream, msg, regex=False): "Utility assertion: assert that the given message exists in the output" if regex: self.assertIsNotNone( re.search(msg, stream), "'%s' does not match actual output text '%s'" % (msg, stream), ) else: self.assertIn( msg, stream, "'%s' does not match actual output text '%s'" % (msg, stream), ) def assertNotInOutput(self, stream, msg): "Utility assertion: assert that the given message doesn't exist in the output" self.assertNotIn( msg, stream, "'%s' matches actual output text '%s'" % (msg, stream) ) ########################################################################## # DJANGO ADMIN TESTS # This first series of test classes checks the environment processing # of the django-admin. ########################################################################## class DjangoAdminNoSettings(AdminScriptTestCase): "A series of tests for django-admin when there is no settings.py file." def test_builtin_command(self): """ no settings: django-admin builtin commands fail with an error when no settings provided. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "settings are not configured") def test_builtin_with_bad_settings(self): """ no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist. """ args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist. 
""" args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_commands_with_invalid_settings(self): """ Commands that don't require settings succeed if the settings file doesn't exist. """ args = ["startproject"] out, err = self.run_django_admin(args, settings_file="bad_settings") self.assertNoOutput(out) self.assertOutput(err, "You must provide a project name", regex=True) class DjangoAdminDefaultSettings(AdminScriptTestCase): """ A series of tests for django-admin when using a settings.py file that contains the test application. """ def setUp(self): super().setUp() self.write_settings("settings.py") def test_builtin_command(self): """ default: django-admin builtin commands fail with an error when no settings provided. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "settings are not configured") def test_builtin_with_settings(self): """ default: django-admin builtin commands succeed if settings are provided as argument. """ args = ["check", "--settings=test_project.settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): """ default: django-admin builtin commands succeed if settings are provided in the environment. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): """ default: django-admin builtin commands fail if settings file (from argument) doesn't exist. """ args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ default: django-admin builtin commands fail if settings file (from environment) doesn't exist. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): """ default: django-admin can't execute user commands if it isn't provided settings. """ args = ["noargs_command"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): """ default: django-admin can execute user commands if settings are provided as argument. """ args = ["noargs_command", "--settings=test_project.settings"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): """ default: django-admin can execute user commands if settings are provided in environment. """ args = ["noargs_command"] out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class DjangoAdminFullPathDefaultSettings(AdminScriptTestCase): """ A series of tests for django-admin when using a settings.py file that contains the test application specified using a full path. 
""" def setUp(self): super().setUp() self.write_settings( "settings.py", [ "django.contrib.auth", "django.contrib.contenttypes", "admin_scripts", "admin_scripts.complex_app", ], ) def test_builtin_command(self): """ fulldefault: django-admin builtin commands fail with an error when no settings provided. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "settings are not configured") def test_builtin_with_settings(self): """ fulldefault: django-admin builtin commands succeed if a settings file is provided. """ args = ["check", "--settings=test_project.settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): """ fulldefault: django-admin builtin commands succeed if the environment contains settings. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): """ fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist. """ args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): """ fulldefault: django-admin can't execute user commands unless settings are provided. """ args = ["noargs_command"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): """ fulldefault: django-admin can execute user commands if settings are provided as argument. """ args = ["noargs_command", "--settings=test_project.settings"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): """ fulldefault: django-admin can execute user commands if settings are provided in environment. """ args = ["noargs_command"] out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class DjangoAdminMinimalSettings(AdminScriptTestCase): """ A series of tests for django-admin when using a settings.py file that doesn't contain the test application. """ def setUp(self): super().setUp() self.write_settings( "settings.py", apps=["django.contrib.auth", "django.contrib.contenttypes"] ) def test_builtin_command(self): """ minimal: django-admin builtin commands fail with an error when no settings provided. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "settings are not configured") def test_builtin_with_settings(self): """ minimal: django-admin builtin commands fail if settings are provided as argument. 
""" args = ["check", "--settings=test_project.settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_environment(self): """ minimal: django-admin builtin commands fail if settings are provided in the environment. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_bad_settings(self): """ minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist. """ args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "minimal: django-admin can't execute user commands unless settings are provided" args = ["noargs_command"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): """ minimal: django-admin can't execute user commands, even if settings are provided as argument. """ args = ["noargs_command", "--settings=test_project.settings"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_environment(self): """ minimal: django-admin can't execute user commands, even if settings are provided in environment. """ args = ["noargs_command"] out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") class DjangoAdminAlternateSettings(AdminScriptTestCase): """ A series of tests for django-admin when using a settings file with a name other than 'settings.py'. """ def setUp(self): super().setUp() self.write_settings("alternate_settings.py") def test_builtin_command(self): """ alternate: django-admin builtin commands fail with an error when no settings provided. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "settings are not configured") def test_builtin_with_settings(self): """ alternate: django-admin builtin commands succeed if settings are provided as argument. """ args = ["check", "--settings=test_project.alternate_settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): """ alternate: django-admin builtin commands succeed if settings are provided in the environment. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "test_project.alternate_settings") self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): """ alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist. 
""" args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): """ alternate: django-admin can't execute user commands unless settings are provided. """ args = ["noargs_command"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): """ alternate: django-admin can execute user commands if settings are provided as argument. """ args = ["noargs_command", "--settings=test_project.alternate_settings"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): """ alternate: django-admin can execute user commands if settings are provided in environment. """ args = ["noargs_command"] out, err = self.run_django_admin(args, "test_project.alternate_settings") self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class DjangoAdminMultipleSettings(AdminScriptTestCase): """ A series of tests for django-admin when multiple settings files (including the default 'settings.py') are available. The default settings file is insufficient for performing the operations described, so the alternate settings must be used by the running script. """ def setUp(self): super().setUp() self.write_settings( "settings.py", apps=["django.contrib.auth", "django.contrib.contenttypes"] ) self.write_settings("alternate_settings.py") def test_builtin_command(self): """ alternate: django-admin builtin commands fail with an error when no settings provided. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "settings are not configured") def test_builtin_with_settings(self): """ alternate: django-admin builtin commands succeed if settings are provided as argument. """ args = ["check", "--settings=test_project.alternate_settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): """ alternate: django-admin builtin commands succeed if settings are provided in the environment. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "test_project.alternate_settings") self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): """ alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist. """ args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist. 
""" args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): """ alternate: django-admin can't execute user commands unless settings are provided. """ args = ["noargs_command"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): """ alternate: django-admin can execute user commands if settings are provided as argument. """ args = ["noargs_command", "--settings=test_project.alternate_settings"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): """ alternate: django-admin can execute user commands if settings are provided in environment. """ args = ["noargs_command"] out, err = self.run_django_admin(args, "test_project.alternate_settings") self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class DjangoAdminSettingsDirectory(AdminScriptTestCase): """ A series of tests for django-admin when the settings file is in a directory. (see #9751). """ def setUp(self): super().setUp() self.write_settings("settings", is_dir=True) def test_setup_environ(self): "directory: startapp creates the correct directory" args = ["startapp", "settings_test"] app_path = os.path.join(self.test_dir, "settings_test") out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(err) self.assertTrue(os.path.exists(app_path)) with open(os.path.join(app_path, "apps.py")) as f: content = f.read() self.assertIn("class SettingsTestConfig(AppConfig)", content) self.assertIn( 'name = "settings_test"' if HAS_BLACK else "name = 'settings_test'", content, ) def test_setup_environ_custom_template(self): "directory: startapp creates the correct directory with a custom template" template_path = os.path.join(custom_templates_dir, "app_template") args = ["startapp", "--template", template_path, "custom_settings_test"] app_path = os.path.join(self.test_dir, "custom_settings_test") out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(err) self.assertTrue(os.path.exists(app_path)) self.assertTrue(os.path.exists(os.path.join(app_path, "api.py"))) def test_startapp_unicode_name(self): """startapp creates the correct directory with Unicode characters.""" args = ["startapp", "こんにちは"] app_path = os.path.join(self.test_dir, "こんにちは") out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(err) self.assertTrue(os.path.exists(app_path)) with open(os.path.join(app_path, "apps.py"), encoding="utf8") as f: content = f.read() self.assertIn("class こんにちはConfig(AppConfig)", content) self.assertIn('name = "こんにちは"' if HAS_BLACK else "name = 'こんにちは'", content) def test_builtin_command(self): """ directory: django-admin builtin commands fail with an error when no settings provided. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "settings are not configured") def test_builtin_with_bad_settings(self): """ directory: django-admin builtin commands fail if settings file (from argument) doesn't exist. 
""" args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ directory: django-admin builtin commands fail if settings file (from environment) doesn't exist. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): """ directory: django-admin can't execute user commands unless settings are provided. """ args = ["noargs_command"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "No Django settings specified") self.assertOutput(err, "Unknown command: 'noargs_command'") def test_builtin_with_settings(self): """ directory: django-admin builtin commands succeed if settings are provided as argument. """ args = ["check", "--settings=test_project.settings", "admin_scripts"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): """ directory: django-admin builtin commands succeed if settings are provided in the environment. """ args = ["check", "admin_scripts"] out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) ########################################################################## # MANAGE.PY TESTS # This next series of test classes checks the environment processing # of the generated manage.py script ########################################################################## class ManageManuallyConfiguredSettings(AdminScriptTestCase): """Customized manage.py calling settings.configure().""" def test_non_existent_command_output(self): out, err = self.run_manage( ["invalid_command"], manage_py="configured_settings_manage.py" ) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'invalid_command'") self.assertNotInOutput(err, "No Django settings specified") class ManageNoSettings(AdminScriptTestCase): "A series of tests for manage.py when there is no settings.py file." def test_builtin_command(self): """ no settings: manage.py builtin commands fail with an error when no settings provided. """ args = ["check", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput( err, r"No module named '?(test_project\.)?settings'?", regex=True ) def test_builtin_with_bad_settings(self): """ no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist. """ args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist. """ args = ["check", "admin_scripts"] out, err = self.run_manage(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) class ManageDefaultSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings.py file that contains the test application. """ def setUp(self): super().setUp() self.write_settings("settings.py") def test_builtin_command(self): """ default: manage.py builtin commands succeed when default settings are appropriate. 
""" args = ["check", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_settings(self): """ default: manage.py builtin commands succeed if settings are provided as argument. """ args = ["check", "--settings=test_project.settings", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): """ default: manage.py builtin commands succeed if settings are provided in the environment. """ args = ["check", "admin_scripts"] out, err = self.run_manage(args, "test_project.settings") self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): """ default: manage.py builtin commands succeed if settings file (from argument) doesn't exist. """ args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ default: manage.py builtin commands fail if settings file (from environment) doesn't exist. """ args = ["check", "admin_scripts"] out, err = self.run_manage(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): """ default: manage.py can execute user commands when default settings are appropriate. """ args = ["noargs_command"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_settings(self): """ default: manage.py can execute user commands when settings are provided as argument. """ args = ["noargs_command", "--settings=test_project.settings"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): """ default: manage.py can execute user commands when settings are provided in environment. """ args = ["noargs_command"] out, err = self.run_manage(args, "test_project.settings") self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class ManageFullPathDefaultSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings.py file that contains the test application specified using a full path. """ def setUp(self): super().setUp() self.write_settings( "settings.py", ["django.contrib.auth", "django.contrib.contenttypes", "admin_scripts"], ) def test_builtin_command(self): """ fulldefault: manage.py builtin commands succeed when default settings are appropriate. """ args = ["check", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_settings(self): """ fulldefault: manage.py builtin commands succeed if settings are provided as argument. """ args = ["check", "--settings=test_project.settings", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): """ fulldefault: manage.py builtin commands succeed if settings are provided in the environment. 
""" args = ["check", "admin_scripts"] out, err = self.run_manage(args, "test_project.settings") self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): """ fulldefault: manage.py builtin commands succeed if settings file (from argument) doesn't exist. """ args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist. """ args = ["check", "admin_scripts"] out, err = self.run_manage(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): """ fulldefault: manage.py can execute user commands when default settings are appropriate. """ args = ["noargs_command"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_settings(self): """ fulldefault: manage.py can execute user commands when settings are provided as argument. """ args = ["noargs_command", "--settings=test_project.settings"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): """ fulldefault: manage.py can execute user commands when settings are provided in environment. """ args = ["noargs_command"] out, err = self.run_manage(args, "test_project.settings") self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class ManageMinimalSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings.py file that doesn't contain the test application. """ def setUp(self): super().setUp() self.write_settings( "settings.py", apps=["django.contrib.auth", "django.contrib.contenttypes"] ) def test_builtin_command(self): """ minimal: manage.py builtin commands fail with an error when no settings provided. """ args = ["check", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_settings(self): "minimal: manage.py builtin commands fail if settings are provided as argument" args = ["check", "--settings=test_project.settings", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_environment(self): """ minimal: manage.py builtin commands fail if settings are provided in the environment. """ args = ["check", "admin_scripts"] out, err = self.run_manage(args, "test_project.settings") self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_bad_settings(self): """ minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist. """ args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist. 
""" args = ["check", "admin_scripts"] out, err = self.run_manage(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "minimal: manage.py can't execute user commands without appropriate settings" args = ["noargs_command"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): """ minimal: manage.py can't execute user commands, even if settings are provided as argument. """ args = ["noargs_command", "--settings=test_project.settings"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_environment(self): """ minimal: manage.py can't execute user commands, even if settings are provided in environment. """ args = ["noargs_command"] out, err = self.run_manage(args, "test_project.settings") self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") class ManageAlternateSettings(AdminScriptTestCase): """A series of tests for manage.py when using a settings file with a name other than 'settings.py'. """ def setUp(self): super().setUp() self.write_settings("alternate_settings.py") def test_builtin_command(self): """ alternate: manage.py builtin commands fail with an error when no default settings provided. """ args = ["check", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput( err, r"No module named '?(test_project\.)?settings'?", regex=True ) def test_builtin_with_settings(self): "alternate: manage.py builtin commands work with settings provided as argument" args = ["check", "--settings=alternate_settings", "admin_scripts"] out, err = self.run_manage(args) self.assertOutput(out, SYSTEM_CHECK_MSG) self.assertNoOutput(err) def test_builtin_with_environment(self): """ alternate: manage.py builtin commands work if settings are provided in the environment """ args = ["check", "admin_scripts"] out, err = self.run_manage(args, "alternate_settings") self.assertOutput(out, SYSTEM_CHECK_MSG) self.assertNoOutput(err) def test_builtin_with_bad_settings(self): """ alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist. 
""" args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist """ args = ["check", "admin_scripts"] out, err = self.run_manage(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "alternate: manage.py can't execute user commands without settings" args = ["noargs_command"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput( err, r"No module named '?(test_project\.)?settings'?", regex=True ) def test_custom_command_with_settings(self): """ alternate: manage.py can execute user commands if settings are provided as argument """ args = ["noargs_command", "--settings=alternate_settings"] out, err = self.run_manage(args) self.assertOutput( out, "EXECUTE: noargs_command options=[('force_color', False), " "('no_color', False), ('pythonpath', None), ('settings', " "'alternate_settings'), ('traceback', False), ('verbosity', 1)]", ) self.assertNoOutput(err) def test_custom_command_with_environment(self): """ alternate: manage.py can execute user commands if settings are provided in environment. """ args = ["noargs_command"] out, err = self.run_manage(args, "alternate_settings") self.assertOutput( out, "EXECUTE: noargs_command options=[('force_color', False), " "('no_color', False), ('pythonpath', None), ('settings', None), " "('traceback', False), ('verbosity', 1)]", ) self.assertNoOutput(err) def test_custom_command_output_color(self): """ alternate: manage.py output syntax color can be deactivated with the `--no-color` option. """ args = ["noargs_command", "--no-color", "--settings=alternate_settings"] out, err = self.run_manage(args) self.assertOutput( out, "EXECUTE: noargs_command options=[('force_color', False), " "('no_color', True), ('pythonpath', None), ('settings', " "'alternate_settings'), ('traceback', False), ('verbosity', 1)]", ) self.assertNoOutput(err) class ManageMultipleSettings(AdminScriptTestCase): """A series of tests for manage.py when multiple settings files (including the default 'settings.py') are available. The default settings file is insufficient for performing the operations described, so the alternate settings must be used by the running script. """ def setUp(self): super().setUp() self.write_settings( "settings.py", apps=["django.contrib.auth", "django.contrib.contenttypes"] ) self.write_settings("alternate_settings.py") def test_builtin_command(self): """ multiple: manage.py builtin commands fail with an error when no settings provided. """ args = ["check", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No installed app with label 'admin_scripts'.") def test_builtin_with_settings(self): """ multiple: manage.py builtin commands succeed if settings are provided as argument. """ args = ["check", "--settings=alternate_settings", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_environment(self): """ multiple: manage.py can execute builtin commands if settings are provided in the environment. 
""" args = ["check", "admin_scripts"] out, err = self.run_manage(args, "alternate_settings") self.assertNoOutput(err) self.assertOutput(out, SYSTEM_CHECK_MSG) def test_builtin_with_bad_settings(self): """ multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist. """ args = ["check", "--settings=bad_settings", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_builtin_with_bad_environment(self): """ multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist. """ args = ["check", "admin_scripts"] out, err = self.run_manage(args, "bad_settings") self.assertNoOutput(out) self.assertOutput(err, "No module named '?bad_settings'?", regex=True) def test_custom_command(self): "multiple: manage.py can't execute user commands using default settings" args = ["noargs_command"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'noargs_command'") def test_custom_command_with_settings(self): """ multiple: manage.py can execute user commands if settings are provided as argument. """ args = ["noargs_command", "--settings=alternate_settings"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") def test_custom_command_with_environment(self): """ multiple: manage.py can execute user commands if settings are provided in environment. """ args = ["noargs_command"] out, err = self.run_manage(args, "alternate_settings") self.assertNoOutput(err) self.assertOutput(out, "EXECUTE: noargs_command") class ManageSettingsWithSettingsErrors(AdminScriptTestCase): """ Tests for manage.py when using the default settings.py file containing runtime errors. """ def write_settings_with_import_error(self, filename): settings_file_path = os.path.join(self.test_dir, filename) with open(settings_file_path, "w") as settings_file: settings_file.write( "# Settings file automatically generated by admin_scripts test case\n" ) settings_file.write( "# The next line will cause an import error:\nimport foo42bar\n" ) def test_import_error(self): """ import error: manage.py builtin commands shows useful diagnostic info when settings with import errors is provided (#14130). """ self.write_settings_with_import_error("settings.py") args = ["check", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "No module named") self.assertOutput(err, "foo42bar") def test_attribute_error(self): """ manage.py builtin commands does not swallow attribute error due to bad settings (#18845). """ self.write_settings("settings.py", sdict={"BAD_VAR": "INSTALLED_APPS.crash"}) args = ["collectstatic", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "AttributeError: 'list' object has no attribute 'crash'") def test_key_error(self): self.write_settings("settings.py", sdict={"BAD_VAR": 'DATABASES["blah"]'}) args = ["collectstatic", "admin_scripts"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "KeyError: 'blah'") def test_help(self): """ Test listing available commands output note when only core commands are available. 
""" self.write_settings( "settings.py", extra="from django.core.exceptions import ImproperlyConfigured\n" "raise ImproperlyConfigured()", ) args = ["help"] out, err = self.run_manage(args) self.assertOutput(out, "only Django core commands are listed") self.assertNoOutput(err) class ManageCheck(AdminScriptTestCase): def test_nonexistent_app(self): """check reports an error on a nonexistent app in INSTALLED_APPS.""" self.write_settings( "settings.py", apps=["admin_scriptz.broken_app"], sdict={"USE_I18N": False}, ) args = ["check"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "ModuleNotFoundError") self.assertOutput(err, "No module named") self.assertOutput(err, "admin_scriptz") def test_broken_app(self): """manage.py check reports an ImportError if an app's models.py raises one on import""" self.write_settings("settings.py", apps=["admin_scripts.broken_app"]) args = ["check"] out, err = self.run_manage(args) self.assertNoOutput(out) self.assertOutput(err, "ImportError") def test_complex_app(self): """manage.py check does not raise an ImportError validating a complex app with nested calls to load_app""" self.write_settings( "settings.py", apps=[ "admin_scripts.complex_app", "admin_scripts.simple_app", "django.contrib.admin.apps.SimpleAdminConfig", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.messages", ], sdict={ "DEBUG": True, "MIDDLEWARE": [ "django.contrib.messages.middleware.MessageMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", ], "TEMPLATES": [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [], "APP_DIRS": True, "OPTIONS": { "context_processors": [ "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", ], }, }, ], }, ) args = ["check"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertEqual(out, "System check identified no issues (0 silenced).\n") def test_app_with_import(self): """manage.py check does not raise errors when an app imports a base class that itself has an abstract base.""" self.write_settings( "settings.py", apps=[ "admin_scripts.app_with_import", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sites", ], sdict={"DEBUG": True}, ) args = ["check"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertEqual(out, "System check identified no issues (0 silenced).\n") def test_output_format(self): """All errors/warnings should be sorted by level and by message.""" self.write_settings( "settings.py", apps=[ "admin_scripts.app_raising_messages", "django.contrib.auth", "django.contrib.contenttypes", ], sdict={"DEBUG": True}, ) args = ["check"] out, err = self.run_manage(args) expected_err = ( "SystemCheckError: System check identified some issues:\n" "\n" "ERRORS:\n" "?: An error\n" "\tHINT: Error hint\n" "\n" "WARNINGS:\n" "a: Second warning\n" "obj: First warning\n" "\tHINT: Hint\n" "\n" "System check identified 3 issues (0 silenced).\n" ) self.assertEqual(err, expected_err) self.assertNoOutput(out) def test_warning_does_not_halt(self): """ When there are only warnings or less serious messages, then Django should not prevent user from launching their project, so `check` command should not raise `CommandError` exception. In this test we also test output format. 
""" self.write_settings( "settings.py", apps=[ "admin_scripts.app_raising_warning", "django.contrib.auth", "django.contrib.contenttypes", ], sdict={"DEBUG": True}, ) args = ["check"] out, err = self.run_manage(args) expected_err = ( "System check identified some issues:\n" # No "CommandError: " part "\n" "WARNINGS:\n" "?: A warning\n" "\n" "System check identified 1 issue (0 silenced).\n" ) self.assertEqual(err, expected_err) self.assertNoOutput(out) class ManageRunserver(SimpleTestCase): def setUp(self): def monkey_run(*args, **options): return self.output = StringIO() self.cmd = RunserverCommand(stdout=self.output) self.cmd.run = monkey_run def assertServerSettings(self, addr, port, ipv6=False, raw_ipv6=False): self.assertEqual(self.cmd.addr, addr) self.assertEqual(self.cmd.port, port) self.assertEqual(self.cmd.use_ipv6, ipv6) self.assertEqual(self.cmd._raw_ipv6, raw_ipv6) def test_runserver_addrport(self): call_command(self.cmd) self.assertServerSettings("127.0.0.1", "8000") call_command(self.cmd, addrport="1.2.3.4:8000") self.assertServerSettings("1.2.3.4", "8000") call_command(self.cmd, addrport="7000") self.assertServerSettings("127.0.0.1", "7000") @mock.patch("django.core.management.commands.runserver.run") @mock.patch("django.core.management.base.BaseCommand.check_migrations") def test_zero_ip_addr(self, *mocked_objects): call_command( "runserver", addrport="0:8000", use_reloader=False, skip_checks=True, stdout=self.output, ) self.assertIn( "Starting development server at http://0.0.0.0:8000/", self.output.getvalue(), ) @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6") def test_runner_addrport_ipv6(self): call_command(self.cmd, addrport="", use_ipv6=True) self.assertServerSettings("::1", "8000", ipv6=True, raw_ipv6=True) call_command(self.cmd, addrport="7000", use_ipv6=True) self.assertServerSettings("::1", "7000", ipv6=True, raw_ipv6=True) call_command(self.cmd, addrport="[2001:0db8:1234:5678::9]:7000") self.assertServerSettings( "2001:0db8:1234:5678::9", "7000", ipv6=True, raw_ipv6=True ) def test_runner_hostname(self): call_command(self.cmd, addrport="localhost:8000") self.assertServerSettings("localhost", "8000") call_command(self.cmd, addrport="test.domain.local:7000") self.assertServerSettings("test.domain.local", "7000") @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6") def test_runner_hostname_ipv6(self): call_command(self.cmd, addrport="test.domain.local:7000", use_ipv6=True) self.assertServerSettings("test.domain.local", "7000", ipv6=True) def test_runner_custom_defaults(self): self.cmd.default_addr = "0.0.0.0" self.cmd.default_port = "5000" call_command(self.cmd) self.assertServerSettings("0.0.0.0", "5000") @unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6") def test_runner_custom_defaults_ipv6(self): self.cmd.default_addr_ipv6 = "::" call_command(self.cmd, use_ipv6=True) self.assertServerSettings("::", "8000", ipv6=True, raw_ipv6=True) def test_runner_ambiguous(self): # Only 4 characters, all of which could be in an ipv6 address call_command(self.cmd, addrport="beef:7654") self.assertServerSettings("beef", "7654") # Uses only characters that could be in an ipv6 address call_command(self.cmd, addrport="deadbeef:7654") self.assertServerSettings("deadbeef", "7654") def test_no_database(self): """ Ensure runserver.check_migrations doesn't choke on empty DATABASES. 
""" tested_connections = ConnectionHandler({}) with mock.patch( "django.core.management.base.connections", new=tested_connections ): self.cmd.check_migrations() def test_readonly_database(self): """ runserver.check_migrations() doesn't choke when a database is read-only. """ with mock.patch.object(MigrationRecorder, "has_table", return_value=False): self.cmd.check_migrations() # You have # ... self.assertIn("unapplied migration(s)", self.output.getvalue()) @mock.patch("django.core.management.commands.runserver.run") @mock.patch("django.core.management.base.BaseCommand.check_migrations") @mock.patch("django.core.management.base.BaseCommand.check") def test_skip_checks(self, mocked_check, *mocked_objects): call_command( "runserver", use_reloader=False, skip_checks=True, stdout=self.output, ) self.assertNotIn("Performing system checks...", self.output.getvalue()) mocked_check.assert_not_called() self.output.truncate(0) call_command( "runserver", use_reloader=False, skip_checks=False, stdout=self.output, ) self.assertIn("Performing system checks...", self.output.getvalue()) mocked_check.assert_called() class ManageRunserverMigrationWarning(TestCase): def setUp(self): self.stdout = StringIO() self.runserver_command = RunserverCommand(stdout=self.stdout) @override_settings(INSTALLED_APPS=["admin_scripts.app_waiting_migration"]) def test_migration_warning_one_app(self): self.runserver_command.check_migrations() output = self.stdout.getvalue() self.assertIn("You have 1 unapplied migration(s)", output) self.assertIn("apply the migrations for app(s): app_waiting_migration.", output) @override_settings( INSTALLED_APPS=[ "admin_scripts.app_waiting_migration", "admin_scripts.another_app_waiting_migration", ], ) def test_migration_warning_multiple_apps(self): self.runserver_command.check_migrations() output = self.stdout.getvalue() self.assertIn("You have 2 unapplied migration(s)", output) self.assertIn( "apply the migrations for app(s): another_app_waiting_migration, " "app_waiting_migration.", output, ) class ManageRunserverEmptyAllowedHosts(AdminScriptTestCase): def setUp(self): super().setUp() self.write_settings( "settings.py", sdict={ "ALLOWED_HOSTS": [], "DEBUG": False, }, ) def test_empty_allowed_hosts_error(self): out, err = self.run_manage(["runserver"]) self.assertNoOutput(out) self.assertOutput( err, "CommandError: You must set settings.ALLOWED_HOSTS if DEBUG is False." 
        )


class ManageRunserverHelpOutput(AdminScriptTestCase):
    def test_suppressed_options(self):
        """runserver doesn't support the --verbosity and --traceback options."""
        out, err = self.run_manage(["runserver", "--help"])
        self.assertNotInOutput(out, "--verbosity")
        self.assertNotInOutput(out, "--traceback")
        self.assertOutput(out, "--settings")


class ManageTestserver(SimpleTestCase):
    @mock.patch.object(TestserverCommand, "handle", return_value="")
    def test_testserver_handle_params(self, mock_handle):
        out = StringIO()
        call_command("testserver", "blah.json", stdout=out)
        mock_handle.assert_called_with(
            "blah.json",
            stdout=out,
            settings=None,
            pythonpath=None,
            verbosity=1,
            traceback=False,
            addrport="",
            no_color=False,
            use_ipv6=False,
            skip_checks=True,
            interactive=True,
            force_color=False,
        )

    @mock.patch("django.db.connection.creation.create_test_db", return_value="test_db")
    @mock.patch.object(LoaddataCommand, "handle", return_value="")
    @mock.patch.object(RunserverCommand, "handle", return_value="")
    def test_params_to_runserver(
        self, mock_runserver_handle, mock_loaddata_handle, mock_create_test_db
    ):
        call_command("testserver", "blah.json")
        mock_runserver_handle.assert_called_with(
            addrport="",
            force_color=False,
            insecure_serving=False,
            no_color=False,
            pythonpath=None,
            settings=None,
            shutdown_message=(
                "\nServer stopped.\nNote that the test database, 'test_db', "
                "has not been deleted. You can explore it on your own."
            ),
            skip_checks=True,
            traceback=False,
            use_ipv6=False,
            use_reloader=False,
            use_static_handler=True,
            use_threading=connection.features.test_db_allows_multiple_connections,
            verbosity=1,
        )


##########################################################################
# COMMAND PROCESSING TESTS
# user-space commands are correctly handled - in particular, arguments to
# the commands are correctly parsed and processed.
##########################################################################
class ColorCommand(BaseCommand):
    requires_system_checks = []

    def handle(self, *args, **options):
        self.stdout.write("Hello, world!", self.style.ERROR)
        self.stderr.write("Hello, world!", self.style.ERROR)


class CommandTypes(AdminScriptTestCase):
    "Tests for the various types of base commands that can be defined."

    def setUp(self):
        super().setUp()
        self.write_settings("settings.py")

    def test_version(self):
        "version is handled as a special case"
        args = ["version"]
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, get_version())

    def test_version_alternative(self):
        "--version is equivalent to version"
        args1, args2 = ["version"], ["--version"]
        # It's possible one outputs on stderr and the other on stdout, hence the set
        self.assertEqual(set(self.run_manage(args1)), set(self.run_manage(args2)))

    def test_help(self):
        "help is handled as a special case"
        args = ["help"]
        out, err = self.run_manage(args)
        self.assertOutput(
            out, "Type 'manage.py help <subcommand>' for help on a specific subcommand."
        )
        self.assertOutput(out, "[django]")
        self.assertOutput(out, "startapp")
        self.assertOutput(out, "startproject")

    def test_help_commands(self):
        "help --commands shows the list of all available commands"
        args = ["help", "--commands"]
        out, err = self.run_manage(args)
        self.assertNotInOutput(out, "usage:")
        self.assertNotInOutput(out, "Options:")
        self.assertNotInOutput(out, "[django]")
        self.assertOutput(out, "startapp")
        self.assertOutput(out, "startproject")
        self.assertNotInOutput(out, "\n\n")

    def test_help_alternative(self):
        "--help is equivalent to help"
        args1, args2 = ["help"], ["--help"]
        self.assertEqual(self.run_manage(args1), self.run_manage(args2))

    def test_help_short_alternative(self):
        "-h is handled as a short form of --help"
        args1, args2 = ["--help"], ["-h"]
        self.assertEqual(self.run_manage(args1), self.run_manage(args2))

    def test_specific_help(self):
        "--help can be used on a specific command"
        args = ["check", "--help"]
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        # Command-specific options like --tag appear before options common to
        # all commands like --version.
        tag_location = out.find("--tag")
        version_location = out.find("--version")
        self.assertNotEqual(tag_location, -1)
        self.assertNotEqual(version_location, -1)
        self.assertLess(tag_location, version_location)
        self.assertOutput(
            out, "Checks the entire Django project for potential problems."
        )

    def test_help_default_options_with_custom_arguments(self):
        args = ["base_command", "--help"]
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        expected_options = [
            "-h",
            "--option_a OPTION_A",
            "--option_b OPTION_B",
            "--option_c OPTION_C",
            "--version",
            "-v {0,1,2,3}",
            "--settings SETTINGS",
            "--pythonpath PYTHONPATH",
            "--traceback",
            "--no-color",
            "--force-color",
            "args ...",
        ]
        for option in expected_options:
            self.assertOutput(out, f"[{option}]")
        self.assertOutput(out, "--option_a OPTION_A, -a OPTION_A")
        self.assertOutput(out, "--option_b OPTION_B, -b OPTION_B")
        self.assertOutput(out, "--option_c OPTION_C, -c OPTION_C")
        self.assertOutput(out, "-v {0,1,2,3}, --verbosity {0,1,2,3}")

    def test_color_style(self):
        style = color.no_style()
        self.assertEqual(style.ERROR("Hello, world!"), "Hello, world!")

        style = color.make_style("nocolor")
        self.assertEqual(style.ERROR("Hello, world!"), "Hello, world!")

        style = color.make_style("dark")
        self.assertIn("Hello, world!", style.ERROR("Hello, world!"))
        self.assertNotEqual(style.ERROR("Hello, world!"), "Hello, world!")

        # Default palette has color.
style = color.make_style("")
        self.assertIn("Hello, world!", style.ERROR("Hello, world!"))
        self.assertNotEqual(style.ERROR("Hello, world!"), "Hello, world!")

    def test_command_color(self):
        out = StringIO()
        err = StringIO()
        command = ColorCommand(stdout=out, stderr=err)
        call_command(command)
        if color.supports_color():
            self.assertIn("Hello, world!\n", out.getvalue())
            self.assertIn("Hello, world!\n", err.getvalue())
            self.assertNotEqual(out.getvalue(), "Hello, world!\n")
            self.assertNotEqual(err.getvalue(), "Hello, world!\n")
        else:
            self.assertEqual(out.getvalue(), "Hello, world!\n")
            self.assertEqual(err.getvalue(), "Hello, world!\n")

    def test_command_no_color(self):
        "--no-color prevents colorization of the output"
        out = StringIO()
        err = StringIO()
        command = ColorCommand(stdout=out, stderr=err, no_color=True)
        call_command(command)
        self.assertEqual(out.getvalue(), "Hello, world!\n")
        self.assertEqual(err.getvalue(), "Hello, world!\n")
        out = StringIO()
        err = StringIO()
        command = ColorCommand(stdout=out, stderr=err)
        call_command(command, no_color=True)
        self.assertEqual(out.getvalue(), "Hello, world!\n")
        self.assertEqual(err.getvalue(), "Hello, world!\n")

    def test_force_color_execute(self):
        out = StringIO()
        err = StringIO()
        with mock.patch.object(sys.stdout, "isatty", lambda: False):
            command = ColorCommand(stdout=out, stderr=err)
            call_command(command, force_color=True)
        self.assertEqual(out.getvalue(), "\x1b[31;1mHello, world!\n\x1b[0m")
        self.assertEqual(err.getvalue(), "\x1b[31;1mHello, world!\n\x1b[0m")

    def test_force_color_command_init(self):
        out = StringIO()
        err = StringIO()
        with mock.patch.object(sys.stdout, "isatty", lambda: False):
            command = ColorCommand(stdout=out, stderr=err, force_color=True)
            call_command(command)
        self.assertEqual(out.getvalue(), "\x1b[31;1mHello, world!\n\x1b[0m")
        self.assertEqual(err.getvalue(), "\x1b[31;1mHello, world!\n\x1b[0m")

    def test_no_color_force_color_mutually_exclusive_execute(self):
        msg = "The --no-color and --force-color options can't be used together."
        with self.assertRaisesMessage(CommandError, msg):
            call_command(BaseCommand(), no_color=True, force_color=True)

    def test_no_color_force_color_mutually_exclusive_command_init(self):
        msg = "'no_color' and 'force_color' can't be used together."
with self.assertRaisesMessage(CommandError, msg):
            call_command(BaseCommand(no_color=True, force_color=True))

    def test_custom_stdout(self):
        class Command(BaseCommand):
            requires_system_checks = []

            def handle(self, *args, **options):
                self.stdout.write("Hello, World!")

        out = StringIO()
        command = Command(stdout=out)
        call_command(command)
        self.assertEqual(out.getvalue(), "Hello, World!\n")
        out.truncate(0)
        new_out = StringIO()
        call_command(command, stdout=new_out)
        self.assertEqual(out.getvalue(), "")
        self.assertEqual(new_out.getvalue(), "Hello, World!\n")

    def test_custom_stderr(self):
        class Command(BaseCommand):
            requires_system_checks = []

            def handle(self, *args, **options):
                self.stderr.write("Hello, World!")

        err = StringIO()
        command = Command(stderr=err)
        call_command(command)
        self.assertEqual(err.getvalue(), "Hello, World!\n")
        err.truncate(0)
        new_err = StringIO()
        call_command(command, stderr=new_err)
        self.assertEqual(err.getvalue(), "")
        self.assertEqual(new_err.getvalue(), "Hello, World!\n")

    def test_base_command(self):
        "User BaseCommands can execute when a label is provided"
        args = ["base_command", "testlabel"]
        expected_labels = "('testlabel',)"
        self._test_base_command(args, expected_labels)

    def test_base_command_no_label(self):
        "User BaseCommands can execute when no labels are provided"
        args = ["base_command"]
        expected_labels = "()"
        self._test_base_command(args, expected_labels)

    def test_base_command_multiple_label(self):
        "User BaseCommands can execute when multiple labels are provided"
        args = ["base_command", "testlabel", "anotherlabel"]
        expected_labels = "('testlabel', 'anotherlabel')"
        self._test_base_command(args, expected_labels)

    def test_base_command_with_option(self):
        "User BaseCommands can execute with options when a label is provided"
        args = ["base_command", "testlabel", "--option_a=x"]
        expected_labels = "('testlabel',)"
        self._test_base_command(args, expected_labels, option_a="'x'")

    def test_base_command_with_options(self):
        "User BaseCommands can execute with multiple options when a label is provided"
        args = ["base_command", "testlabel", "-a", "x", "--option_b=y"]
        expected_labels = "('testlabel',)"
        self._test_base_command(args, expected_labels, option_a="'x'", option_b="'y'")

    def test_base_command_with_wrong_option(self):
        "User BaseCommands output the command usage when a wrong option is specified"
        args = ["base_command", "--invalid"]
        out, err = self.run_manage(args)
        self.assertNoOutput(out)
        self.assertOutput(err, "usage: manage.py base_command")
        self.assertOutput(err, "error: unrecognized arguments: --invalid")

    def _test_base_command(self, args, labels, option_a="'1'", option_b="'2'"):
        out, err = self.run_manage(args)
        expected_out = (
            "EXECUTE:BaseCommand labels=%s, "
            "options=[('force_color', False), ('no_color', False), "
            "('option_a', %s), ('option_b', %s), ('option_c', '3'), "
            "('pythonpath', None), ('settings', None), ('traceback', False), "
            "('verbosity', 1)]"
        ) % (labels, option_a, option_b)
        self.assertNoOutput(err)
        self.assertOutput(out, expected_out)

    def test_base_run_from_argv(self):
        """
        Test run_from_argv properly terminates even with custom execute() (#19665).
        Also test proper traceback display.
        """
        err = StringIO()
        command = BaseCommand(stderr=err)

        def raise_command_error(*args, **kwargs):
            raise CommandError("Custom error")

        command.execute = lambda args: args  # This will trigger TypeError

        # If the Exception is not CommandError it should always
        # raise the original exception.
with self.assertRaises(TypeError): command.run_from_argv(["", ""]) # If the Exception is CommandError and --traceback is not present # this command should raise a SystemExit and don't print any # traceback to the stderr. command.execute = raise_command_error err.truncate(0) with self.assertRaises(SystemExit): command.run_from_argv(["", ""]) err_message = err.getvalue() self.assertNotIn("Traceback", err_message) self.assertIn("CommandError", err_message) # If the Exception is CommandError and --traceback is present # this command should raise the original CommandError as if it # were not a CommandError. err.truncate(0) with self.assertRaises(CommandError): command.run_from_argv(["", "", "--traceback"]) def test_run_from_argv_non_ascii_error(self): """ Non-ASCII message of CommandError does not raise any UnicodeDecodeError in run_from_argv. """ def raise_command_error(*args, **kwargs): raise CommandError("Erreur personnalisée") command = BaseCommand(stderr=StringIO()) command.execute = raise_command_error with self.assertRaises(SystemExit): command.run_from_argv(["", ""]) def test_run_from_argv_closes_connections(self): """ A command called from the command line should close connections after being executed (#21255). """ command = BaseCommand() command.check = lambda: [] command.handle = lambda *args, **kwargs: args with mock.patch("django.core.management.base.connections") as mock_connections: command.run_from_argv(["", ""]) # Test connections have been closed self.assertTrue(mock_connections.close_all.called) def test_noargs(self): "NoArg Commands can be executed" args = ["noargs_command"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput( out, "EXECUTE: noargs_command options=[('force_color', False), " "('no_color', False), ('pythonpath', None), ('settings', None), " "('traceback', False), ('verbosity', 1)]", ) def test_noargs_with_args(self): "NoArg Commands raise an error if an argument is provided" args = ["noargs_command", "argument"] out, err = self.run_manage(args) self.assertOutput(err, "error: unrecognized arguments: argument") def test_app_command(self): "User AppCommands can execute when a single app name is provided" args = ["app_command", "auth"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=") self.assertOutput( out, ", options=[('force_color', False), ('no_color', False), " "('pythonpath', None), ('settings', None), ('traceback', False), " "('verbosity', 1)]", ) def test_app_command_no_apps(self): "User AppCommands raise an error when no app name is provided" args = ["app_command"] out, err = self.run_manage(args) self.assertOutput(err, "error: Enter at least one application label.") def test_app_command_multiple_apps(self): "User AppCommands raise an error when multiple app names are provided" args = ["app_command", "auth", "contenttypes"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "EXECUTE:AppCommand name=django.contrib.auth, options=") self.assertOutput( out, ", options=[('force_color', False), ('no_color', False), " "('pythonpath', None), ('settings', None), ('traceback', False), " "('verbosity', 1)]", ) self.assertOutput( out, "EXECUTE:AppCommand name=django.contrib.contenttypes, options=" ) self.assertOutput( out, ", options=[('force_color', False), ('no_color', False), " "('pythonpath', None), ('settings', None), ('traceback', False), " "('verbosity', 1)]", ) def test_app_command_invalid_app_label(self): "User 
AppCommands raise an error when an invalid app name is provided"
        args = ["app_command", "NOT_AN_APP"]
        out, err = self.run_manage(args)
        self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.")

    def test_app_command_some_invalid_app_labels(self):
        "User AppCommands raise an error when any of the provided app names are invalid"
        args = ["app_command", "auth", "NOT_AN_APP"]
        out, err = self.run_manage(args)
        self.assertOutput(err, "No installed app with label 'NOT_AN_APP'.")

    def test_label_command(self):
        "User LabelCommands can execute when a label is provided"
        args = ["label_command", "testlabel"]
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(
            out,
            "EXECUTE:LabelCommand label=testlabel, options=[('force_color', "
            "False), ('no_color', False), ('pythonpath', None), ('settings', "
            "None), ('traceback', False), ('verbosity', 1)]",
        )

    def test_label_command_no_label(self):
        "User LabelCommands raise an error if no label is provided"
        args = ["label_command"]
        out, err = self.run_manage(args)
        self.assertOutput(err, "Enter at least one label")

    def test_label_command_multiple_label(self):
        "User LabelCommands are executed multiple times if multiple labels are provided"
        args = ["label_command", "testlabel", "anotherlabel"]
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(
            out,
            "EXECUTE:LabelCommand label=testlabel, options=[('force_color', "
            "False), ('no_color', False), ('pythonpath', None), "
            "('settings', None), ('traceback', False), ('verbosity', 1)]",
        )
        self.assertOutput(
            out,
            "EXECUTE:LabelCommand label=anotherlabel, options=[('force_color', "
            "False), ('no_color', False), ('pythonpath', None), "
            "('settings', None), ('traceback', False), ('verbosity', 1)]",
        )

    def test_suppress_base_options_command_help(self):
        args = ["suppress_base_options_command", "--help"]
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(out, "Test suppress base options command.")
        self.assertNotInOutput(out, "input file")
        self.assertOutput(out, "-h, --help")
        self.assertNotInOutput(out, "--version")
        self.assertNotInOutput(out, "--verbosity")
        self.assertNotInOutput(out, "-v {0,1,2,3}")
        self.assertNotInOutput(out, "--settings")
        self.assertNotInOutput(out, "--pythonpath")
        self.assertNotInOutput(out, "--traceback")
        self.assertNotInOutput(out, "--no-color")
        self.assertNotInOutput(out, "--force-color")

    def test_suppress_base_options_command_defaults(self):
        args = ["suppress_base_options_command"]
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
        self.assertOutput(
            out,
            "EXECUTE:SuppressBaseOptionsCommand options=[('file', None), "
            "('force_color', False), ('no_color', False), "
            "('pythonpath', None), ('settings', None), "
            "('traceback', False), ('verbosity', 1)]",
        )


class Discovery(SimpleTestCase):
    def test_precedence(self):
        """
        Apps listed first in INSTALLED_APPS have precedence.
        """
        with self.settings(
            INSTALLED_APPS=[
                "admin_scripts.complex_app",
                "admin_scripts.simple_app",
                "django.contrib.auth",
                "django.contrib.contenttypes",
            ]
        ):
            out = StringIO()
            call_command("duplicate", stdout=out)
            self.assertEqual(out.getvalue().strip(), "complex_app")
        with self.settings(
            INSTALLED_APPS=[
                "admin_scripts.simple_app",
                "admin_scripts.complex_app",
                "django.contrib.auth",
                "django.contrib.contenttypes",
            ]
        ):
            out = StringIO()
            call_command("duplicate", stdout=out)
            self.assertEqual(out.getvalue().strip(), "simple_app")


class ArgumentOrder(AdminScriptTestCase):
    """Tests for 2-stage argument parsing scheme.
django-admin command arguments are parsed in 2 parts; the core arguments (--settings, --traceback and --pythonpath) are parsed using a basic parser, ignoring any unknown options. Then the full settings are passed to the command parser, which extracts commands of interest to the individual command. """ def setUp(self): super().setUp() self.write_settings( "settings.py", apps=["django.contrib.auth", "django.contrib.contenttypes"] ) self.write_settings("alternate_settings.py") def test_setting_then_option(self): """Options passed after settings are correctly handled.""" args = [ "base_command", "testlabel", "--settings=alternate_settings", "--option_a=x", ] self._test(args) def test_setting_then_short_option(self): """Short options passed after settings are correctly handled.""" args = ["base_command", "testlabel", "--settings=alternate_settings", "-a", "x"] self._test(args) def test_option_then_setting(self): """Options passed before settings are correctly handled.""" args = [ "base_command", "testlabel", "--option_a=x", "--settings=alternate_settings", ] self._test(args) def test_short_option_then_setting(self): """Short options passed before settings are correctly handled.""" args = ["base_command", "testlabel", "-a", "x", "--settings=alternate_settings"] self._test(args) def test_option_then_setting_then_option(self): """Options are correctly handled when they are passed before and after a setting.""" args = [ "base_command", "testlabel", "--option_a=x", "--settings=alternate_settings", "--option_b=y", ] self._test(args, option_b="'y'") def _test(self, args, option_b="'2'"): out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput( out, "EXECUTE:BaseCommand labels=('testlabel',), options=[" "('force_color', False), ('no_color', False), ('option_a', 'x'), " "('option_b', %s), ('option_c', '3'), ('pythonpath', None), " "('settings', 'alternate_settings'), ('traceback', False), " "('verbosity', 1)]" % option_b, ) class ExecuteFromCommandLine(SimpleTestCase): def test_program_name_from_argv(self): """ Program name is computed from the execute_from_command_line()'s argv argument, not sys.argv. """ args = ["help", "shell"] with captured_stdout() as out, captured_stderr() as err: with mock.patch("sys.argv", [None] + args): execute_from_command_line(["django-admin"] + args) self.assertIn("usage: django-admin shell", out.getvalue()) self.assertEqual(err.getvalue(), "") @override_settings(ROOT_URLCONF="admin_scripts.urls") class StartProject(LiveServerTestCase, AdminScriptTestCase): available_apps = [ "admin_scripts", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", ] def test_wrong_args(self): """ Passing the wrong kinds of arguments outputs an error and prints usage. """ out, err = self.run_django_admin(["startproject"]) self.assertNoOutput(out) self.assertOutput(err, "usage:") self.assertOutput(err, "You must provide a project name.") def test_simple_project(self): "Make sure the startproject management command creates a project" args = ["startproject", "testproject"] testproject_dir = os.path.join(self.test_dir, "testproject") out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) # running again.. out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput( err, "CommandError: 'testproject' conflicts with the name of an " "existing Python module and cannot be used as a project name. 
" "Please try another name.", ) def test_invalid_project_name(self): "Make sure the startproject management command validates a project name" for bad_name in ("7testproject", "../testproject"): with self.subTest(project_name=bad_name): args = ["startproject", bad_name] testproject_dir = os.path.join(self.test_dir, bad_name) out, err = self.run_django_admin(args) self.assertOutput( err, "Error: '%s' is not a valid project name. Please make " "sure the name is a valid identifier." % bad_name, ) self.assertFalse(os.path.exists(testproject_dir)) def test_importable_project_name(self): """ startproject validates that project name doesn't clash with existing Python modules. """ bad_name = "os" args = ["startproject", bad_name] testproject_dir = os.path.join(self.test_dir, bad_name) out, err = self.run_django_admin(args) self.assertOutput( err, "CommandError: 'os' conflicts with the name of an existing " "Python module and cannot be used as a project name. Please try " "another name.", ) self.assertFalse(os.path.exists(testproject_dir)) def test_simple_project_different_directory(self): """ The startproject management command creates a project in a specific directory. """ args = ["startproject", "testproject", "othertestproject"] testproject_dir = os.path.join(self.test_dir, "othertestproject") os.mkdir(testproject_dir) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.exists(os.path.join(testproject_dir, "manage.py"))) # running again.. out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput( err, "already exists. Overlaying a project into an existing directory " "won't replace conflicting files.", ) def test_custom_project_template(self): """ The startproject management command is able to use a different project template. """ template_path = os.path.join(custom_templates_dir, "project_template") args = ["startproject", "--template", template_path, "customtestproject"] testproject_dir = os.path.join(self.test_dir, "customtestproject") out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, "additional_dir"))) def test_custom_project_template_non_python_files_not_formatted(self): template_path = os.path.join(custom_templates_dir, "project_template") args = ["startproject", "--template", template_path, "customtestproject"] testproject_dir = os.path.join(self.test_dir, "customtestproject") _, err = self.run_django_admin(args) self.assertNoOutput(err) with open( os.path.join(template_path, "additional_dir", "requirements.in") ) as f: expected = f.read() with open( os.path.join(testproject_dir, "additional_dir", "requirements.in") ) as f: result = f.read() self.assertEqual(expected, result) def test_template_dir_with_trailing_slash(self): "Ticket 17475: Template dir passed has a trailing path separator" template_path = os.path.join(custom_templates_dir, "project_template" + os.sep) args = ["startproject", "--template", template_path, "customtestproject"] testproject_dir = os.path.join(self.test_dir, "customtestproject") out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, "additional_dir"))) def test_custom_project_template_from_tarball_by_path(self): """ The startproject management command is able to use a different project template from a tarball. 
""" template_path = os.path.join(custom_templates_dir, "project_template.tgz") args = ["startproject", "--template", template_path, "tarballtestproject"] testproject_dir = os.path.join(self.test_dir, "tarballtestproject") out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, "run.py"))) def test_custom_project_template_from_tarball_to_alternative_location(self): """ Startproject can use a project template from a tarball and create it in a specified location. """ template_path = os.path.join(custom_templates_dir, "project_template.tgz") args = [ "startproject", "--template", template_path, "tarballtestproject", "altlocation", ] testproject_dir = os.path.join(self.test_dir, "altlocation") os.mkdir(testproject_dir) out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, "run.py"))) def test_custom_project_template_from_tarball_by_url(self): """ The startproject management command is able to use a different project template from a tarball via a URL. """ template_url = "%s/custom_templates/project_template.tgz" % self.live_server_url args = ["startproject", "--template", template_url, "urltestproject"] testproject_dir = os.path.join(self.test_dir, "urltestproject") out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, "run.py"))) def test_custom_project_template_from_tarball_by_url_django_user_agent(self): user_agent = None def serve_template(request, *args, **kwargs): nonlocal user_agent user_agent = request.headers["User-Agent"] return serve(request, *args, **kwargs) old_urlpatterns = urls.urlpatterns[:] try: urls.urlpatterns += [ path( "user_agent_check/<path:path>", serve_template, {"document_root": os.path.join(urls.here, "custom_templates")}, ), ] template_url = ( f"{self.live_server_url}/user_agent_check/project_template.tgz" ) args = ["startproject", "--template", template_url, "urltestproject"] _, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertIn("Django/%s" % get_version(), user_agent) finally: urls.urlpatterns = old_urlpatterns def test_project_template_tarball_url(self): """ " Startproject management command handles project template tar/zip balls from non-canonical urls. 
""" template_url = ( "%s/custom_templates/project_template.tgz/" % self.live_server_url ) args = ["startproject", "--template", template_url, "urltestproject"] testproject_dir = os.path.join(self.test_dir, "urltestproject") out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, "run.py"))) def test_file_without_extension(self): "Make sure the startproject management command is able to render custom files" template_path = os.path.join(custom_templates_dir, "project_template") args = [ "startproject", "--template", template_path, "customtestproject", "-e", "txt", "-n", "Procfile", ] testproject_dir = os.path.join(self.test_dir, "customtestproject") out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) self.assertTrue(os.path.exists(os.path.join(testproject_dir, "additional_dir"))) base_path = os.path.join(testproject_dir, "additional_dir") for f in ("Procfile", "additional_file.py", "requirements.txt"): self.assertTrue(os.path.exists(os.path.join(base_path, f))) with open(os.path.join(base_path, f)) as fh: self.assertEqual( fh.read().strip(), "# some file for customtestproject test project" ) def test_custom_project_template_context_variables(self): "Make sure template context variables are rendered with proper values" template_path = os.path.join(custom_templates_dir, "project_template") args = [ "startproject", "--template", template_path, "another_project", "project_dir", ] testproject_dir = os.path.join(self.test_dir, "project_dir") os.mkdir(testproject_dir) out, err = self.run_django_admin(args) self.assertNoOutput(err) test_manage_py = os.path.join(testproject_dir, "manage.py") with open(test_manage_py) as fp: content = fp.read() self.assertIn('project_name = "another_project"', content) self.assertIn('project_directory = "%s"' % testproject_dir, content) def test_no_escaping_of_project_variables(self): "Make sure template context variables are not html escaped" # We're using a custom command so we need the alternate settings self.write_settings("alternate_settings.py") template_path = os.path.join(custom_templates_dir, "project_template") args = [ "custom_startproject", "--template", template_path, "another_project", "project_dir", "--extra", "<&>", "--settings=alternate_settings", ] testproject_dir = os.path.join(self.test_dir, "project_dir") os.mkdir(testproject_dir) out, err = self.run_manage(args) self.assertNoOutput(err) test_manage_py = os.path.join(testproject_dir, "additional_dir", "extra.py") with open(test_manage_py) as fp: content = fp.read() self.assertIn("<&>", content) def test_custom_project_destination_missing(self): """ Make sure an exception is raised when the provided destination directory doesn't exist """ template_path = os.path.join(custom_templates_dir, "project_template") args = [ "startproject", "--template", template_path, "yet_another_project", "project_dir2", ] testproject_dir = os.path.join(self.test_dir, "project_dir2") out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput( err, "Destination directory '%s' does not exist, please create it first." % testproject_dir, ) self.assertFalse(os.path.exists(testproject_dir)) def test_custom_project_template_with_non_ascii_templates(self): """ The startproject management command is able to render templates with non-ASCII content. 
""" template_path = os.path.join(custom_templates_dir, "project_template") args = [ "startproject", "--template", template_path, "--extension=txt", "customtestproject", ] testproject_dir = os.path.join(self.test_dir, "customtestproject") out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertTrue(os.path.isdir(testproject_dir)) path = os.path.join(testproject_dir, "ticket-18091-non-ascii-template.txt") with open(path, encoding="utf-8") as f: self.assertEqual( f.read().splitlines(False), ["Some non-ASCII text for testing ticket #18091:", "üäö €"], ) def test_custom_project_template_hidden_directory_default_excluded(self): """Hidden directories are excluded by default.""" template_path = os.path.join(custom_templates_dir, "project_template") args = [ "startproject", "--template", template_path, "custom_project_template_hidden_directories", "project_dir", ] testproject_dir = os.path.join(self.test_dir, "project_dir") os.mkdir(testproject_dir) _, err = self.run_django_admin(args) self.assertNoOutput(err) hidden_dir = os.path.join(testproject_dir, ".hidden") self.assertIs(os.path.exists(hidden_dir), False) def test_custom_project_template_hidden_directory_included(self): """ Template context variables in hidden directories are rendered, if not excluded. """ template_path = os.path.join(custom_templates_dir, "project_template") project_name = "custom_project_template_hidden_directories_included" args = [ "startproject", "--template", template_path, project_name, "project_dir", "--exclude", ] testproject_dir = os.path.join(self.test_dir, "project_dir") os.mkdir(testproject_dir) _, err = self.run_django_admin(args) self.assertNoOutput(err) render_py_path = os.path.join(testproject_dir, ".hidden", "render.py") with open(render_py_path) as fp: self.assertIn( f"# The {project_name} should be rendered.", fp.read(), ) def test_custom_project_template_exclude_directory(self): """ Excluded directories (in addition to .git and __pycache__) are not included in the project. 
""" template_path = os.path.join(custom_templates_dir, "project_template") project_name = "custom_project_with_excluded_directories" args = [ "startproject", "--template", template_path, project_name, "project_dir", "--exclude", "additional_dir", "-x", ".hidden", ] testproject_dir = os.path.join(self.test_dir, "project_dir") os.mkdir(testproject_dir) _, err = self.run_django_admin(args) self.assertNoOutput(err) excluded_directories = [ ".hidden", "additional_dir", ".git", "__pycache__", ] for directory in excluded_directories: self.assertIs( os.path.exists(os.path.join(testproject_dir, directory)), False, ) not_excluded = os.path.join(testproject_dir, project_name) self.assertIs(os.path.exists(not_excluded), True) @unittest.skipIf( sys.platform == "win32", "Windows only partially supports umasks and chmod.", ) @unittest.skipUnless(PY39, "subprocess.run()'s umask was added in Python 3.9.") def test_honor_umask(self): _, err = self.run_django_admin(["startproject", "testproject"], umask=0o077) self.assertNoOutput(err) testproject_dir = os.path.join(self.test_dir, "testproject") self.assertIs(os.path.isdir(testproject_dir), True) tests = [ (["manage.py"], 0o700), (["testproject"], 0o700), (["testproject", "settings.py"], 0o600), ] for paths, expected_mode in tests: file_path = os.path.join(testproject_dir, *paths) with self.subTest(paths[-1]): self.assertEqual( stat.S_IMODE(os.stat(file_path).st_mode), expected_mode, ) class StartApp(AdminScriptTestCase): def test_invalid_name(self): """startapp validates that app name is a valid Python identifier.""" for bad_name in ("7testproject", "../testproject"): with self.subTest(app_name=bad_name): args = ["startapp", bad_name] testproject_dir = os.path.join(self.test_dir, bad_name) out, err = self.run_django_admin(args) self.assertOutput( err, "CommandError: '{}' is not a valid app name. Please make " "sure the name is a valid identifier.".format(bad_name), ) self.assertFalse(os.path.exists(testproject_dir)) def test_importable_name(self): """ startapp validates that app name doesn't clash with existing Python modules. """ bad_name = "os" args = ["startapp", bad_name] testproject_dir = os.path.join(self.test_dir, bad_name) out, err = self.run_django_admin(args) self.assertOutput( err, "CommandError: 'os' conflicts with the name of an existing " "Python module and cannot be used as an app name. Please try " "another name.", ) self.assertFalse(os.path.exists(testproject_dir)) def test_invalid_target_name(self): for bad_target in ( "invalid.dir_name", "7invalid_dir_name", ".invalid_dir_name", ): with self.subTest(bad_target): _, err = self.run_django_admin(["startapp", "app", bad_target]) self.assertOutput( err, "CommandError: '%s' is not a valid app directory. Please " "make sure the directory is a valid identifier." % bad_target, ) def test_importable_target_name(self): _, err = self.run_django_admin(["startapp", "app", "os"]) self.assertOutput( err, "CommandError: 'os' conflicts with the name of an existing Python " "module and cannot be used as an app directory. Please try " "another directory.", ) def test_trailing_slash_in_target_app_directory_name(self): app_dir = os.path.join(self.test_dir, "apps", "app1") os.makedirs(app_dir) _, err = self.run_django_admin( ["startapp", "app", os.path.join("apps", "app1", "")] ) self.assertNoOutput(err) self.assertIs(os.path.exists(os.path.join(app_dir, "apps.py")), True) def test_overlaying_app(self): # Use a subdirectory so it is outside the PYTHONPATH. 
os.makedirs(os.path.join(self.test_dir, "apps/app1")) self.run_django_admin(["startapp", "app1", "apps/app1"]) out, err = self.run_django_admin(["startapp", "app2", "apps/app1"]) self.assertOutput( err, "already exists. Overlaying an app into an existing directory " "won't replace conflicting files.", ) def test_template(self): out, err = self.run_django_admin(["startapp", "new_app"]) self.assertNoOutput(err) app_path = os.path.join(self.test_dir, "new_app") self.assertIs(os.path.exists(app_path), True) with open(os.path.join(app_path, "apps.py")) as f: content = f.read() self.assertIn("class NewAppConfig(AppConfig)", content) if HAS_BLACK: test_str = 'default_auto_field = "django.db.models.BigAutoField"' else: test_str = "default_auto_field = 'django.db.models.BigAutoField'" self.assertIn(test_str, content) self.assertIn( 'name = "new_app"' if HAS_BLACK else "name = 'new_app'", content, ) class DiffSettings(AdminScriptTestCase): """Tests for diffsettings management command.""" def test_basic(self): """Runs without error and emits settings diff.""" self.write_settings("settings_to_diff.py", sdict={"FOO": '"bar"'}) args = ["diffsettings", "--settings=settings_to_diff"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "FOO = 'bar' ###") # Attributes from django.conf.Settings don't appear. self.assertNotInOutput(out, "is_overridden = ") def test_settings_configured(self): out, err = self.run_manage( ["diffsettings"], manage_py="configured_settings_manage.py" ) self.assertNoOutput(err) self.assertOutput(out, "CUSTOM = 1 ###\nDEBUG = True") # Attributes from django.conf.UserSettingsHolder don't appear. self.assertNotInOutput(out, "default_settings = ") def test_dynamic_settings_configured(self): # Custom default settings appear. out, err = self.run_manage( ["diffsettings"], manage_py="configured_dynamic_settings_manage.py" ) self.assertNoOutput(err) self.assertOutput(out, "FOO = 'bar' ###") def test_all(self): """The all option also shows settings with the default value.""" self.write_settings("settings_to_diff.py", sdict={"STATIC_URL": "None"}) args = ["diffsettings", "--settings=settings_to_diff", "--all"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "### STATIC_URL = None") def test_custom_default(self): """ The --default option specifies an alternate settings module for comparison. """ self.write_settings( "settings_default.py", sdict={"FOO": '"foo"', "BAR": '"bar1"'} ) self.write_settings( "settings_to_diff.py", sdict={"FOO": '"foo"', "BAR": '"bar2"'} ) out, err = self.run_manage( [ "diffsettings", "--settings=settings_to_diff", "--default=settings_default", ] ) self.assertNoOutput(err) self.assertNotInOutput(out, "FOO") self.assertOutput(out, "BAR = 'bar2'") def test_unified(self): """--output=unified emits settings diff in unified mode.""" self.write_settings("settings_to_diff.py", sdict={"FOO": '"bar"'}) args = ["diffsettings", "--settings=settings_to_diff", "--output=unified"] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, "+ FOO = 'bar'") self.assertOutput(out, "- SECRET_KEY = ''") self.assertOutput(out, "+ SECRET_KEY = 'django_tests_secret_key'") self.assertNotInOutput(out, " APPEND_SLASH = True") def test_unified_all(self): """ --output=unified --all emits settings diff in unified mode and includes settings with the default value. 
""" self.write_settings("settings_to_diff.py", sdict={"FOO": '"bar"'}) args = [ "diffsettings", "--settings=settings_to_diff", "--output=unified", "--all", ] out, err = self.run_manage(args) self.assertNoOutput(err) self.assertOutput(out, " APPEND_SLASH = True") self.assertOutput(out, "+ FOO = 'bar'") self.assertOutput(out, "- SECRET_KEY = ''") class Dumpdata(AdminScriptTestCase): """Tests for dumpdata management command.""" def setUp(self): super().setUp() self.write_settings("settings.py") def test_pks_parsing(self): """Regression for #20509 Test would raise an exception rather than printing an error message. """ args = ["dumpdata", "--pks=1"] out, err = self.run_manage(args) self.assertOutput(err, "You can only use --pks option with one model") self.assertNoOutput(out) class MainModule(AdminScriptTestCase): """python -m django works like django-admin.""" def test_program_name_in_help(self): out, err = self.run_test(["-m", "django", "help"]) self.assertOutput( out, "Type 'python -m django help <subcommand>' for help on a specific " "subcommand.", ) class DjangoAdminSuggestions(AdminScriptTestCase): def setUp(self): super().setUp() self.write_settings("settings.py") def test_suggestions(self): args = ["rnserver", "--settings=test_project.settings"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertOutput(err, "Unknown command: 'rnserver'. Did you mean runserver?") def test_no_suggestions(self): args = ["abcdef", "--settings=test_project.settings"] out, err = self.run_django_admin(args) self.assertNoOutput(out) self.assertNotInOutput(err, "Did you mean")
from datetime import datetime from operator import attrgetter from django.core.exceptions import FieldError from django.db.models import ( CharField, Count, DateTimeField, F, Max, OuterRef, Subquery, Value, ) from django.db.models.functions import Length, Upper from django.test import TestCase from .models import ( Article, Author, ChildArticle, OrderedByExpression, OrderedByExpressionChild, OrderedByExpressionGrandChild, OrderedByFArticle, Reference, ) class OrderingTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Article.objects.create( headline="Article 1", pub_date=datetime(2005, 7, 26) ) cls.a2 = Article.objects.create( headline="Article 2", pub_date=datetime(2005, 7, 27) ) cls.a3 = Article.objects.create( headline="Article 3", pub_date=datetime(2005, 7, 27) ) cls.a4 = Article.objects.create( headline="Article 4", pub_date=datetime(2005, 7, 28) ) cls.author_1 = Author.objects.create(name="Name 1") cls.author_2 = Author.objects.create(name="Name 2") for i in range(2): Author.objects.create() def test_default_ordering(self): """ By default, Article.objects.all() orders by pub_date descending, then headline ascending. """ self.assertQuerySetEqual( Article.objects.all(), [ "Article 4", "Article 2", "Article 3", "Article 1", ], attrgetter("headline"), ) # Getting a single item should work too: self.assertEqual(Article.objects.all()[0], self.a4) def test_default_ordering_override(self): """ Override ordering with order_by, which is in the same format as the ordering attribute in models. """ self.assertQuerySetEqual( Article.objects.order_by("headline"), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline"), ) self.assertQuerySetEqual( Article.objects.order_by("pub_date", "-headline"), [ "Article 1", "Article 3", "Article 2", "Article 4", ], attrgetter("headline"), ) def test_default_ordering_override_unknown_field(self): """ Attempts to override default ordering on related models with an unknown field should result in an error. """ msg = ( "Cannot resolve keyword 'unknown_field' into field. Choices are: " "article, author, editor, editor_id, id, name" ) with self.assertRaisesMessage(FieldError, msg): list(Article.objects.order_by("author__unknown_field")) def test_order_by_override(self): """ Only the last order_by has any effect (since they each override any previous ordering). """ self.assertQuerySetEqual( Article.objects.order_by("id"), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline"), ) self.assertQuerySetEqual( Article.objects.order_by("id").order_by("-headline"), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline"), ) def test_order_by_nulls_first_and_last(self): msg = "nulls_first and nulls_last are mutually exclusive" with self.assertRaisesMessage(ValueError, msg): Article.objects.order_by( F("author").desc(nulls_last=True, nulls_first=True) ) def assertQuerySetEqualReversible(self, queryset, sequence): self.assertSequenceEqual(queryset, sequence) self.assertSequenceEqual(queryset.reverse(), list(reversed(sequence))) def test_order_by_nulls_last(self): Article.objects.filter(headline="Article 3").update(author=self.author_1) Article.objects.filter(headline="Article 4").update(author=self.author_2) # asc and desc are chainable with nulls_last. 
self.assertQuerySetEqualReversible( Article.objects.order_by(F("author").desc(nulls_last=True), "headline"), [self.a4, self.a3, self.a1, self.a2], ) self.assertQuerySetEqualReversible( Article.objects.order_by(F("author").asc(nulls_last=True), "headline"), [self.a3, self.a4, self.a1, self.a2], ) self.assertQuerySetEqualReversible( Article.objects.order_by( Upper("author__name").desc(nulls_last=True), "headline" ), [self.a4, self.a3, self.a1, self.a2], ) self.assertQuerySetEqualReversible( Article.objects.order_by( Upper("author__name").asc(nulls_last=True), "headline" ), [self.a3, self.a4, self.a1, self.a2], ) def test_order_by_nulls_first(self): Article.objects.filter(headline="Article 3").update(author=self.author_1) Article.objects.filter(headline="Article 4").update(author=self.author_2) # asc and desc are chainable with nulls_first. self.assertQuerySetEqualReversible( Article.objects.order_by(F("author").asc(nulls_first=True), "headline"), [self.a1, self.a2, self.a3, self.a4], ) self.assertQuerySetEqualReversible( Article.objects.order_by(F("author").desc(nulls_first=True), "headline"), [self.a1, self.a2, self.a4, self.a3], ) self.assertQuerySetEqualReversible( Article.objects.order_by( Upper("author__name").asc(nulls_first=True), "headline" ), [self.a1, self.a2, self.a3, self.a4], ) self.assertQuerySetEqualReversible( Article.objects.order_by( Upper("author__name").desc(nulls_first=True), "headline" ), [self.a1, self.a2, self.a4, self.a3], ) def test_orders_nulls_first_on_filtered_subquery(self): Article.objects.filter(headline="Article 1").update(author=self.author_1) Article.objects.filter(headline="Article 2").update(author=self.author_1) Article.objects.filter(headline="Article 4").update(author=self.author_2) Author.objects.filter(name__isnull=True).delete() author_3 = Author.objects.create(name="Name 3") article_subquery = ( Article.objects.filter( author=OuterRef("pk"), headline__icontains="Article", ) .order_by() .values("author") .annotate( last_date=Max("pub_date"), ) .values("last_date") ) self.assertQuerySetEqualReversible( Author.objects.annotate( last_date=Subquery(article_subquery, output_field=DateTimeField()) ) .order_by(F("last_date").asc(nulls_first=True)) .distinct(), [author_3, self.author_1, self.author_2], ) def test_stop_slicing(self): """ Use the 'stop' part of slicing notation to limit the results. """ self.assertQuerySetEqual( Article.objects.order_by("headline")[:2], [ "Article 1", "Article 2", ], attrgetter("headline"), ) def test_stop_start_slicing(self): """ Use the 'stop' and 'start' parts of slicing notation to offset the result list. """ self.assertQuerySetEqual( Article.objects.order_by("headline")[1:3], [ "Article 2", "Article 3", ], attrgetter("headline"), ) def test_random_ordering(self): """ Use '?' to order randomly. """ self.assertEqual(len(list(Article.objects.order_by("?"))), 4) def test_reversed_ordering(self): """ Ordering can be reversed using the reverse() method on a queryset. This allows you to extract things like "the last two items" (reverse and then take the first two). 
""" self.assertQuerySetEqual( Article.objects.reverse()[:2], [ "Article 1", "Article 3", ], attrgetter("headline"), ) def test_reverse_ordering_pure(self): qs1 = Article.objects.order_by(F("headline").asc()) qs2 = qs1.reverse() self.assertQuerySetEqual( qs2, [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline"), ) self.assertQuerySetEqual( qs1, [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline"), ) def test_reverse_meta_ordering_pure(self): Article.objects.create( headline="Article 5", pub_date=datetime(2005, 7, 30), author=self.author_1, second_author=self.author_2, ) Article.objects.create( headline="Article 5", pub_date=datetime(2005, 7, 30), author=self.author_2, second_author=self.author_1, ) self.assertQuerySetEqual( Article.objects.filter(headline="Article 5").reverse(), ["Name 2", "Name 1"], attrgetter("author.name"), ) self.assertQuerySetEqual( Article.objects.filter(headline="Article 5"), ["Name 1", "Name 2"], attrgetter("author.name"), ) def test_no_reordering_after_slicing(self): msg = "Cannot reverse a query once a slice has been taken." qs = Article.objects.all()[0:2] with self.assertRaisesMessage(TypeError, msg): qs.reverse() with self.assertRaisesMessage(TypeError, msg): qs.last() def test_extra_ordering(self): """ Ordering can be based on fields included from an 'extra' clause """ self.assertQuerySetEqual( Article.objects.extra( select={"foo": "pub_date"}, order_by=["foo", "headline"] ), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline"), ) def test_extra_ordering_quoting(self): """ If the extra clause uses an SQL keyword for a name, it will be protected by quoting. """ self.assertQuerySetEqual( Article.objects.extra( select={"order": "pub_date"}, order_by=["order", "headline"] ), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline"), ) def test_extra_ordering_with_table_name(self): self.assertQuerySetEqual( Article.objects.extra(order_by=["ordering_article.headline"]), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline"), ) self.assertQuerySetEqual( Article.objects.extra(order_by=["-ordering_article.headline"]), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline"), ) def test_order_by_pk(self): """ 'pk' works as an ordering option in Meta. """ self.assertEqual( [a.pk for a in Author.objects.all()], [a.pk for a in Author.objects.order_by("-pk")], ) def test_order_by_fk_attname(self): """ ordering by a foreign key by its attribute name prevents the query from inheriting its related model ordering option (#19195). 
""" authors = list(Author.objects.order_by("id")) for i in range(1, 5): author = authors[i - 1] article = getattr(self, "a%d" % (5 - i)) article.author = author article.save(update_fields={"author"}) self.assertQuerySetEqual( Article.objects.order_by("author_id"), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline"), ) def test_order_by_self_referential_fk(self): self.a1.author = Author.objects.create(editor=self.author_1) self.a1.save() self.a2.author = Author.objects.create(editor=self.author_2) self.a2.save() self.assertQuerySetEqual( Article.objects.filter(author__isnull=False).order_by("author__editor"), ["Article 2", "Article 1"], attrgetter("headline"), ) self.assertQuerySetEqual( Article.objects.filter(author__isnull=False).order_by("author__editor_id"), ["Article 1", "Article 2"], attrgetter("headline"), ) def test_order_by_f_expression(self): self.assertQuerySetEqual( Article.objects.order_by(F("headline")), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline"), ) self.assertQuerySetEqual( Article.objects.order_by(F("headline").asc()), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline"), ) self.assertQuerySetEqual( Article.objects.order_by(F("headline").desc()), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline"), ) def test_order_by_f_expression_duplicates(self): """ A column may only be included once (the first occurrence) so we check to ensure there are no duplicates by inspecting the SQL. """ qs = Article.objects.order_by(F("headline").asc(), F("headline").desc()) sql = str(qs.query).upper() fragment = sql[sql.find("ORDER BY") :] self.assertEqual(fragment.count("HEADLINE"), 1) self.assertQuerySetEqual( qs, [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline"), ) qs = Article.objects.order_by(F("headline").desc(), F("headline").asc()) sql = str(qs.query).upper() fragment = sql[sql.find("ORDER BY") :] self.assertEqual(fragment.count("HEADLINE"), 1) self.assertQuerySetEqual( qs, [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline"), ) def test_order_by_constant_value(self): # Order by annotated constant from selected columns. qs = Article.objects.annotate( constant=Value("1", output_field=CharField()), ).order_by("constant", "-headline") self.assertSequenceEqual(qs, [self.a4, self.a3, self.a2, self.a1]) # Order by annotated constant which is out of selected columns. self.assertSequenceEqual( qs.values_list("headline", flat=True), [ "Article 4", "Article 3", "Article 2", "Article 1", ], ) # Order by constant. qs = Article.objects.order_by(Value("1", output_field=CharField()), "-headline") self.assertSequenceEqual(qs, [self.a4, self.a3, self.a2, self.a1]) def test_related_ordering_duplicate_table_reference(self): """ An ordering referencing a model with an ordering referencing a model multiple time no circular reference should be detected (#24654). 
""" first_author = Author.objects.create() second_author = Author.objects.create() self.a1.author = first_author self.a1.second_author = second_author self.a1.save() self.a2.author = second_author self.a2.second_author = first_author self.a2.save() r1 = Reference.objects.create(article_id=self.a1.pk) r2 = Reference.objects.create(article_id=self.a2.pk) self.assertSequenceEqual(Reference.objects.all(), [r2, r1]) def test_default_ordering_by_f_expression(self): """F expressions can be used in Meta.ordering.""" articles = OrderedByFArticle.objects.all() articles.filter(headline="Article 2").update(author=self.author_2) articles.filter(headline="Article 3").update(author=self.author_1) self.assertQuerySetEqual( articles, ["Article 1", "Article 4", "Article 3", "Article 2"], attrgetter("headline"), ) def test_order_by_ptr_field_with_default_ordering_by_expression(self): ca1 = ChildArticle.objects.create( headline="h2", pub_date=datetime(2005, 7, 27), author=self.author_2, ) ca2 = ChildArticle.objects.create( headline="h2", pub_date=datetime(2005, 7, 27), author=self.author_1, ) ca3 = ChildArticle.objects.create( headline="h3", pub_date=datetime(2005, 7, 27), author=self.author_1, ) ca4 = ChildArticle.objects.create(headline="h1", pub_date=datetime(2005, 7, 28)) articles = ChildArticle.objects.order_by("article_ptr") self.assertSequenceEqual(articles, [ca4, ca2, ca1, ca3]) def test_default_ordering_does_not_affect_group_by(self): Article.objects.exclude(headline="Article 4").update(author=self.author_1) Article.objects.filter(headline="Article 4").update(author=self.author_2) articles = Article.objects.values("author").annotate(count=Count("author")) self.assertCountEqual( articles, [ {"author": self.author_1.pk, "count": 3}, {"author": self.author_2.pk, "count": 1}, ], ) def test_order_by_parent_fk_with_expression_in_default_ordering(self): p3 = OrderedByExpression.objects.create(name="oBJ 3") p2 = OrderedByExpression.objects.create(name="OBJ 2") p1 = OrderedByExpression.objects.create(name="obj 1") c3 = OrderedByExpressionChild.objects.create(parent=p3) c2 = OrderedByExpressionChild.objects.create(parent=p2) c1 = OrderedByExpressionChild.objects.create(parent=p1) self.assertSequenceEqual( OrderedByExpressionChild.objects.order_by("parent"), [c1, c2, c3], ) def test_order_by_grandparent_fk_with_expression_in_default_ordering(self): p3 = OrderedByExpression.objects.create(name="oBJ 3") p2 = OrderedByExpression.objects.create(name="OBJ 2") p1 = OrderedByExpression.objects.create(name="obj 1") c3 = OrderedByExpressionChild.objects.create(parent=p3) c2 = OrderedByExpressionChild.objects.create(parent=p2) c1 = OrderedByExpressionChild.objects.create(parent=p1) g3 = OrderedByExpressionGrandChild.objects.create(parent=c3) g2 = OrderedByExpressionGrandChild.objects.create(parent=c2) g1 = OrderedByExpressionGrandChild.objects.create(parent=c1) self.assertSequenceEqual( OrderedByExpressionGrandChild.objects.order_by("parent"), [g1, g2, g3], ) def test_order_by_expression_ref(self): self.assertQuerySetEqual( Author.objects.annotate(upper_name=Upper("name")).order_by( Length("upper_name") ), Author.objects.order_by(Length(Upper("name"))), )
import gettext import os import re from datetime import datetime, timedelta from importlib import import_module from unittest import skipUnless try: import zoneinfo except ImportError: from backports import zoneinfo from django import forms from django.conf import settings from django.contrib import admin from django.contrib.admin import widgets from django.contrib.admin.tests import AdminSeleniumTestCase from django.contrib.auth.models import User from django.core.files.storage import default_storage from django.core.files.uploadedfile import SimpleUploadedFile from django.db.models import ( CharField, DateField, DateTimeField, ForeignKey, ManyToManyField, UUIDField, ) from django.test import SimpleTestCase, TestCase, override_settings from django.urls import reverse from django.utils import translation from .models import ( Advisor, Album, Band, Bee, Car, Company, Event, Honeycomb, Image, Individual, Inventory, Member, MyFileField, Profile, ReleaseEvent, School, Student, UnsafeLimitChoicesTo, VideoStream, ) from .widgetadmin import site as widget_admin_site class TestDataMixin: @classmethod def setUpTestData(cls): cls.superuser = User.objects.create_superuser( username="super", password="secret", email=None ) cls.u2 = User.objects.create_user(username="testser", password="secret") Car.objects.create(owner=cls.superuser, make="Volkswagen", model="Passat") Car.objects.create(owner=cls.u2, make="BMW", model="M3") class AdminFormfieldForDBFieldTests(SimpleTestCase): """ Tests for correct behavior of ModelAdmin.formfield_for_dbfield """ def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides): """ Helper to call formfield_for_dbfield for a given model and field name and verify that the returned formfield is appropriate. """ # Override any settings on the model admin class MyModelAdmin(admin.ModelAdmin): pass for k in admin_overrides: setattr(MyModelAdmin, k, admin_overrides[k]) # Construct the admin, and ask it for a formfield ma = MyModelAdmin(model, admin.site) ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None) # "unwrap" the widget wrapper, if needed if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper): widget = ff.widget.widget else: widget = ff.widget self.assertIsInstance(widget, widgetclass) # Return the formfield so that other tests can continue return ff def test_DateField(self): self.assertFormfield(Event, "start_date", widgets.AdminDateWidget) def test_DateTimeField(self): self.assertFormfield(Member, "birthdate", widgets.AdminSplitDateTime) def test_TimeField(self): self.assertFormfield(Event, "start_time", widgets.AdminTimeWidget) def test_TextField(self): self.assertFormfield(Event, "description", widgets.AdminTextareaWidget) def test_URLField(self): self.assertFormfield(Event, "link", widgets.AdminURLFieldWidget) def test_IntegerField(self): self.assertFormfield(Event, "min_age", widgets.AdminIntegerFieldWidget) def test_CharField(self): self.assertFormfield(Member, "name", widgets.AdminTextInputWidget) def test_EmailField(self): self.assertFormfield(Member, "email", widgets.AdminEmailInputWidget) def test_FileField(self): self.assertFormfield(Album, "cover_art", widgets.AdminFileWidget) def test_ForeignKey(self): self.assertFormfield(Event, "main_band", forms.Select) def test_raw_id_ForeignKey(self): self.assertFormfield( Event, "main_band", widgets.ForeignKeyRawIdWidget, raw_id_fields=["main_band"], ) def test_radio_fields_ForeignKey(self): ff = self.assertFormfield( Event, "main_band", widgets.AdminRadioSelect, 
radio_fields={"main_band": admin.VERTICAL}, ) self.assertIsNone(ff.empty_label) def test_radio_fields_foreignkey_formfield_overrides_empty_label(self): class MyModelAdmin(admin.ModelAdmin): radio_fields = {"parent": admin.VERTICAL} formfield_overrides = { ForeignKey: {"empty_label": "Custom empty label"}, } ma = MyModelAdmin(Inventory, admin.site) ff = ma.formfield_for_dbfield(Inventory._meta.get_field("parent"), request=None) self.assertEqual(ff.empty_label, "Custom empty label") def test_many_to_many(self): self.assertFormfield(Band, "members", forms.SelectMultiple) def test_raw_id_many_to_many(self): self.assertFormfield( Band, "members", widgets.ManyToManyRawIdWidget, raw_id_fields=["members"] ) def test_filtered_many_to_many(self): self.assertFormfield( Band, "members", widgets.FilteredSelectMultiple, filter_vertical=["members"] ) def test_formfield_overrides(self): self.assertFormfield( Event, "start_date", forms.TextInput, formfield_overrides={DateField: {"widget": forms.TextInput}}, ) def test_formfield_overrides_widget_instances(self): """ Widget instances in formfield_overrides are not shared between different fields. (#19423) """ class BandAdmin(admin.ModelAdmin): formfield_overrides = { CharField: {"widget": forms.TextInput(attrs={"size": "10"})} } ma = BandAdmin(Band, admin.site) f1 = ma.formfield_for_dbfield(Band._meta.get_field("name"), request=None) f2 = ma.formfield_for_dbfield(Band._meta.get_field("style"), request=None) self.assertNotEqual(f1.widget, f2.widget) self.assertEqual(f1.widget.attrs["maxlength"], "100") self.assertEqual(f2.widget.attrs["maxlength"], "20") self.assertEqual(f2.widget.attrs["size"], "10") def test_formfield_overrides_m2m_filter_widget(self): """ The autocomplete_fields, raw_id_fields, filter_vertical, and filter_horizontal widgets for ManyToManyFields may be overridden by specifying a widget in formfield_overrides. """ class BandAdmin(admin.ModelAdmin): filter_vertical = ["members"] formfield_overrides = { ManyToManyField: {"widget": forms.CheckboxSelectMultiple}, } ma = BandAdmin(Band, admin.site) field = ma.formfield_for_dbfield(Band._meta.get_field("members"), request=None) self.assertIsInstance(field.widget.widget, forms.CheckboxSelectMultiple) def test_formfield_overrides_for_datetime_field(self): """ Overriding the widget for DateTimeField doesn't overrides the default form_class for that field (#26449). """ class MemberAdmin(admin.ModelAdmin): formfield_overrides = { DateTimeField: {"widget": widgets.AdminSplitDateTime} } ma = MemberAdmin(Member, admin.site) f1 = ma.formfield_for_dbfield(Member._meta.get_field("birthdate"), request=None) self.assertIsInstance(f1.widget, widgets.AdminSplitDateTime) self.assertIsInstance(f1, forms.SplitDateTimeField) def test_formfield_overrides_for_custom_field(self): """ formfield_overrides works for a custom field class. 
""" class AlbumAdmin(admin.ModelAdmin): formfield_overrides = {MyFileField: {"widget": forms.TextInput()}} ma = AlbumAdmin(Member, admin.site) f1 = ma.formfield_for_dbfield( Album._meta.get_field("backside_art"), request=None ) self.assertIsInstance(f1.widget, forms.TextInput) def test_field_with_choices(self): self.assertFormfield(Member, "gender", forms.Select) def test_choices_with_radio_fields(self): self.assertFormfield( Member, "gender", widgets.AdminRadioSelect, radio_fields={"gender": admin.VERTICAL}, ) def test_inheritance(self): self.assertFormfield(Album, "backside_art", widgets.AdminFileWidget) def test_m2m_widgets(self): """m2m fields help text as it applies to admin app (#9321).""" class AdvisorAdmin(admin.ModelAdmin): filter_vertical = ["companies"] self.assertFormfield( Advisor, "companies", widgets.FilteredSelectMultiple, filter_vertical=["companies"], ) ma = AdvisorAdmin(Advisor, admin.site) f = ma.formfield_for_dbfield(Advisor._meta.get_field("companies"), request=None) self.assertEqual( f.help_text, "Hold down “Control”, or “Command” on a Mac, to select more than one.", ) def test_m2m_widgets_no_allow_multiple_selected(self): class NoAllowMultipleSelectedWidget(forms.SelectMultiple): allow_multiple_selected = False class AdvisorAdmin(admin.ModelAdmin): filter_vertical = ["companies"] formfield_overrides = { ManyToManyField: {"widget": NoAllowMultipleSelectedWidget}, } self.assertFormfield( Advisor, "companies", widgets.FilteredSelectMultiple, filter_vertical=["companies"], ) ma = AdvisorAdmin(Advisor, admin.site) f = ma.formfield_for_dbfield(Advisor._meta.get_field("companies"), request=None) self.assertEqual(f.help_text, "") @override_settings(ROOT_URLCONF="admin_widgets.urls") class AdminFormfieldForDBFieldWithRequestTests(TestDataMixin, TestCase): def test_filter_choices_by_request_user(self): """ Ensure the user can only see their own cars in the foreign key dropdown. """ self.client.force_login(self.superuser) response = self.client.get(reverse("admin:admin_widgets_cartire_add")) self.assertNotContains(response, "BMW M3") self.assertContains(response, "Volkswagen Passat") @override_settings(ROOT_URLCONF="admin_widgets.urls") class AdminForeignKeyWidgetChangeList(TestDataMixin, TestCase): def setUp(self): self.client.force_login(self.superuser) def test_changelist_ForeignKey(self): response = self.client.get(reverse("admin:admin_widgets_car_changelist")) self.assertContains(response, "/auth/user/add/") @override_settings(ROOT_URLCONF="admin_widgets.urls") class AdminForeignKeyRawIdWidget(TestDataMixin, TestCase): def setUp(self): self.client.force_login(self.superuser) def test_nonexistent_target_id(self): band = Band.objects.create(name="Bogey Blues") pk = band.pk band.delete() post_data = { "main_band": str(pk), } # Try posting with a nonexistent pk in a raw id field: this # should result in an error message, not a server exception. response = self.client.post(reverse("admin:admin_widgets_event_add"), post_data) self.assertContains( response, "Select a valid choice. That choice is not one of the available choices.", ) def test_invalid_target_id(self): for test_str in ("Iñtërnâtiônàlizætiøn", "1234'", -1234): # This should result in an error message, not a server exception. response = self.client.post( reverse("admin:admin_widgets_event_add"), {"main_band": test_str} ) self.assertContains( response, "Select a valid choice. 
That choice is not one of the available " "choices.", ) def test_url_params_from_lookup_dict_any_iterable(self): lookup1 = widgets.url_params_from_lookup_dict({"color__in": ("red", "blue")}) lookup2 = widgets.url_params_from_lookup_dict({"color__in": ["red", "blue"]}) self.assertEqual(lookup1, {"color__in": "red,blue"}) self.assertEqual(lookup1, lookup2) def test_url_params_from_lookup_dict_callable(self): def my_callable(): return "works" lookup1 = widgets.url_params_from_lookup_dict({"myfield": my_callable}) lookup2 = widgets.url_params_from_lookup_dict({"myfield": my_callable()}) self.assertEqual(lookup1, lookup2) def test_label_and_url_for_value_invalid_uuid(self): field = Bee._meta.get_field("honeycomb") self.assertIsInstance(field.target_field, UUIDField) widget = widgets.ForeignKeyRawIdWidget(field.remote_field, admin.site) self.assertEqual(widget.label_and_url_for_value("invalid-uuid"), ("", "")) class FilteredSelectMultipleWidgetTest(SimpleTestCase): def test_render(self): # Backslash in verbose_name to ensure it is JavaScript escaped. w = widgets.FilteredSelectMultiple("test\\", False) self.assertHTMLEqual( w.render("test", "test"), '<select multiple name="test" class="selectfilter" ' 'data-field-name="test\\" data-is-stacked="0">\n</select>', ) def test_stacked_render(self): # Backslash in verbose_name to ensure it is JavaScript escaped. w = widgets.FilteredSelectMultiple("test\\", True) self.assertHTMLEqual( w.render("test", "test"), '<select multiple name="test" class="selectfilterstacked" ' 'data-field-name="test\\" data-is-stacked="1">\n</select>', ) class AdminDateWidgetTest(SimpleTestCase): def test_attrs(self): w = widgets.AdminDateWidget() self.assertHTMLEqual( w.render("test", datetime(2007, 12, 1, 9, 30)), '<input value="2007-12-01" type="text" class="vDateField" name="test" ' 'size="10">', ) # pass attrs to widget w = widgets.AdminDateWidget(attrs={"size": 20, "class": "myDateField"}) self.assertHTMLEqual( w.render("test", datetime(2007, 12, 1, 9, 30)), '<input value="2007-12-01" type="text" class="myDateField" name="test" ' 'size="20">', ) class AdminTimeWidgetTest(SimpleTestCase): def test_attrs(self): w = widgets.AdminTimeWidget() self.assertHTMLEqual( w.render("test", datetime(2007, 12, 1, 9, 30)), '<input value="09:30:00" type="text" class="vTimeField" name="test" ' 'size="8">', ) # pass attrs to widget w = widgets.AdminTimeWidget(attrs={"size": 20, "class": "myTimeField"}) self.assertHTMLEqual( w.render("test", datetime(2007, 12, 1, 9, 30)), '<input value="09:30:00" type="text" class="myTimeField" name="test" ' 'size="20">', ) class AdminSplitDateTimeWidgetTest(SimpleTestCase): def test_render(self): w = widgets.AdminSplitDateTime() self.assertHTMLEqual( w.render("test", datetime(2007, 12, 1, 9, 30)), '<p class="datetime">' 'Date: <input value="2007-12-01" type="text" class="vDateField" ' 'name="test_0" size="10"><br>' 'Time: <input value="09:30:00" type="text" class="vTimeField" ' 'name="test_1" size="8"></p>', ) def test_localization(self): w = widgets.AdminSplitDateTime() with translation.override("de-at"): w.is_localized = True self.assertHTMLEqual( w.render("test", datetime(2007, 12, 1, 9, 30)), '<p class="datetime">' 'Datum: <input value="01.12.2007" type="text" ' 'class="vDateField" name="test_0"size="10"><br>' 'Zeit: <input value="09:30:00" type="text" class="vTimeField" ' 'name="test_1" size="8"></p>', ) class AdminURLWidgetTest(SimpleTestCase): def test_get_context_validates_url(self): w = widgets.AdminURLFieldWidget() for invalid in ["", 
"/not/a/full/url/", 'javascript:alert("Danger XSS!")']: with self.subTest(url=invalid): self.assertFalse(w.get_context("name", invalid, {})["url_valid"]) self.assertTrue(w.get_context("name", "http://example.com", {})["url_valid"]) def test_render(self): w = widgets.AdminURLFieldWidget() self.assertHTMLEqual( w.render("test", ""), '<input class="vURLField" name="test" type="url">' ) self.assertHTMLEqual( w.render("test", "http://example.com"), '<p class="url">Currently:<a href="http://example.com">' "http://example.com</a><br>" 'Change:<input class="vURLField" name="test" type="url" ' 'value="http://example.com"></p>', ) def test_render_idn(self): w = widgets.AdminURLFieldWidget() self.assertHTMLEqual( w.render("test", "http://example-äüö.com"), '<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">' "http://example-äüö.com</a><br>" 'Change:<input class="vURLField" name="test" type="url" ' 'value="http://example-äüö.com"></p>', ) def test_render_quoting(self): """ WARNING: This test doesn't use assertHTMLEqual since it will get rid of some escapes which are tested here! """ HREF_RE = re.compile('href="([^"]+)"') VALUE_RE = re.compile('value="([^"]+)"') TEXT_RE = re.compile("<a[^>]+>([^>]+)</a>") w = widgets.AdminURLFieldWidget() output = w.render("test", "http://example.com/<sometag>some-text</sometag>") self.assertEqual( HREF_RE.search(output)[1], "http://example.com/%3Csometag%3Esome-text%3C/sometag%3E", ) self.assertEqual( TEXT_RE.search(output)[1], "http://example.com/&lt;sometag&gt;some-text&lt;/sometag&gt;", ) self.assertEqual( VALUE_RE.search(output)[1], "http://example.com/&lt;sometag&gt;some-text&lt;/sometag&gt;", ) output = w.render("test", "http://example-äüö.com/<sometag>some-text</sometag>") self.assertEqual( HREF_RE.search(output)[1], "http://xn--example--7za4pnc.com/%3Csometag%3Esome-text%3C/sometag%3E", ) self.assertEqual( TEXT_RE.search(output)[1], "http://example-äüö.com/&lt;sometag&gt;some-text&lt;/sometag&gt;", ) self.assertEqual( VALUE_RE.search(output)[1], "http://example-äüö.com/&lt;sometag&gt;some-text&lt;/sometag&gt;", ) output = w.render( "test", 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"' ) self.assertEqual( HREF_RE.search(output)[1], "http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)" "%3C/script%3E%22", ) self.assertEqual( TEXT_RE.search(output)[1], "http://www.example.com/%C3%A4&quot;&gt;&lt;script&gt;" "alert(&quot;XSS!&quot;)&lt;/script&gt;&quot;", ) self.assertEqual( VALUE_RE.search(output)[1], "http://www.example.com/%C3%A4&quot;&gt;&lt;script&gt;" "alert(&quot;XSS!&quot;)&lt;/script&gt;&quot;", ) class AdminUUIDWidgetTests(SimpleTestCase): def test_attrs(self): w = widgets.AdminUUIDInputWidget() self.assertHTMLEqual( w.render("test", "550e8400-e29b-41d4-a716-446655440000"), '<input value="550e8400-e29b-41d4-a716-446655440000" type="text" ' 'class="vUUIDField" name="test">', ) w = widgets.AdminUUIDInputWidget(attrs={"class": "myUUIDInput"}) self.assertHTMLEqual( w.render("test", "550e8400-e29b-41d4-a716-446655440000"), '<input value="550e8400-e29b-41d4-a716-446655440000" type="text" ' 'class="myUUIDInput" name="test">', ) @override_settings(ROOT_URLCONF="admin_widgets.urls") class AdminFileWidgetTests(TestDataMixin, TestCase): @classmethod def setUpTestData(cls): super().setUpTestData() band = Band.objects.create(name="Linkin Park") cls.album = band.album_set.create( name="Hybrid Theory", cover_art=r"albums\hybrid_theory.jpg" ) def test_render(self): w = widgets.AdminFileWidget() self.assertHTMLEqual( 
w.render("test", self.album.cover_art), '<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/' r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> ' '<span class="clearable-file-input">' '<input type="checkbox" name="test-clear" id="test-clear_id"> ' '<label for="test-clear_id">Clear</label></span><br>' 'Change: <input type="file" name="test"></p>' % { "STORAGE_URL": default_storage.url(""), }, ) self.assertHTMLEqual( w.render("test", SimpleUploadedFile("test", b"content")), '<input type="file" name="test">', ) def test_render_required(self): widget = widgets.AdminFileWidget() widget.is_required = True self.assertHTMLEqual( widget.render("test", self.album.cover_art), '<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/' r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a><br>' 'Change: <input type="file" name="test"></p>' % { "STORAGE_URL": default_storage.url(""), }, ) def test_render_disabled(self): widget = widgets.AdminFileWidget(attrs={"disabled": True}) self.assertHTMLEqual( widget.render("test", self.album.cover_art), '<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/' r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> ' '<span class="clearable-file-input">' '<input type="checkbox" name="test-clear" id="test-clear_id" disabled>' '<label for="test-clear_id">Clear</label></span><br>' 'Change: <input type="file" name="test" disabled></p>' % { "STORAGE_URL": default_storage.url(""), }, ) def test_readonly_fields(self): """ File widgets should render as a link when they're marked "read only." """ self.client.force_login(self.superuser) response = self.client.get( reverse("admin:admin_widgets_album_change", args=(self.album.id,)) ) self.assertContains( response, '<div class="readonly"><a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">' r"albums\hybrid_theory.jpg</a></div>" % {"STORAGE_URL": default_storage.url("")}, html=True, ) self.assertNotContains( response, '<input type="file" name="cover_art" id="id_cover_art">', html=True, ) response = self.client.get(reverse("admin:admin_widgets_album_add")) self.assertContains( response, '<div class="readonly"></div>', html=True, ) @override_settings(ROOT_URLCONF="admin_widgets.urls") class ForeignKeyRawIdWidgetTest(TestCase): def test_render(self): band = Band.objects.create(name="Linkin Park") band.album_set.create( name="Hybrid Theory", cover_art=r"albums\hybrid_theory.jpg" ) rel_uuid = Album._meta.get_field("band").remote_field w = widgets.ForeignKeyRawIdWidget(rel_uuid, widget_admin_site) self.assertHTMLEqual( w.render("test", band.uuid, attrs={}), '<input type="text" name="test" value="%(banduuid)s" ' 'class="vForeignKeyRawIdAdminField vUUIDField">' '<a href="/admin_widgets/band/?_to_field=uuid" class="related-lookup" ' 'id="lookup_id_test" title="Lookup"></a>&nbsp;<strong>' '<a href="/admin_widgets/band/%(bandpk)s/change/">Linkin Park</a>' "</strong>" % {"banduuid": band.uuid, "bandpk": band.pk}, ) rel_id = ReleaseEvent._meta.get_field("album").remote_field w = widgets.ForeignKeyRawIdWidget(rel_id, widget_admin_site) self.assertHTMLEqual( w.render("test", None, attrs={}), '<input type="text" name="test" class="vForeignKeyRawIdAdminField">' '<a href="/admin_widgets/album/?_to_field=id" class="related-lookup" ' 'id="lookup_id_test" title="Lookup"></a>', ) def test_relations_to_non_primary_key(self): # ForeignKeyRawIdWidget works with fields which aren't related to # the model's primary key. 
apple = Inventory.objects.create(barcode=86, name="Apple") Inventory.objects.create(barcode=22, name="Pear") core = Inventory.objects.create(barcode=87, name="Core", parent=apple) rel = Inventory._meta.get_field("parent").remote_field w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site) self.assertHTMLEqual( w.render("test", core.parent_id, attrs={}), '<input type="text" name="test" value="86" ' 'class="vForeignKeyRawIdAdminField">' '<a href="/admin_widgets/inventory/?_to_field=barcode" ' 'class="related-lookup" id="lookup_id_test" title="Lookup"></a>' '&nbsp;<strong><a href="/admin_widgets/inventory/%(pk)s/change/">' "Apple</a></strong>" % {"pk": apple.pk}, ) def test_fk_related_model_not_in_admin(self): # FK to a model not registered with admin site. Raw ID widget should # have no magnifying glass link. See #16542 big_honeycomb = Honeycomb.objects.create(location="Old tree") big_honeycomb.bee_set.create() rel = Bee._meta.get_field("honeycomb").remote_field w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site) self.assertHTMLEqual( w.render("honeycomb_widget", big_honeycomb.pk, attrs={}), '<input type="text" name="honeycomb_widget" value="%(hcombpk)s">' "&nbsp;<strong>%(hcomb)s</strong>" % {"hcombpk": big_honeycomb.pk, "hcomb": big_honeycomb}, ) def test_fk_to_self_model_not_in_admin(self): # FK to self, not registered with admin site. Raw ID widget should have # no magnifying glass link. See #16542 subject1 = Individual.objects.create(name="Subject #1") Individual.objects.create(name="Child", parent=subject1) rel = Individual._meta.get_field("parent").remote_field w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site) self.assertHTMLEqual( w.render("individual_widget", subject1.pk, attrs={}), '<input type="text" name="individual_widget" value="%(subj1pk)s">' "&nbsp;<strong>%(subj1)s</strong>" % {"subj1pk": subject1.pk, "subj1": subject1}, ) def test_proper_manager_for_label_lookup(self): # see #9258 rel = Inventory._meta.get_field("parent").remote_field w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site) hidden = Inventory.objects.create(barcode=93, name="Hidden", hidden=True) child_of_hidden = Inventory.objects.create( barcode=94, name="Child of hidden", parent=hidden ) self.assertHTMLEqual( w.render("test", child_of_hidden.parent_id, attrs={}), '<input type="text" name="test" value="93" ' ' class="vForeignKeyRawIdAdminField">' '<a href="/admin_widgets/inventory/?_to_field=barcode" ' 'class="related-lookup" id="lookup_id_test" title="Lookup"></a>' '&nbsp;<strong><a href="/admin_widgets/inventory/%(pk)s/change/">' "Hidden</a></strong>" % {"pk": hidden.pk}, ) def test_render_unsafe_limit_choices_to(self): rel = UnsafeLimitChoicesTo._meta.get_field("band").remote_field w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site) self.assertHTMLEqual( w.render("test", None), '<input type="text" name="test" class="vForeignKeyRawIdAdminField">\n' '<a href="/admin_widgets/band/?name=%22%26%3E%3Cescapeme&amp;' '_to_field=artist_ptr" class="related-lookup" id="lookup_id_test" ' 'title="Lookup"></a>', ) def test_render_fk_as_pk_model(self): rel = VideoStream._meta.get_field("release_event").remote_field w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site) self.assertHTMLEqual( w.render("test", None), '<input type="text" name="test" class="vForeignKeyRawIdAdminField">\n' '<a href="/admin_widgets/releaseevent/?_to_field=album" ' 'class="related-lookup" id="lookup_id_test" title="Lookup"></a>', ) @override_settings(ROOT_URLCONF="admin_widgets.urls") class 
ManyToManyRawIdWidgetTest(TestCase): def test_render(self): band = Band.objects.create(name="Linkin Park") m1 = Member.objects.create(name="Chester") m2 = Member.objects.create(name="Mike") band.members.add(m1, m2) rel = Band._meta.get_field("members").remote_field w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site) self.assertHTMLEqual( w.render("test", [m1.pk, m2.pk], attrs={}), ( '<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" ' ' class="vManyToManyRawIdAdminField">' '<a href="/admin_widgets/member/" class="related-lookup" ' ' id="lookup_id_test" title="Lookup"></a>' ) % {"m1pk": m1.pk, "m2pk": m2.pk}, ) self.assertHTMLEqual( w.render("test", [m1.pk]), ( '<input type="text" name="test" value="%(m1pk)s" ' ' class="vManyToManyRawIdAdminField">' '<a href="/admin_widgets/member/" class="related-lookup" ' ' id="lookup_id_test" title="Lookup"></a>' ) % {"m1pk": m1.pk}, ) def test_m2m_related_model_not_in_admin(self): # M2M relationship with model not registered with admin site. Raw ID # widget should have no magnifying glass link. See #16542 consultor1 = Advisor.objects.create(name="Rockstar Techie") c1 = Company.objects.create(name="Doodle") c2 = Company.objects.create(name="Pear") consultor1.companies.add(c1, c2) rel = Advisor._meta.get_field("companies").remote_field w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site) self.assertHTMLEqual( w.render("company_widget1", [c1.pk, c2.pk], attrs={}), '<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s">' % {"c1pk": c1.pk, "c2pk": c2.pk}, ) self.assertHTMLEqual( w.render("company_widget2", [c1.pk]), '<input type="text" name="company_widget2" value="%(c1pk)s">' % {"c1pk": c1.pk}, ) @override_settings(ROOT_URLCONF="admin_widgets.urls") class RelatedFieldWidgetWrapperTests(SimpleTestCase): def test_no_can_add_related(self): rel = Individual._meta.get_field("parent").remote_field w = widgets.AdminRadioSelect() # Used to fail with a name error. 
w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site) self.assertFalse(w.can_add_related) def test_select_multiple_widget_cant_change_delete_related(self): rel = Individual._meta.get_field("parent").remote_field widget = forms.SelectMultiple() wrapper = widgets.RelatedFieldWidgetWrapper( widget, rel, widget_admin_site, can_add_related=True, can_change_related=True, can_delete_related=True, ) self.assertTrue(wrapper.can_add_related) self.assertFalse(wrapper.can_change_related) self.assertFalse(wrapper.can_delete_related) def test_on_delete_cascade_rel_cant_delete_related(self): rel = Individual._meta.get_field("soulmate").remote_field widget = forms.Select() wrapper = widgets.RelatedFieldWidgetWrapper( widget, rel, widget_admin_site, can_add_related=True, can_change_related=True, can_delete_related=True, ) self.assertTrue(wrapper.can_add_related) self.assertTrue(wrapper.can_change_related) self.assertFalse(wrapper.can_delete_related) def test_custom_widget_render(self): class CustomWidget(forms.Select): def render(self, *args, **kwargs): return "custom render output" rel = Album._meta.get_field("band").remote_field widget = CustomWidget() wrapper = widgets.RelatedFieldWidgetWrapper( widget, rel, widget_admin_site, can_add_related=True, can_change_related=True, can_delete_related=True, ) output = wrapper.render("name", "value") self.assertIn("custom render output", output) def test_widget_delegates_value_omitted_from_data(self): class CustomWidget(forms.Select): def value_omitted_from_data(self, data, files, name): return False rel = Album._meta.get_field("band").remote_field widget = CustomWidget() wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site) self.assertIs(wrapper.value_omitted_from_data({}, {}, "band"), False) def test_widget_is_hidden(self): rel = Album._meta.get_field("band").remote_field widget = forms.HiddenInput() widget.choices = () wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site) self.assertIs(wrapper.is_hidden, True) context = wrapper.get_context("band", None, {}) self.assertIs(context["is_hidden"], True) output = wrapper.render("name", "value") # Related item links are hidden. self.assertNotIn("<a ", output) def test_widget_is_not_hidden(self): rel = Album._meta.get_field("band").remote_field widget = forms.Select() wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site) self.assertIs(wrapper.is_hidden, False) context = wrapper.get_context("band", None, {}) self.assertIs(context["is_hidden"], False) output = wrapper.render("name", "value") # Related item links are present. self.assertIn("<a ", output) @override_settings(ROOT_URLCONF="admin_widgets.urls") class AdminWidgetSeleniumTestCase(AdminSeleniumTestCase): available_apps = ["admin_widgets"] + AdminSeleniumTestCase.available_apps def setUp(self): self.u1 = User.objects.create_superuser( username="super", password="secret", email="[email protected]" ) class DateTimePickerSeleniumTests(AdminWidgetSeleniumTestCase): def test_show_hide_date_time_picker_widgets(self): """ Pressing the ESC key or clicking on a widget value closes the date and time picker widgets. 
""" from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys self.admin_login(username="super", password="secret", login_url="/") # Open a page that has a date and time picker widgets self.selenium.get( self.live_server_url + reverse("admin:admin_widgets_member_add") ) # First, with the date picker widget --------------------------------- cal_icon = self.selenium.find_element(By.ID, "calendarlink0") # The date picker is hidden self.assertFalse( self.selenium.find_element(By.ID, "calendarbox0").is_displayed() ) # Click the calendar icon cal_icon.click() # The date picker is visible self.assertTrue( self.selenium.find_element(By.ID, "calendarbox0").is_displayed() ) # Press the ESC key self.selenium.find_element(By.TAG_NAME, "body").send_keys([Keys.ESCAPE]) # The date picker is hidden again self.assertFalse( self.selenium.find_element(By.ID, "calendarbox0").is_displayed() ) # Click the calendar icon, then on the 15th of current month cal_icon.click() self.selenium.find_element(By.XPATH, "//a[contains(text(), '15')]").click() self.assertFalse( self.selenium.find_element(By.ID, "calendarbox0").is_displayed() ) self.assertEqual( self.selenium.find_element(By.ID, "id_birthdate_0").get_attribute("value"), datetime.today().strftime("%Y-%m-") + "15", ) # Then, with the time picker widget ---------------------------------- time_icon = self.selenium.find_element(By.ID, "clocklink0") # The time picker is hidden self.assertFalse(self.selenium.find_element(By.ID, "clockbox0").is_displayed()) # Click the time icon time_icon.click() # The time picker is visible self.assertTrue(self.selenium.find_element(By.ID, "clockbox0").is_displayed()) self.assertEqual( [ x.text for x in self.selenium.find_elements( By.XPATH, "//ul[@class='timelist']/li/a" ) ], ["Now", "Midnight", "6 a.m.", "Noon", "6 p.m."], ) # Press the ESC key self.selenium.find_element(By.TAG_NAME, "body").send_keys([Keys.ESCAPE]) # The time picker is hidden again self.assertFalse(self.selenium.find_element(By.ID, "clockbox0").is_displayed()) # Click the time icon, then select the 'Noon' value time_icon.click() self.selenium.find_element(By.XPATH, "//a[contains(text(), 'Noon')]").click() self.assertFalse(self.selenium.find_element(By.ID, "clockbox0").is_displayed()) self.assertEqual( self.selenium.find_element(By.ID, "id_birthdate_1").get_attribute("value"), "12:00:00", ) def test_calendar_nonday_class(self): """ Ensure cells that are not days of the month have the `nonday` CSS class. Refs #4574. """ from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret", login_url="/") # Open a page that has a date and time picker widgets self.selenium.get( self.live_server_url + reverse("admin:admin_widgets_member_add") ) # fill in the birth date. self.selenium.find_element(By.ID, "id_birthdate_0").send_keys("2013-06-01") # Click the calendar icon self.selenium.find_element(By.ID, "calendarlink0").click() # get all the tds within the calendar calendar0 = self.selenium.find_element(By.ID, "calendarin0") tds = calendar0.find_elements(By.TAG_NAME, "td") # make sure the first and last 6 cells have class nonday for td in tds[:6] + tds[-6:]: self.assertEqual(td.get_attribute("class"), "nonday") def test_calendar_selected_class(self): """ Ensure cell for the day in the input has the `selected` CSS class. Refs #4574. 
""" from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret", login_url="/") # Open a page that has a date and time picker widgets self.selenium.get( self.live_server_url + reverse("admin:admin_widgets_member_add") ) # fill in the birth date. self.selenium.find_element(By.ID, "id_birthdate_0").send_keys("2013-06-01") # Click the calendar icon self.selenium.find_element(By.ID, "calendarlink0").click() # get all the tds within the calendar calendar0 = self.selenium.find_element(By.ID, "calendarin0") tds = calendar0.find_elements(By.TAG_NAME, "td") # verify the selected cell selected = tds[6] self.assertEqual(selected.get_attribute("class"), "selected") self.assertEqual(selected.text, "1") def test_calendar_no_selected_class(self): """ Ensure no cells are given the selected class when the field is empty. Refs #4574. """ from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret", login_url="/") # Open a page that has a date and time picker widgets self.selenium.get( self.live_server_url + reverse("admin:admin_widgets_member_add") ) # Click the calendar icon self.selenium.find_element(By.ID, "calendarlink0").click() # get all the tds within the calendar calendar0 = self.selenium.find_element(By.ID, "calendarin0") tds = calendar0.find_elements(By.TAG_NAME, "td") # verify there are no cells with the selected class selected = [td for td in tds if td.get_attribute("class") == "selected"] self.assertEqual(len(selected), 0) def test_calendar_show_date_from_input(self): """ The calendar shows the date from the input field for every locale supported by Django. """ from selenium.webdriver.common.by import By self.selenium.set_window_size(1024, 768) self.admin_login(username="super", password="secret", login_url="/") # Enter test data member = Member.objects.create( name="Bob", birthdate=datetime(1984, 5, 15), gender="M" ) # Get month name translations for every locale month_string = "May" path = os.path.join( os.path.dirname(import_module("django.contrib.admin").__file__), "locale" ) for language_code, language_name in settings.LANGUAGES: try: catalog = gettext.translation("djangojs", path, [language_code]) except OSError: continue if month_string in catalog._catalog: month_name = catalog._catalog[month_string] else: month_name = month_string # Get the expected caption may_translation = month_name expected_caption = "{:s} {:d}".format(may_translation.upper(), 1984) # Test with every locale with override_settings(LANGUAGE_CODE=language_code): # Open a page that has a date picker widget url = reverse("admin:admin_widgets_member_change", args=(member.pk,)) self.selenium.get(self.live_server_url + url) # Click on the calendar icon self.selenium.find_element(By.ID, "calendarlink0").click() # Make sure that the right month and year are displayed self.wait_for_text("#calendarin0 caption", expected_caption) @override_settings(TIME_ZONE="Asia/Singapore") class DateTimePickerShortcutsSeleniumTests(AdminWidgetSeleniumTestCase): def test_date_time_picker_shortcuts(self): """ date/time/datetime picker shortcuts work in the current time zone. Refs #20663. This test case is fairly tricky, it relies on selenium still running the browser in the default time zone "America/Chicago" despite `override_settings` changing the time zone to "Asia/Singapore". 
""" from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret", login_url="/") error_margin = timedelta(seconds=10) # If we are neighbouring a DST, we add an hour of error margin. tz = zoneinfo.ZoneInfo("America/Chicago") utc_now = datetime.now(zoneinfo.ZoneInfo("UTC")) tz_yesterday = (utc_now - timedelta(days=1)).astimezone(tz).tzname() tz_tomorrow = (utc_now + timedelta(days=1)).astimezone(tz).tzname() if tz_yesterday != tz_tomorrow: error_margin += timedelta(hours=1) self.selenium.get( self.live_server_url + reverse("admin:admin_widgets_member_add") ) self.selenium.find_element(By.ID, "id_name").send_keys("test") # Click on the "today" and "now" shortcuts. shortcuts = self.selenium.find_elements( By.CSS_SELECTOR, ".field-birthdate .datetimeshortcuts" ) now = datetime.now() for shortcut in shortcuts: shortcut.find_element(By.TAG_NAME, "a").click() # There is a time zone mismatch warning. # Warning: This would effectively fail if the TIME_ZONE defined in the # settings has the same UTC offset as "Asia/Singapore" because the # mismatch warning would be rightfully missing from the page. self.assertCountSeleniumElements(".field-birthdate .timezonewarning", 1) # Submit the form. with self.wait_page_loaded(): self.selenium.find_element(By.NAME, "_save").click() # Make sure that "now" in JavaScript is within 10 seconds # from "now" on the server side. member = Member.objects.get(name="test") self.assertGreater(member.birthdate, now - error_margin) self.assertLess(member.birthdate, now + error_margin) # The above tests run with Asia/Singapore which are on the positive side of # UTC. Here we test with a timezone on the negative side. @override_settings(TIME_ZONE="US/Eastern") class DateTimePickerAltTimezoneSeleniumTests(DateTimePickerShortcutsSeleniumTests): pass class HorizontalVerticalFilterSeleniumTests(AdminWidgetSeleniumTestCase): def setUp(self): super().setUp() self.lisa = Student.objects.create(name="Lisa") self.john = Student.objects.create(name="John") self.bob = Student.objects.create(name="Bob") self.peter = Student.objects.create(name="Peter") self.jenny = Student.objects.create(name="Jenny") self.jason = Student.objects.create(name="Jason") self.cliff = Student.objects.create(name="Cliff") self.arthur = Student.objects.create(name="Arthur") self.school = School.objects.create(name="School of Awesome") def assertActiveButtons( self, mode, field_name, choose, remove, choose_all=None, remove_all=None ): choose_link = "#id_%s_add_link" % field_name choose_all_link = "#id_%s_add_all_link" % field_name remove_link = "#id_%s_remove_link" % field_name remove_all_link = "#id_%s_remove_all_link" % field_name self.assertEqual(self.has_css_class(choose_link, "active"), choose) self.assertEqual(self.has_css_class(remove_link, "active"), remove) if mode == "horizontal": self.assertEqual(self.has_css_class(choose_all_link, "active"), choose_all) self.assertEqual(self.has_css_class(remove_all_link, "active"), remove_all) def execute_basic_operations(self, mode, field_name): from selenium.webdriver.common.by import By original_url = self.selenium.current_url from_box = "#id_%s_from" % field_name to_box = "#id_%s_to" % field_name choose_link = "id_%s_add_link" % field_name choose_all_link = "id_%s_add_all_link" % field_name remove_link = "id_%s_remove_link" % field_name remove_all_link = "id_%s_remove_all_link" % field_name # Initial positions --------------------------------------------------- self.assertSelectOptions( from_box, [ str(self.arthur.id), 
                str(self.bob.id),
                str(self.cliff.id),
                str(self.jason.id),
                str(self.jenny.id),
                str(self.john.id),
            ],
        )
        self.assertSelectOptions(to_box, [str(self.lisa.id), str(self.peter.id)])
        self.assertActiveButtons(mode, field_name, False, False, True, True)

        # Click 'Choose all' --------------------------------------------------
        if mode == "horizontal":
            self.selenium.find_element(By.ID, choose_all_link).click()
        elif mode == "vertical":
            # There's no 'Choose all' button in vertical mode, so individually
            # select all options and click 'Choose'.
            for option in self.selenium.find_elements(
                By.CSS_SELECTOR, from_box + " > option"
            ):
                option.click()
            self.selenium.find_element(By.ID, choose_link).click()
        self.assertSelectOptions(from_box, [])
        self.assertSelectOptions(
            to_box,
            [
                str(self.lisa.id),
                str(self.peter.id),
                str(self.arthur.id),
                str(self.bob.id),
                str(self.cliff.id),
                str(self.jason.id),
                str(self.jenny.id),
                str(self.john.id),
            ],
        )
        self.assertActiveButtons(mode, field_name, False, False, False, True)

        # Click 'Remove all' --------------------------------------------------
        if mode == "horizontal":
            self.selenium.find_element(By.ID, remove_all_link).click()
        elif mode == "vertical":
            # There's no 'Remove all' button in vertical mode, so individually
            # select all options and click 'Remove'.
            for option in self.selenium.find_elements(
                By.CSS_SELECTOR, to_box + " > option"
            ):
                option.click()
            self.selenium.find_element(By.ID, remove_link).click()
        self.assertSelectOptions(
            from_box,
            [
                str(self.lisa.id),
                str(self.peter.id),
                str(self.arthur.id),
                str(self.bob.id),
                str(self.cliff.id),
                str(self.jason.id),
                str(self.jenny.id),
                str(self.john.id),
            ],
        )
        self.assertSelectOptions(to_box, [])
        self.assertActiveButtons(mode, field_name, False, False, True, False)

        # Choose some options ------------------------------------------------
        from_lisa_select_option = self.selenium.find_element(
            By.CSS_SELECTOR, '{} > option[value="{}"]'.format(from_box, self.lisa.id)
        )
        # Check the title attribute is there for tool tips: ticket #20821
        self.assertEqual(
            from_lisa_select_option.get_attribute("title"),
            from_lisa_select_option.get_attribute("text"),
        )
        self.select_option(from_box, str(self.lisa.id))
        self.select_option(from_box, str(self.jason.id))
        self.select_option(from_box, str(self.bob.id))
        self.select_option(from_box, str(self.john.id))
        self.assertActiveButtons(mode, field_name, True, False, True, False)
        self.selenium.find_element(By.ID, choose_link).click()
        self.assertActiveButtons(mode, field_name, False, False, True, True)

        self.assertSelectOptions(
            from_box,
            [
                str(self.peter.id),
                str(self.arthur.id),
                str(self.cliff.id),
                str(self.jenny.id),
            ],
        )
        self.assertSelectOptions(
            to_box,
            [
                str(self.lisa.id),
                str(self.bob.id),
                str(self.jason.id),
                str(self.john.id),
            ],
        )

        # Check the tooltip is still there after moving: ticket #20821
        to_lisa_select_option = self.selenium.find_element(
            By.CSS_SELECTOR, '{} > option[value="{}"]'.format(to_box, self.lisa.id)
        )
        self.assertEqual(
            to_lisa_select_option.get_attribute("title"),
            to_lisa_select_option.get_attribute("text"),
        )

        # Remove some options -------------------------------------------------
        self.select_option(to_box, str(self.lisa.id))
        self.select_option(to_box, str(self.bob.id))
        self.assertActiveButtons(mode, field_name, False, True, True, True)
        self.selenium.find_element(By.ID, remove_link).click()
        self.assertActiveButtons(mode, field_name, False, False, True, True)

        self.assertSelectOptions(
            from_box,
            [
                str(self.peter.id),
                str(self.arthur.id),
                str(self.cliff.id),
                str(self.jenny.id),
                str(self.lisa.id),
str(self.bob.id), ], ) self.assertSelectOptions(to_box, [str(self.jason.id), str(self.john.id)]) # Choose some more options -------------------------------------------- self.select_option(from_box, str(self.arthur.id)) self.select_option(from_box, str(self.cliff.id)) self.selenium.find_element(By.ID, choose_link).click() self.assertSelectOptions( from_box, [ str(self.peter.id), str(self.jenny.id), str(self.lisa.id), str(self.bob.id), ], ) self.assertSelectOptions( to_box, [ str(self.jason.id), str(self.john.id), str(self.arthur.id), str(self.cliff.id), ], ) # Choose some more options -------------------------------------------- self.select_option(from_box, str(self.peter.id)) self.select_option(from_box, str(self.lisa.id)) # Confirm they're selected after clicking inactive buttons: ticket #26575 self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)]) self.selenium.find_element(By.ID, remove_link).click() self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)]) # Unselect the options ------------------------------------------------ self.deselect_option(from_box, str(self.peter.id)) self.deselect_option(from_box, str(self.lisa.id)) # Choose some more options -------------------------------------------- self.select_option(to_box, str(self.jason.id)) self.select_option(to_box, str(self.john.id)) # Confirm they're selected after clicking inactive buttons: ticket #26575 self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)]) self.selenium.find_element(By.ID, choose_link).click() self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)]) # Unselect the options ------------------------------------------------ self.deselect_option(to_box, str(self.jason.id)) self.deselect_option(to_box, str(self.john.id)) # Pressing buttons shouldn't change the URL. self.assertEqual(self.selenium.current_url, original_url) def test_basic(self): from selenium.webdriver.common.by import By self.selenium.set_window_size(1024, 768) self.school.students.set([self.lisa, self.peter]) self.school.alumni.set([self.lisa, self.peter]) self.admin_login(username="super", password="secret", login_url="/") self.selenium.get( self.live_server_url + reverse("admin:admin_widgets_school_change", args=(self.school.id,)) ) self.wait_page_ready() self.execute_basic_operations("vertical", "students") self.execute_basic_operations("horizontal", "alumni") # Save and check that everything is properly stored in the database --- self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click() self.wait_page_ready() self.school = School.objects.get(id=self.school.id) # Reload from database self.assertEqual( list(self.school.students.all()), [self.arthur, self.cliff, self.jason, self.john], ) self.assertEqual( list(self.school.alumni.all()), [self.arthur, self.cliff, self.jason, self.john], ) def test_filter(self): """ Typing in the search box filters out options displayed in the 'from' box. 
""" from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys self.selenium.set_window_size(1024, 768) self.school.students.set([self.lisa, self.peter]) self.school.alumni.set([self.lisa, self.peter]) self.admin_login(username="super", password="secret", login_url="/") self.selenium.get( self.live_server_url + reverse("admin:admin_widgets_school_change", args=(self.school.id,)) ) for field_name in ["students", "alumni"]: from_box = "#id_%s_from" % field_name to_box = "#id_%s_to" % field_name choose_link = "id_%s_add_link" % field_name remove_link = "id_%s_remove_link" % field_name input = self.selenium.find_element(By.ID, "id_%s_input" % field_name) # Initial values self.assertSelectOptions( from_box, [ str(self.arthur.id), str(self.bob.id), str(self.cliff.id), str(self.jason.id), str(self.jenny.id), str(self.john.id), ], ) # Typing in some characters filters out non-matching options input.send_keys("a") self.assertSelectOptions( from_box, [str(self.arthur.id), str(self.jason.id)] ) input.send_keys("R") self.assertSelectOptions(from_box, [str(self.arthur.id)]) # Clearing the text box makes the other options reappear input.send_keys([Keys.BACK_SPACE]) self.assertSelectOptions( from_box, [str(self.arthur.id), str(self.jason.id)] ) input.send_keys([Keys.BACK_SPACE]) self.assertSelectOptions( from_box, [ str(self.arthur.id), str(self.bob.id), str(self.cliff.id), str(self.jason.id), str(self.jenny.id), str(self.john.id), ], ) # ----------------------------------------------------------------- # Choosing a filtered option sends it properly to the 'to' box. input.send_keys("a") self.assertSelectOptions( from_box, [str(self.arthur.id), str(self.jason.id)] ) self.select_option(from_box, str(self.jason.id)) self.selenium.find_element(By.ID, choose_link).click() self.assertSelectOptions(from_box, [str(self.arthur.id)]) self.assertSelectOptions( to_box, [ str(self.lisa.id), str(self.peter.id), str(self.jason.id), ], ) self.select_option(to_box, str(self.lisa.id)) self.selenium.find_element(By.ID, remove_link).click() self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.lisa.id)]) self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)]) input.send_keys([Keys.BACK_SPACE]) # Clear text box self.assertSelectOptions( from_box, [ str(self.arthur.id), str(self.bob.id), str(self.cliff.id), str(self.jenny.id), str(self.john.id), str(self.lisa.id), ], ) self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)]) # ----------------------------------------------------------------- # Pressing enter on a filtered option sends it properly to # the 'to' box. 
self.select_option(to_box, str(self.jason.id)) self.selenium.find_element(By.ID, remove_link).click() input.send_keys("ja") self.assertSelectOptions(from_box, [str(self.jason.id)]) input.send_keys([Keys.ENTER]) self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)]) input.send_keys([Keys.BACK_SPACE, Keys.BACK_SPACE]) # Save and check that everything is properly stored in the database --- with self.wait_page_loaded(): self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click() self.school = School.objects.get(id=self.school.id) # Reload from database self.assertEqual(list(self.school.students.all()), [self.jason, self.peter]) self.assertEqual(list(self.school.alumni.all()), [self.jason, self.peter]) def test_back_button_bug(self): """ Some browsers had a bug where navigating away from the change page and then clicking the browser's back button would clear the filter_horizontal/filter_vertical widgets (#13614). """ from selenium.webdriver.common.by import By self.school.students.set([self.lisa, self.peter]) self.school.alumni.set([self.lisa, self.peter]) self.admin_login(username="super", password="secret", login_url="/") change_url = reverse( "admin:admin_widgets_school_change", args=(self.school.id,) ) self.selenium.get(self.live_server_url + change_url) # Navigate away and go back to the change form page. self.selenium.find_element(By.LINK_TEXT, "Home").click() self.selenium.back() expected_unselected_values = [ str(self.arthur.id), str(self.bob.id), str(self.cliff.id), str(self.jason.id), str(self.jenny.id), str(self.john.id), ] expected_selected_values = [str(self.lisa.id), str(self.peter.id)] # Everything is still in place self.assertSelectOptions("#id_students_from", expected_unselected_values) self.assertSelectOptions("#id_students_to", expected_selected_values) self.assertSelectOptions("#id_alumni_from", expected_unselected_values) self.assertSelectOptions("#id_alumni_to", expected_selected_values) def test_refresh_page(self): """ Horizontal and vertical filter widgets keep selected options on page reload (#22955). """ self.school.students.add(self.arthur, self.jason) self.school.alumni.add(self.arthur, self.jason) self.admin_login(username="super", password="secret", login_url="/") change_url = reverse( "admin:admin_widgets_school_change", args=(self.school.id,) ) self.selenium.get(self.live_server_url + change_url) self.assertCountSeleniumElements("#id_students_to > option", 2) # self.selenium.refresh() or send_keys(Keys.F5) does hard reload and # doesn't replicate what happens when a user clicks the browser's # 'Refresh' button. 
with self.wait_page_loaded(): self.selenium.execute_script("location.reload()") self.assertCountSeleniumElements("#id_students_to > option", 2) class AdminRawIdWidgetSeleniumTests(AdminWidgetSeleniumTestCase): def setUp(self): super().setUp() Band.objects.create(id=42, name="Bogey Blues") Band.objects.create(id=98, name="Green Potatoes") def test_ForeignKey(self): from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret", login_url="/") self.selenium.get( self.live_server_url + reverse("admin:admin_widgets_event_add") ) main_window = self.selenium.current_window_handle # No value has been selected yet self.assertEqual( self.selenium.find_element(By.ID, "id_main_band").get_attribute("value"), "" ) # Open the popup window and click on a band self.selenium.find_element(By.ID, "lookup_id_main_band").click() self.wait_for_and_switch_to_popup() link = self.selenium.find_element(By.LINK_TEXT, "Bogey Blues") self.assertIn("/band/42/", link.get_attribute("href")) link.click() # The field now contains the selected band's id self.selenium.switch_to.window(main_window) self.wait_for_value("#id_main_band", "42") # Reopen the popup window and click on another band self.selenium.find_element(By.ID, "lookup_id_main_band").click() self.wait_for_and_switch_to_popup() link = self.selenium.find_element(By.LINK_TEXT, "Green Potatoes") self.assertIn("/band/98/", link.get_attribute("href")) link.click() # The field now contains the other selected band's id self.selenium.switch_to.window(main_window) self.wait_for_value("#id_main_band", "98") def test_many_to_many(self): from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret", login_url="/") self.selenium.get( self.live_server_url + reverse("admin:admin_widgets_event_add") ) main_window = self.selenium.current_window_handle # No value has been selected yet self.assertEqual( self.selenium.find_element(By.ID, "id_supporting_bands").get_attribute( "value" ), "", ) # Help text for the field is displayed self.assertEqual( self.selenium.find_element( By.CSS_SELECTOR, ".field-supporting_bands div.help" ).text, "Supporting Bands.", ) # Open the popup window and click on a band self.selenium.find_element(By.ID, "lookup_id_supporting_bands").click() self.wait_for_and_switch_to_popup() link = self.selenium.find_element(By.LINK_TEXT, "Bogey Blues") self.assertIn("/band/42/", link.get_attribute("href")) link.click() # The field now contains the selected band's id self.selenium.switch_to.window(main_window) self.wait_for_value("#id_supporting_bands", "42") # Reopen the popup window and click on another band self.selenium.find_element(By.ID, "lookup_id_supporting_bands").click() self.wait_for_and_switch_to_popup() link = self.selenium.find_element(By.LINK_TEXT, "Green Potatoes") self.assertIn("/band/98/", link.get_attribute("href")) link.click() # The field now contains the two selected bands' ids self.selenium.switch_to.window(main_window) self.wait_for_value("#id_supporting_bands", "42,98") class RelatedFieldWidgetSeleniumTests(AdminWidgetSeleniumTestCase): def test_ForeignKey_using_to_field(self): from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import Select self.admin_login(username="super", password="secret", login_url="/") self.selenium.get( self.live_server_url + reverse("admin:admin_widgets_profile_add") ) main_window = self.selenium.current_window_handle # Click the Add User button to add new self.selenium.find_element(By.ID, "add_id_user").click() 
        self.wait_for_and_switch_to_popup()
        password_field = self.selenium.find_element(By.ID, "id_password")
        password_field.send_keys("password")

        username_field = self.selenium.find_element(By.ID, "id_username")
        username_value = "newuser"
        username_field.send_keys(username_value)

        save_button_css_selector = ".submit-row > input[type=submit]"
        self.selenium.find_element(By.CSS_SELECTOR, save_button_css_selector).click()
        self.selenium.switch_to.window(main_window)
        # The field now contains the new user
        self.selenium.find_element(By.CSS_SELECTOR, "#id_user option[value=newuser]")
        self.selenium.find_element(By.ID, "view_id_user").click()
        self.wait_for_value("#id_username", "newuser")
        self.selenium.back()

        # Chrome and Safari don't update related object links when selecting
        # the same option as previously submitted. As a consequence, the
        # "pencil" and "eye" buttons remain disabled, so select "---------"
        # first.
        select = Select(self.selenium.find_element(By.ID, "id_user"))
        select.select_by_index(0)
        select.select_by_value("newuser")
        # Click the Change User button to change it
        self.selenium.find_element(By.ID, "change_id_user").click()
        self.wait_for_and_switch_to_popup()

        username_field = self.selenium.find_element(By.ID, "id_username")
        username_value = "changednewuser"
        username_field.clear()
        username_field.send_keys(username_value)

        save_button_css_selector = ".submit-row > input[type=submit]"
        self.selenium.find_element(By.CSS_SELECTOR, save_button_css_selector).click()
        self.selenium.switch_to.window(main_window)
        self.selenium.find_element(
            By.CSS_SELECTOR, "#id_user option[value=changednewuser]"
        )
        self.selenium.find_element(By.ID, "view_id_user").click()
        self.wait_for_value("#id_username", "changednewuser")
        self.selenium.back()

        select = Select(self.selenium.find_element(By.ID, "id_user"))
        select.select_by_value("changednewuser")
        # Go ahead and submit the form to make sure it works
        self.selenium.find_element(By.CSS_SELECTOR, save_button_css_selector).click()
        self.wait_for_text(
            "li.success", "The profile “changednewuser” was added successfully."
        )
        profiles = Profile.objects.all()
        self.assertEqual(len(profiles), 1)
        self.assertEqual(profiles[0].user.username, username_value)


@skipUnless(Image, "Pillow not installed")
class ImageFieldWidgetsSeleniumTests(AdminWidgetSeleniumTestCase):
    def test_clearablefileinput_widget(self):
        from selenium.webdriver.common.by import By

        self.admin_login(username="super", password="secret", login_url="/")
        self.selenium.get(
            self.live_server_url + reverse("admin:admin_widgets_student_add"),
        )
        photo_input_id = "id_photo"
        save_and_edit_button_css_selector = "input[value='Save and continue editing']"
        tests_files_folder = "%s/files" % os.getcwd()
        clear_checkbox_id = "photo-clear_id"

        def _submit_and_wait():
            with self.wait_page_loaded():
                self.selenium.find_element(
                    By.CSS_SELECTOR, save_and_edit_button_css_selector
                ).click()

        # Add a student.
        title_input = self.selenium.find_element(By.ID, "id_name")
        title_input.send_keys("Joe Doe")
        photo_input = self.selenium.find_element(By.ID, photo_input_id)
        photo_input.send_keys(f"{tests_files_folder}/test.png")
        _submit_and_wait()
        student = Student.objects.last()
        self.assertEqual(student.name, "Joe Doe")
        self.assertEqual(student.photo.name, "photos/test.png")
        # Uploading non-image files is not supported by Safari with Selenium,
        # so upload a broken one instead.
photo_input = self.selenium.find_element(By.ID, photo_input_id) photo_input.send_keys(f"{tests_files_folder}/brokenimg.png") _submit_and_wait() self.assertEqual( self.selenium.find_element(By.CSS_SELECTOR, ".errorlist li").text, ( "Upload a valid image. The file you uploaded was either not an image " "or a corrupted image." ), ) # "Currently" with "Clear" checkbox and "Change" still shown. cover_field_row = self.selenium.find_element(By.CSS_SELECTOR, ".field-photo") self.assertIn("Currently", cover_field_row.text) self.assertIn("Change", cover_field_row.text) # "Clear" box works. self.selenium.find_element(By.ID, clear_checkbox_id).click() _submit_and_wait() student.refresh_from_db() self.assertEqual(student.name, "Joe Doe") self.assertEqual(student.photo.name, "")
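

# Illustrative sketch (not an upstream test): the mechanism the
# formfield_for_dbfield tests above exercise. ModelAdmin.formfield_for_dbfield()
# looks the field's class up in formfield_overrides and merges the matching
# kwargs before building the formfield, deep-copying any widget instance so it
# isn't shared between fields (#19423). "SketchEventAdmin" is a hypothetical
# name, not part of the test suite.
class SketchEventAdmin(admin.ModelAdmin):
    formfield_overrides = {
        DateField: {"widget": forms.TextInput(attrs={"size": "10"})},
    }


# Usage (assuming the Event model above): the override replaces the default
# AdminDateWidget with a plain TextInput carrying the given attrs.
#
#   ff = SketchEventAdmin(Event, admin.site).formfield_for_dbfield(
#       Event._meta.get_field("start_date"), request=None
#   )
#   assert isinstance(ff.widget, forms.TextInput)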
import tempfile
import uuid

from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.db import models

try:
    from PIL import Image
except ImportError:
    Image = None
else:
    temp_storage_dir = tempfile.mkdtemp()
    temp_storage = FileSystemStorage(temp_storage_dir)


class MyFileField(models.FileField):
    pass


class Member(models.Model):
    name = models.CharField(max_length=100)
    birthdate = models.DateTimeField(blank=True, null=True)
    gender = models.CharField(
        max_length=1, blank=True, choices=[("M", "Male"), ("F", "Female")]
    )
    email = models.EmailField(blank=True)

    def __str__(self):
        return self.name


class Artist(models.Model):
    pass


class Band(Artist):
    uuid = models.UUIDField(unique=True, default=uuid.uuid4)
    name = models.CharField(max_length=100)
    style = models.CharField(max_length=20)
    members = models.ManyToManyField(Member)

    def __str__(self):
        return self.name


class UnsafeLimitChoicesTo(models.Model):
    band = models.ForeignKey(
        Band,
        models.CASCADE,
        limit_choices_to={"name": '"&><escapeme'},
    )


class Album(models.Model):
    band = models.ForeignKey(Band, models.CASCADE, to_field="uuid")
    featuring = models.ManyToManyField(Band, related_name="featured")
    name = models.CharField(max_length=100)
    cover_art = models.FileField(upload_to="albums")
    backside_art = MyFileField(upload_to="albums_back", null=True)

    def __str__(self):
        return self.name


class ReleaseEvent(models.Model):
    """
    Used to check that the autocomplete widget correctly resolves attname for
    an FK-as-PK example.
    """

    album = models.ForeignKey(Album, models.CASCADE, primary_key=True)
    name = models.CharField(max_length=100)

    class Meta:
        ordering = ["name"]

    def __str__(self):
        return self.name


class VideoStream(models.Model):
    release_event = models.ForeignKey(ReleaseEvent, models.CASCADE)


class HiddenInventoryManager(models.Manager):
    def get_queryset(self):
        return super().get_queryset().filter(hidden=False)


class Inventory(models.Model):
    barcode = models.PositiveIntegerField(unique=True)
    parent = models.ForeignKey(
        "self", models.SET_NULL, to_field="barcode", blank=True, null=True
    )
    name = models.CharField(blank=False, max_length=20)
    hidden = models.BooleanField(default=False)

    # see #9258
    default_manager = models.Manager()
    objects = HiddenInventoryManager()

    def __str__(self):
        return self.name


class Event(models.Model):
    main_band = models.ForeignKey(
        Band,
        models.CASCADE,
        limit_choices_to=models.Q(pk__gt=0),
        related_name="events_main_band_at",
    )
    supporting_bands = models.ManyToManyField(
        Band,
        blank=True,
        related_name="events_supporting_band_at",
        help_text="Supporting Bands.",
    )
    start_date = models.DateField(blank=True, null=True)
    start_time = models.TimeField(blank=True, null=True)
    description = models.TextField(blank=True)
    link = models.URLField(blank=True)
    min_age = models.IntegerField(blank=True, null=True)


class Car(models.Model):
    owner = models.ForeignKey(User, models.CASCADE)
    make = models.CharField(max_length=30)
    model = models.CharField(max_length=30)

    def __str__(self):
        return "%s %s" % (self.make, self.model)


class CarTire(models.Model):
    """
    A single car tire. This is to test that a user can only select their own
    cars.
""" car = models.ForeignKey(Car, models.CASCADE) class Honeycomb(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) location = models.CharField(max_length=20) class Bee(models.Model): """ A model with a FK to a model that won't be registered with the admin (Honeycomb) so the corresponding raw ID widget won't have a magnifying glass link to select related honeycomb instances. """ honeycomb = models.ForeignKey(Honeycomb, models.CASCADE) class Individual(models.Model): """ A model with a FK to itself. It won't be registered with the admin, so the corresponding raw ID widget won't have a magnifying glass link to select related instances (rendering will be called programmatically in this case). """ name = models.CharField(max_length=20) parent = models.ForeignKey("self", models.SET_NULL, null=True) soulmate = models.ForeignKey( "self", models.CASCADE, null=True, related_name="soulmates" ) class Company(models.Model): name = models.CharField(max_length=20) class Advisor(models.Model): """ A model with a m2m to a model that won't be registered with the admin (Company) so the corresponding raw ID widget won't have a magnifying glass link to select related company instances. """ name = models.CharField(max_length=20) companies = models.ManyToManyField(Company) class Student(models.Model): name = models.CharField(max_length=255) if Image: photo = models.ImageField( storage=temp_storage, upload_to="photos", blank=True, null=True ) class Meta: ordering = ("name",) def __str__(self): return self.name class School(models.Model): name = models.CharField(max_length=255) students = models.ManyToManyField(Student, related_name="current_schools") alumni = models.ManyToManyField(Student, related_name="previous_schools") def __str__(self): return self.name class Profile(models.Model): user = models.ForeignKey("auth.User", models.CASCADE, to_field="username") def __str__(self): return self.user.username
from django.contrib import admin from .models import ( Advisor, Album, Band, Bee, Car, CarTire, Event, Inventory, Member, Profile, ReleaseEvent, School, Student, User, VideoStream, ) class WidgetAdmin(admin.AdminSite): pass class CarAdmin(admin.ModelAdmin): list_display = ["make", "model", "owner"] list_editable = ["owner"] class CarTireAdmin(admin.ModelAdmin): def formfield_for_foreignkey(self, db_field, request, **kwargs): if db_field.name == "car": kwargs["queryset"] = Car.objects.filter(owner=request.user) return db_field.formfield(**kwargs) return super().formfield_for_foreignkey(db_field, request, **kwargs) class EventAdmin(admin.ModelAdmin): raw_id_fields = ["main_band", "supporting_bands"] class AlbumAdmin(admin.ModelAdmin): fields = ( "name", "cover_art", ) readonly_fields = ("cover_art",) class SchoolAdmin(admin.ModelAdmin): filter_vertical = ("students",) filter_horizontal = ("alumni",) site = WidgetAdmin(name="widget-admin") site.register(User) site.register(Car, CarAdmin) site.register(CarTire, CarTireAdmin) site.register(Member) site.register(Band) site.register(Event, EventAdmin) site.register(Album, AlbumAdmin) site.register(ReleaseEvent, search_fields=["name"]) site.register(VideoStream, autocomplete_fields=["release_event"]) site.register(Inventory) site.register(Bee) site.register(Advisor) site.register(School, SchoolAdmin) site.register(Student) site.register(Profile)
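# Illustrative sketch (not part of the original module): a custom AdminSite
# instance like `site` above is exposed by pointing a URLconf at its `urls`
# property. The URL prefix and the module path in the import are assumptions
# for the example.
#
#   from django.urls import path
#   from .widgetadmin import site
#
#   urlpatterns = [
#       path("widget-admin/", site.urls),
#   ]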
import importlib import inspect import os import re import sys import tempfile import threading from io import StringIO from pathlib import Path from unittest import mock, skipIf from django.core import mail from django.core.files.uploadedfile import SimpleUploadedFile from django.db import DatabaseError, connection from django.http import Http404, HttpRequest, HttpResponse from django.shortcuts import render from django.template import TemplateDoesNotExist from django.test import RequestFactory, SimpleTestCase, override_settings from django.test.utils import LoggingCaptureMixin from django.urls import path, reverse from django.urls.converters import IntConverter from django.utils.functional import SimpleLazyObject from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import mark_safe from django.views.debug import ( CallableSettingWrapper, ExceptionCycleWarning, ExceptionReporter, ) from django.views.debug import Path as DebugPath from django.views.debug import ( SafeExceptionReporterFilter, default_urlconf, get_default_exception_reporter_filter, technical_404_response, technical_500_response, ) from django.views.decorators.debug import sensitive_post_parameters, sensitive_variables from ..views import ( custom_exception_reporter_filter_view, index_page, multivalue_dict_key_error, non_sensitive_view, paranoid_view, sensitive_args_function_caller, sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view, ) class User: def __str__(self): return "jacob" class WithoutEmptyPathUrls: urlpatterns = [path("url/", index_page, name="url")] class CallableSettingWrapperTests(SimpleTestCase): """Unittests for CallableSettingWrapper""" def test_repr(self): class WrappedCallable: def __repr__(self): return "repr from the wrapped callable" def __call__(self): pass actual = repr(CallableSettingWrapper(WrappedCallable())) self.assertEqual(actual, "repr from the wrapped callable") @override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls") class DebugViewTests(SimpleTestCase): def test_files(self): with self.assertLogs("django.request", "ERROR"): response = self.client.get("/raises/") self.assertEqual(response.status_code, 500) data = { "file_data.txt": SimpleUploadedFile("file_data.txt", b"haha"), } with self.assertLogs("django.request", "ERROR"): response = self.client.post("/raises/", data) self.assertContains(response, "file_data.txt", status_code=500) self.assertNotContains(response, "haha", status_code=500) def test_400(self): # When DEBUG=True, technical_500_template() is called. with self.assertLogs("django.security", "WARNING"): response = self.client.get("/raises400/") self.assertContains(response, '<div class="context" id="', status_code=400) def test_400_bad_request(self): # When DEBUG=True, technical_500_template() is called. with self.assertLogs("django.request", "WARNING") as cm: response = self.client.get("/raises400_bad_request/") self.assertContains(response, '<div class="context" id="', status_code=400) self.assertEqual( cm.records[0].getMessage(), "Malformed request syntax: /raises400_bad_request/", ) # Ensure no 403.html template exists to test the default case. @override_settings( TEMPLATES=[ { "BACKEND": "django.template.backends.django.DjangoTemplates", } ] ) def test_403(self): response = self.client.get("/raises403/") self.assertContains(response, "<h1>403 Forbidden</h1>", status_code=403) # Set up a test 403.html template. 
@override_settings( TEMPLATES=[ { "BACKEND": "django.template.backends.django.DjangoTemplates", "OPTIONS": { "loaders": [ ( "django.template.loaders.locmem.Loader", { "403.html": ( "This is a test template for a 403 error " "({{ exception }})." ), }, ), ], }, } ] ) def test_403_template(self): response = self.client.get("/raises403/") self.assertContains(response, "test template", status_code=403) self.assertContains(response, "(Insufficient Permissions).", status_code=403) def test_404(self): response = self.client.get("/raises404/") self.assertNotContains( response, '<pre class="exception_value">', status_code=404, ) self.assertContains( response, "<p>The current path, <code>not-in-urls</code>, didn’t match any " "of these.</p>", status_code=404, html=True, ) def test_404_not_in_urls(self): response = self.client.get("/not-in-urls") self.assertNotContains(response, "Raised by:", status_code=404) self.assertNotContains( response, '<pre class="exception_value">', status_code=404, ) self.assertContains( response, "Django tried these URL patterns", status_code=404 ) self.assertContains( response, "<p>The current path, <code>not-in-urls</code>, didn’t match any " "of these.</p>", status_code=404, html=True, ) # Pattern and view name of a RegexURLPattern appear. self.assertContains( response, r"^regex-post/(?P&lt;pk&gt;[0-9]+)/$", status_code=404 ) self.assertContains(response, "[name='regex-post']", status_code=404) # Pattern and view name of a RoutePattern appear. self.assertContains(response, r"path-post/&lt;int:pk&gt;/", status_code=404) self.assertContains(response, "[name='path-post']", status_code=404) @override_settings(ROOT_URLCONF=WithoutEmptyPathUrls) def test_404_empty_path_not_in_urls(self): response = self.client.get("/") self.assertContains( response, "<p>The empty path didn’t match any of these.</p>", status_code=404, html=True, ) def test_technical_404(self): response = self.client.get("/technical404/") self.assertContains( response, '<pre class="exception_value">Testing technical 404.</pre>', status_code=404, html=True, ) self.assertContains(response, "Raised by:", status_code=404) self.assertContains( response, "<td>view_tests.views.technical404</td>", status_code=404, ) self.assertContains( response, "<p>The current path, <code>technical404/</code>, matched the " "last one.</p>", status_code=404, html=True, ) def test_classbased_technical_404(self): response = self.client.get("/classbased404/") self.assertContains( response, "<th>Raised by:</th><td>view_tests.views.Http404View</td>", status_code=404, html=True, ) def test_technical_500(self): with self.assertLogs("django.request", "ERROR"): response = self.client.get("/raises500/") self.assertContains( response, "<th>Raised during:</th><td>view_tests.views.raises500</td>", status_code=500, html=True, ) with self.assertLogs("django.request", "ERROR"): response = self.client.get("/raises500/", HTTP_ACCEPT="text/plain") self.assertContains( response, "Raised during: view_tests.views.raises500", status_code=500, ) def test_classbased_technical_500(self): with self.assertLogs("django.request", "ERROR"): response = self.client.get("/classbased500/") self.assertContains( response, "<th>Raised during:</th><td>view_tests.views.Raises500View</td>", status_code=500, html=True, ) with self.assertLogs("django.request", "ERROR"): response = self.client.get("/classbased500/", HTTP_ACCEPT="text/plain") self.assertContains( response, "Raised during: view_tests.views.Raises500View", status_code=500, ) def test_non_l10ned_numeric_ids(self): 
""" Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized. """ with self.settings(DEBUG=True): with self.assertLogs("django.request", "ERROR"): response = self.client.get("/raises500/") # We look for a HTML fragment of the form # '<div class="context" id="c38123208">', # not '<div class="context" id="c38,123,208"'. self.assertContains(response, '<div class="context" id="', status_code=500) match = re.search( b'<div class="context" id="(?P<id>[^"]+)">', response.content ) self.assertIsNotNone(match) id_repr = match["id"] self.assertFalse( re.search(b"[^c0-9]", id_repr), "Numeric IDs in debug response HTML page shouldn't be localized " "(value: %s)." % id_repr.decode(), ) def test_template_exceptions(self): with self.assertLogs("django.request", "ERROR"): try: self.client.get(reverse("template_exception")) except Exception: raising_loc = inspect.trace()[-1][-2][0].strip() self.assertNotEqual( raising_loc.find('raise Exception("boom")'), -1, "Failed to find 'raise Exception' in last frame of " "traceback, instead found: %s" % raising_loc, ) @skipIf( sys.platform == "win32", "Raises OSError instead of TemplateDoesNotExist on Windows.", ) def test_safestring_in_exception(self): with self.assertLogs("django.request", "ERROR"): response = self.client.get("/safestring_exception/") self.assertNotContains( response, "<script>alert(1);</script>", status_code=500, html=True, ) self.assertContains( response, "&lt;script&gt;alert(1);&lt;/script&gt;", count=3, status_code=500, html=True, ) def test_template_loader_postmortem(self): """Tests for not existing file""" template_name = "notfound.html" with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile: tempdir = os.path.dirname(tmpfile.name) template_path = os.path.join(tempdir, template_name) with override_settings( TEMPLATES=[ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [tempdir], } ] ), self.assertLogs("django.request", "ERROR"): response = self.client.get( reverse( "raises_template_does_not_exist", kwargs={"path": template_name} ) ) self.assertContains( response, "%s (Source does not exist)" % template_path, status_code=500, count=2, ) # Assert as HTML. self.assertContains( response, "<li><code>django.template.loaders.filesystem.Loader</code>: " "%s (Source does not exist)</li>" % os.path.join(tempdir, "notfound.html"), status_code=500, html=True, ) def test_no_template_source_loaders(self): """ Make sure if you don't specify a template, the debug view doesn't blow up. """ with self.assertLogs("django.request", "ERROR"): with self.assertRaises(TemplateDoesNotExist): self.client.get("/render_no_template/") @override_settings(ROOT_URLCONF="view_tests.default_urls") def test_default_urlconf_template(self): """ Make sure that the default URLconf template is shown instead of the technical 404 page, if the user has not altered their URLconf yet. """ response = self.client.get("/") self.assertContains( response, "<h1>The install worked successfully! Congratulations!</h1>" ) @override_settings(ROOT_URLCONF="view_tests.regression_21530_urls") def test_regression_21530(self): """ Regression test for bug #21530. If the admin app include is replaced with exactly one url pattern, then the technical 404 template should be displayed. The bug here was that an AttributeError caused a 500 response. 
""" response = self.client.get("/") self.assertContains( response, "Page not found <span>(404)</span>", status_code=404 ) def test_template_encoding(self): """ The templates are loaded directly, not via a template loader, and should be opened as utf-8 charset as is the default specified on template engines. """ with mock.patch.object(DebugPath, "open") as m: default_urlconf(None) m.assert_called_once_with(encoding="utf-8") m.reset_mock() technical_404_response(mock.MagicMock(), mock.Mock()) m.assert_called_once_with(encoding="utf-8") def test_technical_404_converter_raise_404(self): with mock.patch.object(IntConverter, "to_python", side_effect=Http404): response = self.client.get("/path-post/1/") self.assertContains(response, "Page not found", status_code=404) def test_exception_reporter_from_request(self): with self.assertLogs("django.request", "ERROR"): response = self.client.get("/custom_reporter_class_view/") self.assertContains(response, "custom traceback text", status_code=500) @override_settings( DEFAULT_EXCEPTION_REPORTER="view_tests.views.CustomExceptionReporter" ) def test_exception_reporter_from_settings(self): with self.assertLogs("django.request", "ERROR"): response = self.client.get("/raises500/") self.assertContains(response, "custom traceback text", status_code=500) @override_settings( DEFAULT_EXCEPTION_REPORTER="view_tests.views.TemplateOverrideExceptionReporter" ) def test_template_override_exception_reporter(self): with self.assertLogs("django.request", "ERROR"): response = self.client.get("/raises500/") self.assertContains( response, "<h1>Oh no, an error occurred!</h1>", status_code=500, html=True, ) with self.assertLogs("django.request", "ERROR"): response = self.client.get("/raises500/", HTTP_ACCEPT="text/plain") self.assertContains(response, "Oh dear, an error occurred!", status_code=500) class DebugViewQueriesAllowedTests(SimpleTestCase): # May need a query to initialize MySQL connection databases = {"default"} def test_handle_db_exception(self): """ Ensure the debug view works when a database exception is raised by performing an invalid query and passing the exception to the debug view. """ with connection.cursor() as cursor: try: cursor.execute("INVALID SQL") except DatabaseError: exc_info = sys.exc_info() rf = RequestFactory() response = technical_500_response(rf.get("/"), *exc_info) self.assertContains(response, "OperationalError at /", status_code=500) @override_settings( DEBUG=True, ROOT_URLCONF="view_tests.urls", # No template directories are configured, so no templates will be found. TEMPLATES=[ { "BACKEND": "django.template.backends.dummy.TemplateStrings", } ], ) class NonDjangoTemplatesDebugViewTests(SimpleTestCase): def test_400(self): # When DEBUG=True, technical_500_template() is called. with self.assertLogs("django.security", "WARNING"): response = self.client.get("/raises400/") self.assertContains(response, '<div class="context" id="', status_code=400) def test_400_bad_request(self): # When DEBUG=True, technical_500_template() is called. 
with self.assertLogs("django.request", "WARNING") as cm: response = self.client.get("/raises400_bad_request/") self.assertContains(response, '<div class="context" id="', status_code=400) self.assertEqual( cm.records[0].getMessage(), "Malformed request syntax: /raises400_bad_request/", ) def test_403(self): response = self.client.get("/raises403/") self.assertContains(response, "<h1>403 Forbidden</h1>", status_code=403) def test_404(self): response = self.client.get("/raises404/") self.assertEqual(response.status_code, 404) def test_template_not_found_error(self): # Raises a TemplateDoesNotExist exception and shows the debug view. url = reverse( "raises_template_does_not_exist", kwargs={"path": "notfound.html"} ) with self.assertLogs("django.request", "ERROR"): response = self.client.get(url) self.assertContains(response, '<div class="context" id="', status_code=500) class ExceptionReporterTests(SimpleTestCase): rf = RequestFactory() def test_request_and_exception(self): "A simple exception report can be generated" try: request = self.rf.get("/test_view/") request.user = User() raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML("<h1>ValueError at /test_view/</h1>", html) self.assertIn( '<pre class="exception_value">Can&#x27;t find my keys</pre>', html ) self.assertIn("<th>Request Method:</th>", html) self.assertIn("<th>Request URL:</th>", html) self.assertIn('<h3 id="user-info">USER</h3>', html) self.assertIn("<p>jacob</p>", html) self.assertIn("<th>Exception Type:</th>", html) self.assertIn("<th>Exception Value:</th>", html) self.assertIn("<h2>Traceback ", html) self.assertIn("<h2>Request information</h2>", html) self.assertNotIn("<p>Request data not supplied</p>", html) self.assertIn("<p>No POST data</p>", html) def test_no_request(self): "An exception report can be generated without request" try: raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML("<h1>ValueError</h1>", html) self.assertIn( '<pre class="exception_value">Can&#x27;t find my keys</pre>', html ) self.assertNotIn("<th>Request Method:</th>", html) self.assertNotIn("<th>Request URL:</th>", html) self.assertNotIn('<h3 id="user-info">USER</h3>', html) self.assertIn("<th>Exception Type:</th>", html) self.assertIn("<th>Exception Value:</th>", html) self.assertIn("<h2>Traceback ", html) self.assertIn("<h2>Request information</h2>", html) self.assertIn("<p>Request data not supplied</p>", html) def test_sharing_traceback(self): try: raise ValueError("Oops") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn( '<form action="https://dpaste.com/" name="pasteform" ' 'id="pasteform" method="post">', html, ) def test_eol_support(self): """The ExceptionReporter supports Unix, Windows and Macintosh EOL markers""" LINES = ["print %d" % i for i in range(1, 6)] reporter = ExceptionReporter(None, None, None, None) for newline in ["\n", "\r\n", "\r"]: fd, filename = tempfile.mkstemp(text=False) os.write(fd, (newline.join(LINES) + newline).encode()) os.close(fd) try: self.assertEqual( reporter._get_lines_from_file(filename, 3, 2), (1, LINES[1:3], LINES[3], LINES[4:]), ) finally: os.unlink(filename) def 
test_no_exception(self): "An exception report can be generated for just a request" request = self.rf.get("/test_view/") reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML("<h1>Report at /test_view/</h1>", html) self.assertIn( '<pre class="exception_value">No exception message supplied</pre>', html ) self.assertIn("<th>Request Method:</th>", html) self.assertIn("<th>Request URL:</th>", html) self.assertNotIn("<th>Exception Type:</th>", html) self.assertNotIn("<th>Exception Value:</th>", html) self.assertNotIn("<h2>Traceback ", html) self.assertIn("<h2>Request information</h2>", html) self.assertNotIn("<p>Request data not supplied</p>", html) def test_suppressed_context(self): try: try: raise RuntimeError("Can't find my keys") except RuntimeError: raise ValueError("Can't find my keys") from None except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML("<h1>ValueError</h1>", html) self.assertIn( '<pre class="exception_value">Can&#x27;t find my keys</pre>', html ) self.assertIn("<th>Exception Type:</th>", html) self.assertIn("<th>Exception Value:</th>", html) self.assertIn("<h2>Traceback ", html) self.assertIn("<h2>Request information</h2>", html) self.assertIn("<p>Request data not supplied</p>", html) self.assertNotIn("During handling of the above exception", html) def test_innermost_exception_without_traceback(self): try: try: raise RuntimeError("Oops") except Exception as exc: new_exc = RuntimeError("My context") exc.__context__ = new_exc raise except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) frames = reporter.get_traceback_frames() self.assertEqual(len(frames), 2) html = reporter.get_traceback_html() self.assertInHTML("<h1>RuntimeError</h1>", html) self.assertIn('<pre class="exception_value">Oops</pre>', html) self.assertIn("<th>Exception Type:</th>", html) self.assertIn("<th>Exception Value:</th>", html) self.assertIn("<h2>Traceback ", html) self.assertIn("<h2>Request information</h2>", html) self.assertIn("<p>Request data not supplied</p>", html) self.assertIn( "During handling of the above exception (My context), another " "exception occurred", html, ) self.assertInHTML('<li class="frame user">None</li>', html) self.assertIn("Traceback (most recent call last):\n None", html) text = reporter.get_traceback_text() self.assertIn("Exception Type: RuntimeError", text) self.assertIn("Exception Value: Oops", text) self.assertIn("Traceback (most recent call last):\n None", text) self.assertIn( "During handling of the above exception (My context), another " "exception occurred", text, ) def test_mid_stack_exception_without_traceback(self): try: try: raise RuntimeError("Inner Oops") except Exception as exc: new_exc = RuntimeError("My context") new_exc.__context__ = exc raise RuntimeError("Oops") from new_exc except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML("<h1>RuntimeError</h1>", html) self.assertIn('<pre class="exception_value">Oops</pre>', html) self.assertIn("<th>Exception Type:</th>", html) self.assertIn("<th>Exception Value:</th>", html) self.assertIn("<h2>Traceback ", html) self.assertInHTML('<li class="frame user">Traceback: None</li>', html) self.assertIn( "During handling of the above exception (Inner Oops), another " 
"exception occurred:\n Traceback: None", html, ) text = reporter.get_traceback_text() self.assertIn("Exception Type: RuntimeError", text) self.assertIn("Exception Value: Oops", text) self.assertIn("Traceback (most recent call last):", text) self.assertIn( "During handling of the above exception (Inner Oops), another " "exception occurred:\n Traceback: None", text, ) def test_reporting_of_nested_exceptions(self): request = self.rf.get("/test_view/") try: try: raise AttributeError(mark_safe("<p>Top level</p>")) except AttributeError as explicit: try: raise ValueError(mark_safe("<p>Second exception</p>")) from explicit except ValueError: raise IndexError(mark_safe("<p>Final exception</p>")) except Exception: # Custom exception handler, just pass it into ExceptionReporter exc_type, exc_value, tb = sys.exc_info() explicit_exc = ( "The above exception ({0}) was the direct cause of the following exception:" ) implicit_exc = ( "During handling of the above exception ({0}), another exception occurred:" ) reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() # Both messages are twice on page -- one rendered as html, # one as plain text (for pastebin) self.assertEqual( 2, html.count(explicit_exc.format("&lt;p&gt;Top level&lt;/p&gt;")) ) self.assertEqual( 2, html.count(implicit_exc.format("&lt;p&gt;Second exception&lt;/p&gt;")) ) self.assertEqual(10, html.count("&lt;p&gt;Final exception&lt;/p&gt;")) text = reporter.get_traceback_text() self.assertIn(explicit_exc.format("<p>Top level</p>"), text) self.assertIn(implicit_exc.format("<p>Second exception</p>"), text) self.assertEqual(3, text.count("<p>Final exception</p>")) def test_reporting_frames_without_source(self): try: source = "def funcName():\n raise Error('Whoops')\nfuncName()" namespace = {} code = compile(source, "generated", "exec") exec(code, namespace) except Exception: exc_type, exc_value, tb = sys.exc_info() request = self.rf.get("/test_view/") reporter = ExceptionReporter(request, exc_type, exc_value, tb) frames = reporter.get_traceback_frames() last_frame = frames[-1] self.assertEqual(last_frame["context_line"], "<source code not available>") self.assertEqual(last_frame["filename"], "generated") self.assertEqual(last_frame["function"], "funcName") self.assertEqual(last_frame["lineno"], 2) html = reporter.get_traceback_html() self.assertIn( '<span class="fname">generated</span>, line 2, in funcName', html, ) self.assertIn( '<code class="fname">generated</code>, line 2, in funcName', html, ) self.assertIn( '"generated", line 2, in funcName\n &lt;source code not available&gt;', html, ) text = reporter.get_traceback_text() self.assertIn( '"generated", line 2, in funcName\n <source code not available>', text, ) def test_reporting_frames_source_not_match(self): try: source = "def funcName():\n raise Error('Whoops')\nfuncName()" namespace = {} code = compile(source, "generated", "exec") exec(code, namespace) except Exception: exc_type, exc_value, tb = sys.exc_info() with mock.patch( "django.views.debug.ExceptionReporter._get_source", return_value=["wrong source"], ): request = self.rf.get("/test_view/") reporter = ExceptionReporter(request, exc_type, exc_value, tb) frames = reporter.get_traceback_frames() last_frame = frames[-1] self.assertEqual(last_frame["context_line"], "<source code not available>") self.assertEqual(last_frame["filename"], "generated") self.assertEqual(last_frame["function"], "funcName") self.assertEqual(last_frame["lineno"], 2) html = reporter.get_traceback_html() 
self.assertIn( '<span class="fname">generated</span>, line 2, in funcName', html, ) self.assertIn( '<code class="fname">generated</code>, line 2, in funcName', html, ) self.assertIn( '"generated", line 2, in funcName\n' " &lt;source code not available&gt;", html, ) text = reporter.get_traceback_text() self.assertIn( '"generated", line 2, in funcName\n <source code not available>', text, ) def test_reporting_frames_for_cyclic_reference(self): try: def test_func(): try: raise RuntimeError("outer") from RuntimeError("inner") except RuntimeError as exc: raise exc.__cause__ test_func() except Exception: exc_type, exc_value, tb = sys.exc_info() request = self.rf.get("/test_view/") reporter = ExceptionReporter(request, exc_type, exc_value, tb) def generate_traceback_frames(*args, **kwargs): nonlocal tb_frames tb_frames = reporter.get_traceback_frames() tb_frames = None tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True) msg = ( "Cycle in the exception chain detected: exception 'inner' " "encountered again." ) with self.assertWarnsMessage(ExceptionCycleWarning, msg): tb_generator.start() tb_generator.join(timeout=5) if tb_generator.is_alive(): # tb_generator is a daemon that runs until the main thread/process # exits. This is resource heavy when running the full test suite. # Setting the following values to None makes # reporter.get_traceback_frames() exit early. exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None tb_generator.join() self.fail("Cyclic reference in Exception Reporter.get_traceback_frames()") if tb_frames is None: # can happen if the thread generating traceback got killed # or exception while generating the traceback self.fail("Traceback generation failed") last_frame = tb_frames[-1] self.assertIn("raise exc.__cause__", last_frame["context_line"]) self.assertEqual(last_frame["filename"], __file__) self.assertEqual(last_frame["function"], "test_func") def test_request_and_message(self): "A message can be provided in addition to a request" request = self.rf.get("/test_view/") reporter = ExceptionReporter(request, None, "I'm a little teapot", None) html = reporter.get_traceback_html() self.assertInHTML("<h1>Report at /test_view/</h1>", html) self.assertIn( '<pre class="exception_value">I&#x27;m a little teapot</pre>', html ) self.assertIn("<th>Request Method:</th>", html) self.assertIn("<th>Request URL:</th>", html) self.assertNotIn("<th>Exception Type:</th>", html) self.assertNotIn("<th>Exception Value:</th>", html) self.assertIn("<h2>Traceback ", html) self.assertIn("<h2>Request information</h2>", html) self.assertNotIn("<p>Request data not supplied</p>", html) def test_message_only(self): reporter = ExceptionReporter(None, None, "I'm a little teapot", None) html = reporter.get_traceback_html() self.assertInHTML("<h1>Report</h1>", html) self.assertIn( '<pre class="exception_value">I&#x27;m a little teapot</pre>', html ) self.assertNotIn("<th>Request Method:</th>", html) self.assertNotIn("<th>Request URL:</th>", html) self.assertNotIn("<th>Exception Type:</th>", html) self.assertNotIn("<th>Exception Value:</th>", html) self.assertIn("<h2>Traceback ", html) self.assertIn("<h2>Request information</h2>", html) self.assertIn("<p>Request data not supplied</p>", html) def test_non_utf8_values_handling(self): "Non-UTF-8 exceptions/values should not make the output generation choke." 
try: class NonUtf8Output(Exception): def __repr__(self): return b"EXC\xe9EXC" somevar = b"VAL\xe9VAL" # NOQA raise NonUtf8Output() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn("VAL\\xe9VAL", html) self.assertIn("EXC\\xe9EXC", html) def test_local_variable_escaping(self): """Safe strings in local variables are escaped.""" try: local = mark_safe("<p>Local variable</p>") raise ValueError(local) except Exception: exc_type, exc_value, tb = sys.exc_info() html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html() self.assertIn( '<td class="code"><pre>&#x27;&lt;p&gt;Local variable&lt;/p&gt;&#x27;</pre>' "</td>", html, ) def test_unprintable_values_handling(self): "Unprintable values should not make the output generation choke." try: class OomOutput: def __repr__(self): raise MemoryError("OOM") oomvalue = OomOutput() # NOQA raise ValueError() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<td class="code"><pre>Error in formatting', html) def test_too_large_values_handling(self): "Large values should not create a large HTML." large = 256 * 1024 repr_of_str_adds = len(repr("")) try: class LargeOutput: def __repr__(self): return repr("A" * large) largevalue = LargeOutput() # NOQA raise ValueError() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb self.assertIn( "&lt;trimmed %d bytes string&gt;" % (large + repr_of_str_adds,), html ) def test_encoding_error(self): """ A UnicodeError displays a portion of the problematic string. HTML in safe strings is escaped. """ try: mark_safe("abcdefghijkl<p>mnὀp</p>qrstuwxyz").encode("ascii") except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn("<h2>Unicode error hint</h2>", html) self.assertIn("The string that could not be encoded/decoded was: ", html) self.assertIn("<strong>&lt;p&gt;mnὀp&lt;/p&gt;</strong>", html) def test_unfrozen_importlib(self): """ importlib is not a frozen app, but its loader thinks it's frozen which results in an ImportError. Refs #21443. """ try: request = self.rf.get("/test_view/") importlib.import_module("abc.def.invalid.name") except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML("<h1>ModuleNotFoundError at /test_view/</h1>", html) def test_ignore_traceback_evaluation_exceptions(self): """ Don't trip over exceptions generated by crafted objects when evaluating them while cleansing (#24455). """ class BrokenEvaluation(Exception): pass def broken_setup(): raise BrokenEvaluation request = self.rf.get("/test_view/") broken_lazy = SimpleLazyObject(broken_setup) try: bool(broken_lazy) except BrokenEvaluation: exc_type, exc_value, tb = sys.exc_info() self.assertIn( "BrokenEvaluation", ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(), "Evaluation exception reason not mentioned in traceback", ) @override_settings(ALLOWED_HOSTS="example.com") def test_disallowed_host(self): "An exception report can be generated even for a disallowed host." 
request = self.rf.get("/", HTTP_HOST="evil.com") reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertIn("http://evil.com/", html) def test_request_with_items_key(self): """ An exception report can be generated for requests with 'items' in request GET, POST, FILES, or COOKIES QueryDicts. """ value = '<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>' # GET request = self.rf.get("/test_view/?items=Oops") reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML(value, html) # POST request = self.rf.post("/test_view/", data={"items": "Oops"}) reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML(value, html) # FILES fp = StringIO("filecontent") request = self.rf.post("/test_view/", data={"name": "filename", "items": fp}) reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML( '<td>items</td><td class="code"><pre>&lt;InMemoryUploadedFile: ' "items (application/octet-stream)&gt;</pre></td>", html, ) # COOKIES rf = RequestFactory() rf.cookies["items"] = "Oops" request = rf.get("/test_view/") reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML( '<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>', html ) def test_exception_fetching_user(self): """ The error page can be rendered if the current user can't be retrieved (such as when the database is unavailable). """ class ExceptionUser: def __str__(self): raise Exception() request = self.rf.get("/test_view/") request.user = ExceptionUser() try: raise ValueError("Oops") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML("<h1>ValueError at /test_view/</h1>", html) self.assertIn('<pre class="exception_value">Oops</pre>', html) self.assertIn('<h3 id="user-info">USER</h3>', html) self.assertIn("<p>[unable to retrieve the current user]</p>", html) text = reporter.get_traceback_text() self.assertIn("USER: [unable to retrieve the current user]", text) def test_template_encoding(self): """ The templates are loaded directly, not via a template loader, and should be opened as utf-8 charset as is the default specified on template engines. 
""" reporter = ExceptionReporter(None, None, None, None) with mock.patch.object(DebugPath, "open") as m: reporter.get_traceback_html() m.assert_called_once_with(encoding="utf-8") m.reset_mock() reporter.get_traceback_text() m.assert_called_once_with(encoding="utf-8") @override_settings(ALLOWED_HOSTS=["example.com"]) def test_get_raw_insecure_uri(self): factory = RequestFactory(HTTP_HOST="evil.com") tests = [ ("////absolute-uri", "http://evil.com//absolute-uri"), ("/?foo=bar", "http://evil.com/?foo=bar"), ("/path/with:colons", "http://evil.com/path/with:colons"), ] for url, expected in tests: with self.subTest(url=url): request = factory.get(url) reporter = ExceptionReporter(request, None, None, None) self.assertEqual(reporter._get_raw_insecure_uri(), expected) class PlainTextReportTests(SimpleTestCase): rf = RequestFactory() def test_request_and_exception(self): "A simple exception report can be generated" try: request = self.rf.get("/test_view/") request.user = User() raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) text = reporter.get_traceback_text() self.assertIn("ValueError at /test_view/", text) self.assertIn("Can't find my keys", text) self.assertIn("Request Method:", text) self.assertIn("Request URL:", text) self.assertIn("USER: jacob", text) self.assertIn("Exception Type:", text) self.assertIn("Exception Value:", text) self.assertIn("Traceback (most recent call last):", text) self.assertIn("Request information:", text) self.assertNotIn("Request data not supplied", text) def test_no_request(self): "An exception report can be generated without request" try: raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) text = reporter.get_traceback_text() self.assertIn("ValueError", text) self.assertIn("Can't find my keys", text) self.assertNotIn("Request Method:", text) self.assertNotIn("Request URL:", text) self.assertNotIn("USER:", text) self.assertIn("Exception Type:", text) self.assertIn("Exception Value:", text) self.assertIn("Traceback (most recent call last):", text) self.assertIn("Request data not supplied", text) def test_no_exception(self): "An exception report can be generated for just a request" request = self.rf.get("/test_view/") reporter = ExceptionReporter(request, None, None, None) reporter.get_traceback_text() def test_request_and_message(self): "A message can be provided in addition to a request" request = self.rf.get("/test_view/") reporter = ExceptionReporter(request, None, "I'm a little teapot", None) reporter.get_traceback_text() @override_settings(DEBUG=True) def test_template_exception(self): request = self.rf.get("/test_view/") try: render(request, "debug/template_error.html") except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) text = reporter.get_traceback_text() templ_path = Path( Path(__file__).parents[1], "templates", "debug", "template_error.html" ) self.assertIn( "Template error:\n" "In template %(path)s, error at line 2\n" " 'cycle' tag requires at least two arguments\n" " 1 : Template with error:\n" " 2 : {%% cycle %%} \n" " 3 : " % {"path": templ_path}, text, ) def test_request_with_items_key(self): """ An exception report can be generated for requests with 'items' in request GET, POST, FILES, or COOKIES QueryDicts. 
""" # GET request = self.rf.get("/test_view/?items=Oops") reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) # POST request = self.rf.post("/test_view/", data={"items": "Oops"}) reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) # FILES fp = StringIO("filecontent") request = self.rf.post("/test_view/", data={"name": "filename", "items": fp}) reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = <InMemoryUploadedFile:", text) # COOKIES rf = RequestFactory() rf.cookies["items"] = "Oops" request = rf.get("/test_view/") reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) def test_message_only(self): reporter = ExceptionReporter(None, None, "I'm a little teapot", None) reporter.get_traceback_text() @override_settings(ALLOWED_HOSTS="example.com") def test_disallowed_host(self): "An exception report can be generated even for a disallowed host." request = self.rf.get("/", HTTP_HOST="evil.com") reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("http://evil.com/", text) class ExceptionReportTestMixin: # Mixin used in the ExceptionReporterFilterTests and # AjaxResponseExceptionReporterFilter tests below breakfast_data = { "sausage-key": "sausage-value", "baked-beans-key": "baked-beans-value", "hash-brown-key": "hash-brown-value", "bacon-key": "bacon-value", } def verify_unsafe_response( self, view, check_for_vars=True, check_for_POST_params=True ): """ Asserts that potentially sensitive info are displayed in the response. """ request = self.rf.post("/some_url/", self.breakfast_data) response = view(request) if check_for_vars: # All variables are shown. self.assertContains(response, "cooked_eggs", status_code=500) self.assertContains(response, "scrambled", status_code=500) self.assertContains(response, "sauce", status_code=500) self.assertContains(response, "worcestershire", status_code=500) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters are shown. self.assertContains(response, k, status_code=500) self.assertContains(response, v, status_code=500) def verify_safe_response( self, view, check_for_vars=True, check_for_POST_params=True ): """ Asserts that certain sensitive info are not displayed in the response. """ request = self.rf.post("/some_url/", self.breakfast_data) response = view(request) if check_for_vars: # Non-sensitive variable's name and value are shown. self.assertContains(response, "cooked_eggs", status_code=500) self.assertContains(response, "scrambled", status_code=500) # Sensitive variable's name is shown but not its value. self.assertContains(response, "sauce", status_code=500) self.assertNotContains(response, "worcestershire", status_code=500) if check_for_POST_params: for k in self.breakfast_data: # All POST parameters' names are shown. self.assertContains(response, k, status_code=500) # Non-sensitive POST parameters' values are shown. self.assertContains(response, "baked-beans-value", status_code=500) self.assertContains(response, "hash-brown-value", status_code=500) # Sensitive POST parameters' values are not shown. 
            self.assertNotContains(response, "sausage-value", status_code=500)
            self.assertNotContains(response, "bacon-value", status_code=500)

    def verify_paranoid_response(
        self, view, check_for_vars=True, check_for_POST_params=True
    ):
        """
        Asserts that no variables or POST parameters are displayed in the
        response.
        """
        request = self.rf.post("/some_url/", self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Show variable names but not their values.
            self.assertContains(response, "cooked_eggs", status_code=500)
            self.assertNotContains(response, "scrambled", status_code=500)
            self.assertContains(response, "sauce", status_code=500)
            self.assertNotContains(response, "worcestershire", status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
                # No POST parameters' values are shown.
                self.assertNotContains(response, v, status_code=500)

    def verify_unsafe_email(self, view, check_for_POST_params=True):
        """
        Asserts that potentially sensitive info is displayed in the email
        report.
        """
        with self.settings(ADMINS=[("Admin", "[email protected]")]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post("/some_url/", self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]

            # Frames vars are never shown in plain text email reports.
            body_plain = str(email.body)
            self.assertNotIn("cooked_eggs", body_plain)
            self.assertNotIn("scrambled", body_plain)
            self.assertNotIn("sauce", body_plain)
            self.assertNotIn("worcestershire", body_plain)

            # Frames vars are shown in html email reports.
            body_html = str(email.alternatives[0][0])
            self.assertIn("cooked_eggs", body_html)
            self.assertIn("scrambled", body_html)
            self.assertIn("sauce", body_html)
            self.assertIn("worcestershire", body_html)

            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters are shown.
                    self.assertIn(k, body_plain)
                    self.assertIn(v, body_plain)
                    self.assertIn(k, body_html)
                    self.assertIn(v, body_html)

    def verify_safe_email(self, view, check_for_POST_params=True):
        """
        Asserts that certain sensitive info is not displayed in the email
        report.
        """
        with self.settings(ADMINS=[("Admin", "[email protected]")]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post("/some_url/", self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]

            # Frames vars are never shown in plain text email reports.
            body_plain = str(email.body)
            self.assertNotIn("cooked_eggs", body_plain)
            self.assertNotIn("scrambled", body_plain)
            self.assertNotIn("sauce", body_plain)
            self.assertNotIn("worcestershire", body_plain)

            # Frames vars are shown in html email reports.
            body_html = str(email.alternatives[0][0])
            self.assertIn("cooked_eggs", body_html)
            self.assertIn("scrambled", body_html)
            self.assertIn("sauce", body_html)
            self.assertNotIn("worcestershire", body_html)

            if check_for_POST_params:
                for k in self.breakfast_data:
                    # All POST parameters' names are shown.
                    self.assertIn(k, body_plain)
                # Non-sensitive POST parameters' values are shown.
                self.assertIn("baked-beans-value", body_plain)
                self.assertIn("hash-brown-value", body_plain)
                self.assertIn("baked-beans-value", body_html)
                self.assertIn("hash-brown-value", body_html)
                # Sensitive POST parameters' values are not shown.
self.assertNotIn("sausage-value", body_plain) self.assertNotIn("bacon-value", body_plain) self.assertNotIn("sausage-value", body_html) self.assertNotIn("bacon-value", body_html) def verify_paranoid_email(self, view): """ Asserts that no variables or POST parameters are displayed in the email report. """ with self.settings(ADMINS=[("Admin", "[email protected]")]): mail.outbox = [] # Empty outbox request = self.rf.post("/some_url/", self.breakfast_data) view(request) self.assertEqual(len(mail.outbox), 1) email = mail.outbox[0] # Frames vars are never shown in plain text email reports. body = str(email.body) self.assertNotIn("cooked_eggs", body) self.assertNotIn("scrambled", body) self.assertNotIn("sauce", body) self.assertNotIn("worcestershire", body) for k, v in self.breakfast_data.items(): # All POST parameters' names are shown. self.assertIn(k, body) # No POST parameters' values are shown. self.assertNotIn(v, body) @override_settings(ROOT_URLCONF="view_tests.urls") class ExceptionReporterFilterTests( ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase ): """ Sensitive information can be filtered out of error reports (#14614). """ rf = RequestFactory() def test_non_sensitive_request(self): """ Everything (request info and frame variables) can bee seen in the default error reports for non-sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(non_sensitive_view) self.verify_unsafe_email(non_sensitive_view) with self.settings(DEBUG=False): self.verify_unsafe_response(non_sensitive_view) self.verify_unsafe_email(non_sensitive_view) def test_sensitive_request(self): """ Sensitive POST parameters and frame variables cannot be seen in the default error reports for sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_view) self.verify_unsafe_email(sensitive_view) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_view) self.verify_safe_email(sensitive_view) def test_paranoid_request(self): """ No POST parameters and frame variables can be seen in the default error reports for "paranoid" requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(paranoid_view) self.verify_unsafe_email(paranoid_view) with self.settings(DEBUG=False): self.verify_paranoid_response(paranoid_view) self.verify_paranoid_email(paranoid_view) def test_multivalue_dict_key_error(self): """ #21098 -- Sensitive POST parameters cannot be seen in the error reports for if request.POST['nonexistent_key'] throws an error. """ with self.settings(DEBUG=True): self.verify_unsafe_response(multivalue_dict_key_error) self.verify_unsafe_email(multivalue_dict_key_error) with self.settings(DEBUG=False): self.verify_safe_response(multivalue_dict_key_error) self.verify_safe_email(multivalue_dict_key_error) def test_custom_exception_reporter_filter(self): """ It's possible to assign an exception reporter filter to the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER. """ with self.settings(DEBUG=True): self.verify_unsafe_response(custom_exception_reporter_filter_view) self.verify_unsafe_email(custom_exception_reporter_filter_view) with self.settings(DEBUG=False): self.verify_unsafe_response(custom_exception_reporter_filter_view) self.verify_unsafe_email(custom_exception_reporter_filter_view) def test_sensitive_method(self): """ The sensitive_variables decorator works with object methods. 
""" with self.settings(DEBUG=True): self.verify_unsafe_response( sensitive_method_view, check_for_POST_params=False ) self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False) with self.settings(DEBUG=False): self.verify_safe_response( sensitive_method_view, check_for_POST_params=False ) self.verify_safe_email(sensitive_method_view, check_for_POST_params=False) def test_sensitive_function_arguments(self): """ Sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as arguments to the decorated function. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_args_function_caller) self.verify_unsafe_email(sensitive_args_function_caller) with self.settings(DEBUG=False): self.verify_safe_response( sensitive_args_function_caller, check_for_POST_params=False ) self.verify_safe_email( sensitive_args_function_caller, check_for_POST_params=False ) def test_sensitive_function_keyword_arguments(self): """ Sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as keyword arguments to the decorated function. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_kwargs_function_caller) self.verify_unsafe_email(sensitive_kwargs_function_caller) with self.settings(DEBUG=False): self.verify_safe_response( sensitive_kwargs_function_caller, check_for_POST_params=False ) self.verify_safe_email( sensitive_kwargs_function_caller, check_for_POST_params=False ) def test_callable_settings(self): """ Callable settings should not be evaluated in the debug page (#21345). """ def callable_setting(): return "This should not be displayed" with self.settings(DEBUG=True, FOOBAR=callable_setting): response = self.client.get("/raises500/") self.assertNotContains( response, "This should not be displayed", status_code=500 ) def test_callable_settings_forbidding_to_set_attributes(self): """ Callable settings which forbid to set attributes should not break the debug page (#23070). """ class CallableSettingWithSlots: __slots__ = [] def __call__(self): return "This should not be displayed" with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()): response = self.client.get("/raises500/") self.assertNotContains( response, "This should not be displayed", status_code=500 ) def test_dict_setting_with_non_str_key(self): """ A dict setting containing a non-string key should not break the debug page (#12744). """ with self.settings(DEBUG=True, FOOBAR={42: None}): response = self.client.get("/raises500/") self.assertContains(response, "FOOBAR", status_code=500) def test_sensitive_settings(self): """ The debug page should not show some sensitive settings (password, secret key, ...). """ sensitive_settings = [ "SECRET_KEY", "SECRET_KEY_FALLBACKS", "PASSWORD", "API_KEY", "AUTH_TOKEN", ] for setting in sensitive_settings: with self.settings(DEBUG=True, **{setting: "should not be displayed"}): response = self.client.get("/raises500/") self.assertNotContains( response, "should not be displayed", status_code=500 ) def test_settings_with_sensitive_keys(self): """ The debug page should filter out some sensitive information found in dict settings. 
""" sensitive_settings = [ "SECRET_KEY", "SECRET_KEY_FALLBACKS", "PASSWORD", "API_KEY", "AUTH_TOKEN", ] for setting in sensitive_settings: FOOBAR = { setting: "should not be displayed", "recursive": {setting: "should not be displayed"}, } with self.settings(DEBUG=True, FOOBAR=FOOBAR): response = self.client.get("/raises500/") self.assertNotContains( response, "should not be displayed", status_code=500 ) def test_cleanse_setting_basic(self): reporter_filter = SafeExceptionReporterFilter() self.assertEqual(reporter_filter.cleanse_setting("TEST", "TEST"), "TEST") self.assertEqual( reporter_filter.cleanse_setting("PASSWORD", "super_secret"), reporter_filter.cleansed_substitute, ) def test_cleanse_setting_ignore_case(self): reporter_filter = SafeExceptionReporterFilter() self.assertEqual( reporter_filter.cleanse_setting("password", "super_secret"), reporter_filter.cleansed_substitute, ) def test_cleanse_setting_recurses_in_dictionary(self): reporter_filter = SafeExceptionReporterFilter() initial = {"login": "cooper", "password": "secret"} self.assertEqual( reporter_filter.cleanse_setting("SETTING_NAME", initial), {"login": "cooper", "password": reporter_filter.cleansed_substitute}, ) def test_cleanse_setting_recurses_in_dictionary_with_non_string_key(self): reporter_filter = SafeExceptionReporterFilter() initial = {("localhost", 8000): {"login": "cooper", "password": "secret"}} self.assertEqual( reporter_filter.cleanse_setting("SETTING_NAME", initial), { ("localhost", 8000): { "login": "cooper", "password": reporter_filter.cleansed_substitute, }, }, ) def test_cleanse_setting_recurses_in_list_tuples(self): reporter_filter = SafeExceptionReporterFilter() initial = [ { "login": "cooper", "password": "secret", "apps": ( {"name": "app1", "api_key": "a06b-c462cffae87a"}, {"name": "app2", "api_key": "a9f4-f152e97ad808"}, ), "tokens": ["98b37c57-ec62-4e39", "8690ef7d-8004-4916"], }, {"SECRET_KEY": "c4d77c62-6196-4f17-a06b-c462cffae87a"}, ] cleansed = [ { "login": "cooper", "password": reporter_filter.cleansed_substitute, "apps": ( {"name": "app1", "api_key": reporter_filter.cleansed_substitute}, {"name": "app2", "api_key": reporter_filter.cleansed_substitute}, ), "tokens": reporter_filter.cleansed_substitute, }, {"SECRET_KEY": reporter_filter.cleansed_substitute}, ] self.assertEqual( reporter_filter.cleanse_setting("SETTING_NAME", initial), cleansed, ) self.assertEqual( reporter_filter.cleanse_setting("SETTING_NAME", tuple(initial)), tuple(cleansed), ) def test_request_meta_filtering(self): request = self.rf.get("/", HTTP_SECRET_HEADER="super_secret") reporter_filter = SafeExceptionReporterFilter() self.assertEqual( reporter_filter.get_safe_request_meta(request)["HTTP_SECRET_HEADER"], reporter_filter.cleansed_substitute, ) def test_exception_report_uses_meta_filtering(self): response = self.client.get("/raises500/", HTTP_SECRET_HEADER="super_secret") self.assertNotIn(b"super_secret", response.content) response = self.client.get( "/raises500/", HTTP_SECRET_HEADER="super_secret", HTTP_ACCEPT="application/json", ) self.assertNotIn(b"super_secret", response.content) @override_settings(SESSION_COOKIE_NAME="djangosession") def test_cleanse_session_cookie_value(self): self.client.cookies.load({"djangosession": "should not be displayed"}) response = self.client.get("/raises500/") self.assertNotContains(response, "should not be displayed", status_code=500) class CustomExceptionReporterFilter(SafeExceptionReporterFilter): cleansed_substitute = "XXXXXXXXXXXXXXXXXXXX" hidden_settings = _lazy_re_compile( 
"API|TOKEN|KEY|SECRET|PASS|SIGNATURE|DATABASE_URL", flags=re.I ) @override_settings( ROOT_URLCONF="view_tests.urls", DEFAULT_EXCEPTION_REPORTER_FILTER="%s.CustomExceptionReporterFilter" % __name__, ) class CustomExceptionReporterFilterTests(SimpleTestCase): def setUp(self): get_default_exception_reporter_filter.cache_clear() def tearDown(self): get_default_exception_reporter_filter.cache_clear() def test_setting_allows_custom_subclass(self): self.assertIsInstance( get_default_exception_reporter_filter(), CustomExceptionReporterFilter, ) def test_cleansed_substitute_override(self): reporter_filter = get_default_exception_reporter_filter() self.assertEqual( reporter_filter.cleanse_setting("password", "super_secret"), reporter_filter.cleansed_substitute, ) def test_hidden_settings_override(self): reporter_filter = get_default_exception_reporter_filter() self.assertEqual( reporter_filter.cleanse_setting("database_url", "super_secret"), reporter_filter.cleansed_substitute, ) class NonHTMLResponseExceptionReporterFilter( ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase ): """ Sensitive information can be filtered out of error reports. The plain text 500 debug-only error page is served when it has been detected the request doesn't accept HTML content. Don't check for (non)existence of frames vars in the traceback information section of the response content because they're not included in these error pages. Refs #14614. """ rf = RequestFactory(HTTP_ACCEPT="application/json") def test_non_sensitive_request(self): """ Request info can bee seen in the default error reports for non-sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(non_sensitive_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_unsafe_response(non_sensitive_view, check_for_vars=False) def test_sensitive_request(self): """ Sensitive POST parameters cannot be seen in the default error reports for sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_view, check_for_vars=False) def test_paranoid_request(self): """ No POST parameters can be seen in the default error reports for "paranoid" requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(paranoid_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_paranoid_response(paranoid_view, check_for_vars=False) def test_custom_exception_reporter_filter(self): """ It's possible to assign an exception reporter filter to the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER. """ with self.settings(DEBUG=True): self.verify_unsafe_response( custom_exception_reporter_filter_view, check_for_vars=False ) with self.settings(DEBUG=False): self.verify_unsafe_response( custom_exception_reporter_filter_view, check_for_vars=False ) @override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls") def test_non_html_response_encoding(self): response = self.client.get("/raises500/", HTTP_ACCEPT="application/json") self.assertEqual(response.headers["Content-Type"], "text/plain; charset=utf-8") class DecoratorsTests(SimpleTestCase): def test_sensitive_variables_not_called(self): msg = ( "sensitive_variables() must be called to use it as a decorator, " "e.g., use @sensitive_variables(), not @sensitive_variables." 
) with self.assertRaisesMessage(TypeError, msg): @sensitive_variables def test_func(password): pass def test_sensitive_post_parameters_not_called(self): msg = ( "sensitive_post_parameters() must be called to use it as a " "decorator, e.g., use @sensitive_post_parameters(), not " "@sensitive_post_parameters." ) with self.assertRaisesMessage(TypeError, msg): @sensitive_post_parameters def test_func(request): return index_page(request) def test_sensitive_post_parameters_http_request(self): class MyClass: @sensitive_post_parameters() def a_view(self, request): return HttpResponse() msg = ( "sensitive_post_parameters didn't receive an HttpRequest object. " "If you are decorating a classmethod, make sure to use " "@method_decorator." ) with self.assertRaisesMessage(TypeError, msg): MyClass().a_view(HttpRequest())
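# --- Editor's sketch (hypothetical names, not part of the suite): the calling
# convention the decorator tests above assert. Both decorators must be
# *called*, optionally with parameter names, before being applied:
#
#     from django.views.decorators.debug import (
#         sensitive_post_parameters,
#         sensitive_variables,
#     )
#
#     @sensitive_variables("password")
#     def process_login(username, password):
#         ...  # "password" is cleansed from traceback frames in error reports.
#
#     @sensitive_post_parameters("password")
#     def login_view(request):
#         ...  # request.POST["password"] is cleansed from error reports.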
from decimal import Decimal, localcontext from django.template.defaultfilters import floatformat from django.test import SimpleTestCase from django.utils import translation from django.utils.safestring import mark_safe from ..utils import setup class FloatformatTests(SimpleTestCase): @setup( { "floatformat01": ( "{% autoescape off %}{{ a|floatformat }} {{ b|floatformat }}" "{% endautoescape %}" ) } ) def test_floatformat01(self): output = self.engine.render_to_string( "floatformat01", {"a": "1.42", "b": mark_safe("1.42")} ) self.assertEqual(output, "1.4 1.4") @setup({"floatformat02": "{{ a|floatformat }} {{ b|floatformat }}"}) def test_floatformat02(self): output = self.engine.render_to_string( "floatformat02", {"a": "1.42", "b": mark_safe("1.42")} ) self.assertEqual(output, "1.4 1.4") class FunctionTests(SimpleTestCase): def test_inputs(self): self.assertEqual(floatformat(7.7), "7.7") self.assertEqual(floatformat(7.0), "7") self.assertEqual(floatformat(0.7), "0.7") self.assertEqual(floatformat(-0.7), "-0.7") self.assertEqual(floatformat(0.07), "0.1") self.assertEqual(floatformat(-0.07), "-0.1") self.assertEqual(floatformat(0.007), "0.0") self.assertEqual(floatformat(0.0), "0") self.assertEqual(floatformat(7.7, 0), "8") self.assertEqual(floatformat(7.7, 3), "7.700") self.assertEqual(floatformat(6.000000, 3), "6.000") self.assertEqual(floatformat(6.200000, 3), "6.200") self.assertEqual(floatformat(6.200000, -3), "6.200") self.assertEqual(floatformat(13.1031, -3), "13.103") self.assertEqual(floatformat(11.1197, -2), "11.12") self.assertEqual(floatformat(11.0000, -2), "11") self.assertEqual(floatformat(11.000001, -2), "11.00") self.assertEqual(floatformat(8.2798, 3), "8.280") self.assertEqual(floatformat(5555.555, 2), "5555.56") self.assertEqual(floatformat(001.3000, 2), "1.30") self.assertEqual(floatformat(0.12345, 2), "0.12") self.assertEqual(floatformat(Decimal("555.555"), 2), "555.56") self.assertEqual(floatformat(Decimal("09.000")), "9") self.assertEqual( floatformat(Decimal("123456.123456789012345678901"), 21), "123456.123456789012345678901", ) self.assertEqual(floatformat("foo"), "") self.assertEqual(floatformat(13.1031, "bar"), "13.1031") self.assertEqual(floatformat(18.125, 2), "18.13") self.assertEqual(floatformat("foo", "bar"), "") self.assertEqual(floatformat("¿Cómo esta usted?"), "") self.assertEqual(floatformat(None), "") self.assertEqual( floatformat(-1.323297138040798e35, 2), "-132329713804079800000000000000000000.00", ) self.assertEqual( floatformat(-1.323297138040798e35, -2), "-132329713804079800000000000000000000", ) self.assertEqual(floatformat(1.5e-15, 20), "0.00000000000000150000") self.assertEqual(floatformat(1.5e-15, -20), "0.00000000000000150000") self.assertEqual(floatformat(1.00000000000000015, 16), "1.0000000000000002") def test_force_grouping(self): with translation.override("en"): self.assertEqual(floatformat(10000, "g"), "10,000") self.assertEqual(floatformat(66666.666, "1g"), "66,666.7") # Invalid suffix. self.assertEqual(floatformat(10000, "g2"), "10000") with translation.override("de", deactivate=True): self.assertEqual(floatformat(10000, "g"), "10.000") self.assertEqual(floatformat(66666.666, "1g"), "66.666,7") # Invalid suffix. 
self.assertEqual(floatformat(10000, "g2"), "10000") def test_unlocalize(self): with translation.override("de", deactivate=True): self.assertEqual(floatformat(66666.666, "2"), "66666,67") self.assertEqual(floatformat(66666.666, "2u"), "66666.67") with self.settings( USE_THOUSAND_SEPARATOR=True, NUMBER_GROUPING=3, THOUSAND_SEPARATOR="!", ): self.assertEqual(floatformat(66666.666, "2gu"), "66!666.67") self.assertEqual(floatformat(66666.666, "2ug"), "66!666.67") # Invalid suffix. self.assertEqual(floatformat(66666.666, "u2"), "66666.666") def test_zero_values(self): self.assertEqual(floatformat(0, 6), "0.000000") self.assertEqual(floatformat(0, 7), "0.0000000") self.assertEqual(floatformat(0, 10), "0.0000000000") self.assertEqual( floatformat(0.000000000000000000015, 20), "0.00000000000000000002" ) def test_negative_zero_values(self): tests = [ (-0.01, -1, "0.0"), (-0.001, 2, "0.00"), (-0.499, 0, "0"), ] for num, decimal_places, expected in tests: with self.subTest(num=num, decimal_places=decimal_places): self.assertEqual(floatformat(num, decimal_places), expected) def test_infinity(self): pos_inf = float(1e30000) neg_inf = float(-1e30000) self.assertEqual(floatformat(pos_inf), "inf") self.assertEqual(floatformat(neg_inf), "-inf") self.assertEqual(floatformat(pos_inf / pos_inf), "nan") def test_float_dunder_method(self): class FloatWrapper: def __init__(self, value): self.value = value def __float__(self): return self.value self.assertEqual(floatformat(FloatWrapper(11.000001), -2), "11.00") def test_low_decimal_precision(self): """ #15789 """ with localcontext() as ctx: ctx.prec = 2 self.assertEqual(floatformat(1.2345, 2), "1.23") self.assertEqual(floatformat(15.2042, -3), "15.204") self.assertEqual(floatformat(1.2345, "2"), "1.23") self.assertEqual(floatformat(15.2042, "-3"), "15.204") self.assertEqual(floatformat(Decimal("1.2345"), 2), "1.23") self.assertEqual(floatformat(Decimal("15.2042"), -3), "15.204")
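# --- Editor's sketch (not part of the suite): a compact summary of the
# argument grammar exercised above. A negative precision trims trailing
# zeros, "g" forces grouping, and "u" forces unlocalization:
#
#     from django.template.defaultfilters import floatformat
#
#     floatformat(11.1197, -2)       # "11.12" -- rounded to two places
#     floatformat(11.0000, -2)       # "11"    -- trailing zeros trimmed
#     floatformat(66666.666, "2gu")  # "66!666.67" with THOUSAND_SEPARATOR="!"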
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import copy import difflib import functools import sys from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import FieldDoesNotExist, FieldError from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import ( BaseExpression, Col, Exists, F, OuterRef, Ref, ResolvedOuterRef, Value, ) from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode from django.utils.functional import cached_property from django.utils.regex_helper import _lazy_re_compile from django.utils.tree import Node __all__ = ["Query", "RawQuery"] # Quotation marks ('"`[]), whitespace characters, semicolons, or inline # SQL comments are forbidden in column aliases. FORBIDDEN_ALIAS_PATTERN = _lazy_re_compile(r"['`\"\]\[;\s]|--|/\*|\*/") # Inspired from # https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS EXPLAIN_OPTIONS_PATTERN = _lazy_re_compile(r"[\w\-]+") def get_field_names_from_opts(opts): if opts is None: return set() return set( chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() ) ) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( "JoinInfo", ("final_field", "targets", "opts", "joins", "path", "transform_function"), ) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=()): self.params = params self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. 
result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): if self.params is None: return None return dict if isinstance(self.params, Mapping) else tuple def __str__(self): if self.params_type is None: return self.sql return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. See #17755. params_type = self.params_type adapter = connection.ops.adapt_unknown_value if params_type is tuple: params = tuple(adapter(val) for val in self.params) elif params_type is dict: params = {key: adapter(val) for key, val in self.params.items()} elif params_type is None: params = None else: raise RuntimeError("Unexpected params type: %s" % params_type) self.cursor = connection.cursor() self.cursor.execute(self.sql, params) ExplainInfo = namedtuple("ExplainInfo", ("format", "options")) class Query(BaseExpression): """A single SQL query.""" alias_prefix = "T" empty_result_set_value = None subq_aliases = frozenset([alias_prefix]) compiler = "SQLCompiler" base_table_class = BaseTable join_class = Join default_cols = True default_ordering = True standard_ordering = True filter_is_sticky = False subquery = False # SQL-related attributes. # Select and related select clauses are expressions to use in the SELECT # clause of the query. The select is used for cases where we want to set up # the select clause to contain other than default fields (values(), # subqueries...). Note that annotations go to annotations dictionary. select = () # The group_by attribute can have one of the following forms: # - None: no group by at all in the query # - A tuple of expressions: group by (at least) those expressions. # String refs are also allowed for now. # - True: group by all select fields of the model # See compiler.get_group_by() for details. group_by = None order_by = () low_mark = 0 # Used for offset/limit. high_mark = None # Used for offset/limit. distinct = False distinct_fields = () select_for_update = False select_for_update_nowait = False select_for_update_skip_locked = False select_for_update_of = () select_for_no_key_update = False select_related = False has_select_fields = False # Arbitrary limit for select_related to prevent infinite recursion. max_depth = 5 # Holds the selects defined by a call to values() or values_list() # excluding annotation_select and extra_select. values_select = () # SQL annotation-related attributes. annotation_select_mask = None _annotation_select_cache = None # Set combination attributes. combinator = None combinator_all = False combined_queries = () # These are for extensions. The contents are more or less appended verbatim # to the appropriate clause. extra_select_mask = None _extra_select_cache = None extra_tables = () extra_order_by = () # A tuple that is a set of model field names and either True, if these are # the fields to defer, or False if these are the only fields to load. deferred_loading = (frozenset(), True) explain_info = None def __init__(self, model, alias_cols=True): self.model = model self.alias_refcount = {} # alias_map is the most important data structure regarding joins. # It's used for recording which joins exist in the query and what # types they are.
# The key is the alias of the joined table (possibly # the table name) and the value is a Join-like object (see # sql.datastructures.Join for more information). self.alias_map = {} # Whether to provide alias to columns during reference resolving. self.alias_cols = alias_cols # Sometimes the query contains references to aliases in outer queries (as # a result of split_exclude). Correct alias quoting needs to know these # aliases too. # Map external tables to whether they are aliased. self.external_aliases = {} self.table_map = {} # Maps table names to list of aliases. self.used_aliases = set() self.where = WhereNode() # Maps alias -> Annotation Expression. self.annotations = {} # These are for extensions. The contents are more or less appended # verbatim to the appropriate clause. self.extra = {} # Maps col_alias -> (col_sql, params). self._filtered_relations = {} @property def output_field(self): if len(self.select) == 1: select = self.select[0] return getattr(select, "target", None) or select.field elif len(self.annotation_select) == 1: return next(iter(self.annotation_select.values())).output_field @cached_property def base_table(self): for alias in self.alias_map: return alias def __str__(self): """ Return the query as a string of SQL with the parameter values substituted in (use sql_with_params() to see the unsubstituted string). Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time. """ sql, params = self.sql_with_params() return sql % params def sql_with_params(self): """ Return the query as an SQL string and the parameters that will be substituted into the query. """ return self.get_compiler(DEFAULT_DB_ALIAS).as_sql() def __deepcopy__(self, memo): """Limit the amount of work when a Query is deepcopied.""" result = self.clone() memo[id(self)] = result return result def get_compiler(self, using=None, connection=None, elide_empty=True): if using is None and connection is None: raise ValueError("Need either using or connection") if using: connection = connections[using] return connection.ops.compiler(self.compiler)( self, connection, using, elide_empty ) def get_meta(self): """ Return the Options instance (the model._meta) from which to start processing. Normally, this is self.model._meta, but it can be changed by subclasses. """ if self.model: return self.model._meta def clone(self): """ Return a copy of the current Query. A lightweight alternative to deepcopy(). """ obj = Empty() obj.__class__ = self.__class__ # Copy references to everything. obj.__dict__ = self.__dict__.copy() # Clone attributes that can't use shallow copy. obj.alias_refcount = self.alias_refcount.copy() obj.alias_map = self.alias_map.copy() obj.external_aliases = self.external_aliases.copy() obj.table_map = self.table_map.copy() obj.where = self.where.clone() obj.annotations = self.annotations.copy() if self.annotation_select_mask is not None: obj.annotation_select_mask = self.annotation_select_mask.copy() if self.combined_queries: obj.combined_queries = tuple( [query.clone() for query in self.combined_queries] ) # _annotation_select_cache cannot be copied, as doing so breaks the # (necessary) state in which both annotations and # _annotation_select_cache point to the same underlying objects. # It will get re-populated in the cloned queryset the next time it's # used.
obj._annotation_select_cache = None obj.extra = self.extra.copy() if self.extra_select_mask is not None: obj.extra_select_mask = self.extra_select_mask.copy() if self._extra_select_cache is not None: obj._extra_select_cache = self._extra_select_cache.copy() if self.select_related is not False: # Use deepcopy because select_related stores fields in nested # dicts. obj.select_related = copy.deepcopy(obj.select_related) if "subq_aliases" in self.__dict__: obj.subq_aliases = self.subq_aliases.copy() obj.used_aliases = self.used_aliases.copy() obj._filtered_relations = self._filtered_relations.copy() # Clear the cached_property, if it exists. obj.__dict__.pop("base_table", None) return obj def chain(self, klass=None): """ Return a copy of the current Query that's ready for another operation. The klass argument changes the type of the Query, e.g. UpdateQuery. """ obj = self.clone() if klass and obj.__class__ != klass: obj.__class__ = klass if not obj.filter_is_sticky: obj.used_aliases = set() obj.filter_is_sticky = False if hasattr(obj, "_setup_query"): obj._setup_query() return obj def relabeled_clone(self, change_map): clone = self.clone() clone.change_aliases(change_map) return clone def _get_col(self, target, field, alias): if not self.alias_cols: alias = None return target.get_col(alias, field) def rewrite_cols(self, annotation, col_cnt): # We must make sure the inner query has the referred columns in it. # If we are aggregating over an annotation, then Django uses Ref() # instances to note this. However, if we are annotating over a column # of a related model, then it might be that column isn't part of the # SELECT clause of the inner query, and we must manually make sure # the column is selected. An example case is: # .aggregate(Sum('author__awards')) # Resolving this expression results in a join to author, but there # is no guarantee the awards column of author is in the select clause # of the query. Thus we must manually add the column to the inner # query. orig_exprs = annotation.get_source_expressions() new_exprs = [] for expr in orig_exprs: # FIXME: These conditions are fairly arbitrary. Identify a better # method of having expressions decide which code path they should # take. if isinstance(expr, Ref): # It's already a Ref to subquery (see resolve_ref() for # details) new_exprs.append(expr) elif isinstance(expr, (WhereNode, Lookup)): # Decompose the subexpressions further. The code here is # copied from the else clause, but this condition must appear # before the contains_aggregate/is_summary condition below. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) else: # Reuse aliases of expressions already selected in subquery. for col_alias, selected_annotation in self.annotation_select.items(): if selected_annotation is expr: new_expr = Ref(col_alias, expr) break else: # An expression that is not selected in the subquery. if isinstance(expr, Col) or ( expr.contains_aggregate and not expr.is_summary ): # Reference column or another aggregate. Select it # under a non-conflicting alias. col_cnt += 1 col_alias = "__col%d" % col_cnt self.annotations[col_alias] = expr self.append_annotation_mask([col_alias]) new_expr = Ref(col_alias, expr) else: # Some other expression not referencing database values # directly. Its subexpression might contain Cols.
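# Recurse so that any Cols nested inside those subexpressions are also
# rewritten into references the outer query can resolve.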
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) annotation.set_source_expressions(new_exprs) return annotation, col_cnt def get_aggregation(self, using, added_aggregate_names): """ Return the dictionary with the values of the existing aggregations. """ if not self.annotation_select: return {} existing_annotations = [ annotation for alias, annotation in self.annotations.items() if alias not in added_aggregate_names ] # Decide if we need to use a subquery. # # Existing annotations would cause incorrect results as get_aggregation() # must produce just one result and thus must not use GROUP BY. But we # aren't smart enough to remove the existing annotations from the # query, so those would force us to use GROUP BY. # # If the query has limit or distinct, or uses set operations, then # those operations must be done in a subquery so that the query # aggregates on the limit and/or distinct results instead of applying # the distinct and limit after the aggregation. if ( isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or self.distinct or self.combinator ): from django.db.models.sql.subqueries import AggregateQuery inner_query = self.clone() inner_query.subquery = True outer_query = AggregateQuery(self.model, inner_query) inner_query.select_for_update = False inner_query.select_related = False inner_query.set_annotation_mask(self.annotation_select) # Queries with distinct_fields need ordering and when a limit is # applied we must take the slice from the ordered query. Otherwise # no need for ordering. inner_query.clear_ordering(force=False) if not inner_query.distinct: # If the inner query uses default select and it has some # aggregate annotations, then we must make sure the inner # query is grouped by the main model's primary key. However, # clearing the select clause can alter results if distinct is # used. has_existing_aggregate_annotations = any( annotation for annotation in existing_annotations if getattr(annotation, "contains_aggregate", True) ) if inner_query.default_cols and has_existing_aggregate_annotations: inner_query.group_by = ( self.model._meta.pk.get_col(inner_query.get_initial_alias()), ) inner_query.default_cols = False relabels = {t: "subquery" for t in inner_query.alias_map} relabels[None] = "subquery" # Remove any aggregates marked for reduction from the subquery # and move them to the outer AggregateQuery. col_cnt = 0 for alias, expression in list(inner_query.annotation_select.items()): annotation_select_mask = inner_query.annotation_select_mask if expression.is_summary: expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt) outer_query.annotations[alias] = expression.relabeled_clone( relabels ) del inner_query.annotations[alias] annotation_select_mask.remove(alias) # Make sure the annotation_select wont use cached results. inner_query.set_annotation_mask(inner_query.annotation_select_mask) if ( inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask ): # In case of Model.objects[0:3].count(), there would be no # field selected in the inner query, yet we must use a subquery. # So, make sure at least one field is selected. 
inner_query.select = ( self.model._meta.pk.get_col(inner_query.get_initial_alias()), ) else: outer_query = self self.select = () self.default_cols = False self.extra = {} empty_set_result = [ expression.empty_result_set_value for expression in outer_query.annotation_select.values() ] elide_empty = not any(result is NotImplemented for result in empty_set_result) outer_query.clear_ordering(force=True) outer_query.clear_limits() outer_query.select_for_update = False outer_query.select_related = False compiler = outer_query.get_compiler(using, elide_empty=elide_empty) result = compiler.execute_sql(SINGLE) if result is None: result = empty_set_result converters = compiler.get_converters(outer_query.annotation_select.values()) result = next(compiler.apply_converters((result,), converters)) return dict(zip(outer_query.annotation_select, result)) def get_count(self, using): """ Perform a COUNT() query using the current filter constraints. """ obj = self.clone() obj.add_annotation(Count("*"), alias="__count", is_summary=True) return obj.get_aggregation(using, ["__count"])["__count"] def has_filters(self): return self.where def exists(self, using, limit=True): q = self.clone() if not (q.distinct and q.is_sliced): if q.group_by is True: q.add_fields( (f.attname for f in self.model._meta.concrete_fields), False ) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. q.set_group_by(allow_aliases=False) q.clear_select_clause() if q.combined_queries and q.combinator == "union": limit_combined = connections[ using ].features.supports_slicing_ordering_in_compound q.combined_queries = tuple( combined_query.exists(using, limit=limit_combined) for combined_query in q.combined_queries ) q.clear_ordering(force=True) if limit: q.set_limits(high=1) q.add_annotation(Value(1), "a") return q def has_results(self, using): q = self.exists(using) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() for option_name in options: if ( not EXPLAIN_OPTIONS_PATTERN.fullmatch(option_name) or "--" in option_name ): raise ValueError(f"Invalid option name: {option_name!r}.") q.explain_info = ExplainInfo(format, options) compiler = q.get_compiler(using=using) return "\n".join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query). 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ if self.model != rhs.model: raise TypeError("Cannot combine queries on two different base models.") if self.is_sliced: raise TypeError("Cannot combine queries once a slice has been taken.") if self.distinct != rhs.distinct: raise TypeError("Cannot combine a unique query with a non-unique query.") if self.distinct_fields != rhs.distinct_fields: raise TypeError("Cannot combine queries with different distinct fields.") # If lhs and rhs share the same alias prefix, it is possible to have # conflicting alias changes like T4 -> T5, T5 -> T6, which might end up # as T4 -> T6 while combining two querysets. To prevent this, change an # alias prefix of the rhs and update current aliases accordingly, # except if the alias is the base table since it must be present in the # query on both sides.
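# (Illustrative: if both sides use the default "T" prefix, rhs joins such as
# T2 are relabelled under the next free prefix, e.g. U2, before merging.)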
initial_alias = self.get_initial_alias() rhs.bump_prefix(self, exclude={initial_alias}) # Work out how to relabel the rhs aliases, if necessary. change_map = {} conjunction = connector == AND # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER ) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. self.unref_alias(new_alias) joinpromoter.add_votes(rhs_votes) joinpromoter.update_join_types(self) # Combine subqueries aliases to ensure aliases relabelling properly # handle subqueries when combining where and select clauses. self.subq_aliases |= rhs.subq_aliases # Now relabel a copy of the rhs where-clause and add it to the current # one. w = rhs.where.clone() w.relabel_aliases(change_map) self.where.add(w, connector) # Selection columns and extra extensions are those provided by 'rhs'. if rhs.select: self.set_select([col.relabeled_clone(change_map) for col in rhs.select]) else: self.select = () if connector == OR: # It would be nice to be able to handle this, but the queries don't # really make sense (or return consistent value sets). Not worth # the extra complexity when you can write a real query instead. if self.extra and rhs.extra: raise ValueError( "When merging querysets using 'or', you cannot have " "extra(select=...) on both sides." ) self.extra.update(rhs.extra) extra_select_mask = set() if self.extra_select_mask is not None: extra_select_mask.update(self.extra_select_mask) if rhs.extra_select_mask is not None: extra_select_mask.update(rhs.extra_select_mask) if extra_select_mask: self.set_extra_mask(extra_select_mask) self.extra_tables += rhs.extra_tables # Ordering uses the 'rhs' ordering, unless it has none, in which case # the current ordering is used. 
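# (Illustrative: (qs1 | qs2.order_by("name")) keeps the "name" ordering.)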
self.order_by = rhs.order_by or self.order_by self.extra_order_by = rhs.extra_order_by or self.extra_order_by def _get_defer_select_mask(self, opts, mask, select_mask=None): if select_mask is None: select_mask = {} select_mask[opts.pk] = {} # All concrete fields that are not part of the defer mask must be # loaded. If a relational field is encountered it gets added to the # mask for it be considered if `select_related` and the cycle continues # by recursively caling this function. for field in opts.concrete_fields: field_mask = mask.pop(field.name, None) if field_mask is None: select_mask.setdefault(field, {}) elif field_mask: if not field.is_relation: raise FieldError(next(iter(field_mask))) field_select_mask = select_mask.setdefault(field, {}) related_model = field.remote_field.model._meta.concrete_model self._get_defer_select_mask( related_model._meta, field_mask, field_select_mask ) # Remaining defer entries must be references to reverse relationships. # The following code is expected to raise FieldError if it encounters # a malformed defer entry. for field_name, field_mask in mask.items(): if filtered_relation := self._filtered_relations.get(field_name): relation = opts.get_field(filtered_relation.relation_name) field_select_mask = select_mask.setdefault((field_name, relation), {}) field = relation.field else: field = opts.get_field(field_name).field field_select_mask = select_mask.setdefault(field, {}) related_model = field.model._meta.concrete_model self._get_defer_select_mask( related_model._meta, field_mask, field_select_mask ) return select_mask def _get_only_select_mask(self, opts, mask, select_mask=None): if select_mask is None: select_mask = {} select_mask[opts.pk] = {} # Only include fields mentioned in the mask. for field_name, field_mask in mask.items(): field = opts.get_field(field_name) field_select_mask = select_mask.setdefault(field, {}) if field_mask: if not field.is_relation: raise FieldError(next(iter(field_mask))) related_model = field.remote_field.model._meta.concrete_model self._get_only_select_mask( related_model._meta, field_mask, field_select_mask ) return select_mask def get_select_mask(self): """ Convert the self.deferred_loading data structure to an alternate data structure, describing the field that *will* be loaded. This is used to compute the columns to select from the database and also by the QuerySet class to work out which fields are being initialized on each model. Models that have all their fields included aren't mentioned in the result, only those that have field restrictions in place. """ field_names, defer = self.deferred_loading if not field_names: return {} mask = {} for field_name in field_names: part_mask = mask for part in field_name.split(LOOKUP_SEP): part_mask = part_mask.setdefault(part, {}) opts = self.get_meta() if defer: return self._get_defer_select_mask(opts, mask) return self._get_only_select_mask(opts, mask) def table_alias(self, table_name, create=False, filtered_relation=None): """ Return a table alias for the given table_name and whether this is a new alias or not. If 'create' is true, a new alias is always created. Otherwise, the most recently created alias for the table (if one exists) is reused. """ alias_list = self.table_map.get(table_name) if not create and alias_list: alias = alias_list[0] self.alias_refcount[alias] += 1 return alias, False # Create a new alias for this table. 
if alias_list: alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1) alias_list.append(alias) else: # The first occurrence of a table uses the table name directly. alias = ( filtered_relation.alias if filtered_relation is not None else table_name ) self.table_map[table_name] = [alias] self.alias_refcount[alias] = 1 return alias, True def ref_alias(self, alias): """Increases the reference count for this alias.""" self.alias_refcount[alias] += 1 def unref_alias(self, alias, amount=1): """Decreases the reference count for this alias.""" self.alias_refcount[alias] -= amount def promote_joins(self, aliases): """ Promote recursively the join type of given aliases and their children to an outer join. If 'unconditional' is False, only promote the join if it is nullable or the parent join is an outer join. The children promotion is done to avoid join chains that contain a LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted, then we must also promote b->c automatically, or otherwise the promotion of a->b doesn't actually change anything in the query results. """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type is None: # This is the base table (first FROM entry) - this table # isn't really joined at all in the query, so we should not # alter its join type. continue # Only the first alias (skipped above) should have None join_type assert self.alias_map[alias].join_type is not None parent_alias = self.alias_map[alias].parent_alias parent_louter = ( parent_alias and self.alias_map[parent_alias].join_type == LOUTER ) already_louter = self.alias_map[alias].join_type == LOUTER if (self.alias_map[alias].nullable or parent_louter) and not already_louter: self.alias_map[alias] = self.alias_map[alias].promote() # Join type of 'alias' changed, so re-examine all aliases that # refer to this one. aliases.extend( join for join in self.alias_map if self.alias_map[join].parent_alias == alias and join not in aliases ) def demote_joins(self, aliases): """ Change join type from LOUTER to INNER for all joins in aliases. Similarly to promote_joins(), this method must ensure no join chains containing first an outer, then an inner join are generated. If we are demoting b->c join in chain a LOUTER b LOUTER c then we must demote a->b automatically, or otherwise the demotion of b->c doesn't actually change anything in the query results. """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type == LOUTER: self.alias_map[alias] = self.alias_map[alias].demote() parent_alias = self.alias_map[alias].parent_alias if self.alias_map[parent_alias].join_type == INNER: aliases.append(parent_alias) def reset_refcounts(self, to_counts): """ Reset reference counts for aliases so that they match the value passed in `to_counts`. """ for alias, cur_refcount in self.alias_refcount.copy().items(): unref_amount = cur_refcount - to_counts.get(alias, 0) self.unref_alias(alias, unref_amount) def change_aliases(self, change_map): """ Change the aliases in change_map (which maps old-alias -> new-alias), relabelling any references to them in select columns and the where clause. """ # If keys and values of change_map were to intersect, an alias might be # updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending # on their order in change_map. assert set(change_map).isdisjoint(change_map.values()) # 1. Update references in "select" (normal columns plus aliases), # "group by" and "where".
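# (Illustrative: with change_map={"T4": "T5"}, every column reference bound
# to alias "T4" in these clauses is relabelled to "T5".)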
self.where.relabel_aliases(change_map) if isinstance(self.group_by, tuple): self.group_by = tuple( [col.relabeled_clone(change_map) for col in self.group_by] ) self.select = tuple([col.relabeled_clone(change_map) for col in self.select]) self.annotations = self.annotations and { key: col.relabeled_clone(change_map) for key, col in self.annotations.items() } # 2. Rename the alias in the internal table/alias datastructures. for old_alias, new_alias in change_map.items(): if old_alias not in self.alias_map: continue alias_data = self.alias_map[old_alias].relabeled_clone(change_map) self.alias_map[new_alias] = alias_data self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break self.external_aliases = { # Table is aliased or it's being changed and thus is aliased. change_map.get(alias, alias): (aliased or alias in change_map) for alias, aliased in self.external_aliases.items() } def bump_prefix(self, other_query, exclude=None): """ Change the alias prefix to the next letter in the alphabet in a way that the other query's aliases and this query's aliases will not conflict. Even tables that previously had no alias will get an alias after this call. To prevent changing aliases use the exclude parameter. """ def prefix_gen(): """ Generate a sequence of characters in alphabetical order: -> 'A', 'B', 'C', ... When the alphabet is finished, the sequence will continue with the Cartesian product: -> 'AA', 'AB', 'AC', ... """ alphabet = ascii_uppercase prefix = chr(ord(self.alias_prefix) + 1) yield prefix for n in count(1): seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet for s in product(seq, repeat=n): yield "".join(s) prefix = None if self.alias_prefix != other_query.alias_prefix: # No clashes between self and outer query should be possible. return # Explicitly avoid infinite loop. The constant divider is based on how # much depth recursive subquery references add to the stack. This value # might need to be adjusted when adding or removing function calls from # the code path in charge of performing these operations. local_recursion_limit = sys.getrecursionlimit() // 16 for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RecursionError( "Maximum recursion depth exceeded: too many subqueries." ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases) if exclude is None: exclude = {} self.change_aliases( { alias: "%s%d" % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) if alias not in exclude } ) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) elif self.model: alias = self.join(self.base_table_class(self.get_meta().db_table, None)) else: alias = None return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. 
""" return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None, reuse_with_filtered_relation=False): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a base_table_class or join_class. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. The 'reuse_with_filtered_relation' parameter is used when computing FilteredRelation instances. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable. """ if reuse_with_filtered_relation and reuse: reuse_aliases = [ a for a, j in self.alias_map.items() if a in reuse and j.equals(join) ] else: reuse_aliases = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join ] if reuse_aliases: if join.table_alias in reuse_aliases: reuse_alias = join.table_alias else: # Reuse the most recent alias of the joined table # (a many-to-many relation may be joined multiple times). reuse_alias = reuse_aliases[-1] self.ref_alias(reuse_alias) return reuse_alias # No reuse is possible, so we need a new alias. alias, _ = self.table_alias( join.table_name, create=True, filtered_relation=join.filtered_relation ) if join.join_type: if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable: join_type = LOUTER else: join_type = INNER join.join_type = join_type join.table_alias = alias self.alias_map[alias] = join return alias def join_parent_model(self, opts, model, alias, seen): """ Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. """ if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def check_alias(self, alias): if FORBIDDEN_ALIAS_PATTERN.search(alias): raise ValueError( "Column aliases cannot contain whitespace characters, quotation marks, " "semicolons, or SQL comments." ) def add_annotation(self, annotation, alias, is_summary=False, select=True): """Add a single annotation expression to the Query.""" self.check_alias(alias) annotation = annotation.resolve_expression( self, allow_joins=True, reuse=None, summarize=is_summary ) if select: self.append_annotation_mask([alias]) else: self.set_annotation_mask(set(self.annotation_select).difference({alias})) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. 
clone.bump_prefix(query) clone.subquery = True clone.where.resolve_expression(query, *args, **kwargs) # Resolve combined queries. if clone.combinator: clone.combined_queries = tuple( [ combined_query.resolve_expression(query, *args, **kwargs) for combined_query in clone.combined_queries ] ) for key, value in clone.annotations.items(): resolved = value.resolve_expression(query, *args, **kwargs) if hasattr(resolved, "external_aliases"): resolved.external_aliases.update(clone.external_aliases) clone.annotations[key] = resolved # Outer query's aliases are considered external. for alias, table in query.alias_map.items(): clone.external_aliases[alias] = ( isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias ) or ( isinstance(table, BaseTable) and table.table_name != table.table_alias ) return clone def get_external_cols(self): exprs = chain(self.annotations.values(), self.where.children) return [ col for col in self._gen_cols(exprs, include_external=True) if col.alias in self.external_aliases ] def get_group_by_cols(self, alias=None): if alias: return [Ref(alias, self)] external_cols = self.get_external_cols() if any(col.possibly_multivalued for col in external_cols): return [self] return external_cols def as_sql(self, compiler, connection): # Some backends (e.g. Oracle) raise an error when a subquery contains # unnecessary ORDER BY clause. if ( self.subquery and not connection.features.ignores_unnecessary_order_by_in_subqueries ): self.clear_ordering(force=False) for query in self.combined_queries: query.clear_ordering(force=False) sql, params = self.get_compiler(connection=connection).as_sql() if self.subquery: sql = "(%s)" % sql return sql, params def resolve_lookup_value(self, value, can_reuse, allow_joins): if hasattr(value, "resolve_expression"): value = value.resolve_expression( self, reuse=can_reuse, allow_joins=allow_joins, ) elif isinstance(value, (list, tuple)): # The items of the iterable may be expressions and therefore need # to be resolved independently. values = ( self.resolve_lookup_value(sub_value, can_reuse, allow_joins) for sub_value in value ) type_ = type(value) if hasattr(type_, "_make"): # namedtuple return type_(*values) return type_(values) return value def solve_lookup_type(self, lookup): """ Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains'). """ lookup_splitted = lookup.split(LOOKUP_SEP) if self.annotations: expression, expression_lookups = refs_expression( lookup_splitted, self.annotations ) if expression: return expression_lookups, (), expression _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)] if len(lookup_parts) > 1 and not field_parts: raise FieldError( 'Invalid lookup "%s" for model "%s".' % (lookup, self.get_meta().model.__name__) ) return lookup_parts, field_parts, False def check_query_object_type(self, value, opts, field): """ Check whether the object passed while querying is of the correct type. If not, raise a ValueError specifying the wrong object. """ if hasattr(value, "_meta"): if not check_rel_lookup_compatibility(value._meta.model, opts, field): raise ValueError( 'Cannot query "%s": Must be "%s" instance.' % (value, opts.object_name) ) def check_related_objects(self, field, value, opts): """Check the type of object passed to query relations.""" if field.is_relation: # Check that the field and the queryset use the same model in a # query like .filter(author=Author.objects.all()).
# For example, the # opts would be Author's (from the author field) and value.model # would be Author.objects.all() queryset's .model (Author also). # The field is the related field on the lhs side. if ( isinstance(value, Query) and not value.has_select_fields and not check_rel_lookup_compatibility(value.model, opts, field) ): raise ValueError( 'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' % (value.model._meta.object_name, opts.object_name) ) elif hasattr(value, "_meta"): self.check_query_object_type(value, opts, field) elif hasattr(value, "__iter__"): for v in value: self.check_query_object_type(v, opts, field) def check_filterable(self, expression): """Raise an error if expression cannot be used in a WHERE clause.""" if hasattr(expression, "resolve_expression") and not getattr( expression, "filterable", True ): raise NotSupportedError( expression.__class__.__name__ + " is disallowed in the filter " "clause." ) if hasattr(expression, "get_source_expressions"): for expr in expression.get_source_expressions(): self.check_filterable(expr) def build_lookup(self, lookups, lhs, rhs): """ Try to extract transforms and lookup from given lhs. The lhs value is something that works like SQLExpression. The rhs value is what the lookup is going to compare against. The lookups is a list of names to extract using get_lookup() and get_transform(). """ # __exact is the default lookup if one isn't given. *transforms, lookup_name = lookups or ["exact"] for name in transforms: lhs = self.try_transform(lhs, name) # First try get_lookup() so that the lookup takes precedence if the lhs # supports both transform and lookup for the name. lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: # A lookup wasn't found. Try to interpret the name as a transform # and do an Exact lookup against it. lhs = self.try_transform(lhs, lookup_name) lookup_name = "exact" lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: return lookup = lookup_class(lhs, rhs) # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all # uses of None as a query value unless the lookup supports it. if lookup.rhs is None and not lookup.can_use_none_as_rhs: if lookup_name not in ("exact", "iexact"): raise ValueError("Cannot use None as a query value") return lhs.get_lookup("isnull")(lhs, True) # For Oracle '' is equivalent to null. The check must be done at this # stage because join promotion can't be done in the compiler. Using # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here. # A similar thing is done in is_nullable(), too. if ( lookup_name == "exact" and lookup.rhs == "" and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls ): return lhs.get_lookup("isnull")(lhs, True) return lookup def try_transform(self, lhs, name): """ Helper method for build_lookup(). Try to fetch and initialize a transform for name parameter from lhs. """ transform_class = lhs.get_transform(name) if transform_class: return transform_class(lhs) else: output_field = lhs.output_field.__class__ suggested_lookups = difflib.get_close_matches( name, output_field.get_lookups() ) if suggested_lookups: suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups) else: suggestion = "."
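# (Illustrative: a misspelled lookup such as name__iconatins surfaces
# "perhaps you meant icontains?" via the difflib suggestion above.)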
raise FieldError( "Unsupported lookup '%s' for %s or join on the field not " "permitted%s" % (name, output_field.__name__, suggestion) ) def build_filter( self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, allow_joins=True, split_subq=True, reuse_with_filtered_relation=False, check_filterable=True, ): """ Build a WhereNode for a single filter clause but don't add it to this Query. Query.add_q() will then add this filter to the where Node. The 'branch_negated' tells us if the current branch contains any negations. This will be used to determine if subqueries are needed. The 'current_negated' is used to determine if the current filter is negated or not and this will be used to determine if IS NULL filtering is needed. The difference between current_negated and branch_negated is that branch_negated is set on first negation, but current_negated is flipped for each negation. Note that add_filter will not do any negating itself, that is done upper in the code by add_q(). The 'can_reuse' is a set of reusable joins for multijoins. If 'reuse_with_filtered_relation' is True, then only joins in can_reuse will be reused. The method will create a filter clause that can be added to the current query. However, if the filter isn't added to the query then the caller is responsible for unreffing the joins used. """ if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") if isinstance(filter_expr, Q): return self._add_q( filter_expr, branch_negated=branch_negated, current_negated=current_negated, used_aliases=can_reuse, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) if hasattr(filter_expr, "resolve_expression"): if not getattr(filter_expr, "conditional", False): raise TypeError("Cannot filter against a non-conditional expression.") condition = filter_expr.resolve_expression(self, allow_joins=allow_joins) if not isinstance(condition, Lookup): condition = self.build_lookup(["exact"], condition, True) return WhereNode([condition], connector=AND), [] arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg) if check_filterable: self.check_filterable(reffed_expression) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins) used_joins = { k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0) } if check_filterable: self.check_filterable(value) if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) return WhereNode([condition], connector=AND), [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, reuse_with_filtered_relation=reuse_with_filtered_relation, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to 
# INNER. used_joins.update(join_info.joins) targets, alias, join_list = self.trim_joins( join_info.targets, join_info.joins, join_info.path ) if can_reuse is not None: can_reuse.update(join_list) if join_info.final_field.is_relation: if len(targets) == 1: col = self._get_col(targets[0], join_info.final_field, alias) else: col = MultiColSource( alias, targets, join_info.targets, join_info.final_field ) else: col = self._get_col(targets[0], join_info.final_field, alias) condition = self.build_lookup(lookups, col, value) lookup_type = condition.lookup_name clause = WhereNode([condition], connector=AND) require_outer = ( lookup_type == "isnull" and condition.rhs is True and not current_negated ) if ( current_negated and (lookup_type != "isnull" or condition.rhs is False) and condition.rhs is not None ): require_outer = True if lookup_type != "isnull": # The condition added here will be SQL like this: # NOT (col IS NOT NULL), where the first NOT is added in # upper layers of code. The reason for addition is that if col # is null, then col != someval will result in SQL "unknown" # which isn't the same as in Python. The Python None handling # is wanted, and it can be gotten by # (col IS NULL OR col != someval) # <=> # NOT (col IS NOT NULL AND col = someval). if ( self.is_nullable(targets[0]) or self.alias_map[join_list[-1]].join_type == LOUTER ): lookup_class = targets[0].get_lookup("isnull") col = self._get_col(targets[0], join_info.targets[0], alias) clause.add(lookup_class(col, False), AND) # If someval is a nullable column, someval IS NOT NULL is # added. if isinstance(value, Col) and self.is_nullable(value.target): lookup_class = value.target.get_lookup("isnull") clause.add(lookup_class(value, False), AND) return clause, used_joins if not require_outer else () def add_filter(self, filter_lhs, filter_rhs): self.add_q(Q((filter_lhs, filter_rhs))) def add_q(self, q_object): """ A preprocessor for the internal _add_q(). Responsible for doing final join promotion. """ # For join promotion this case is doing an AND for the added q_object # and existing conditions. So, any existing inner join forces the join # type to remain inner. Existing outer joins can however be demoted. # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if # rel_a doesn't produce any rows, then the whole condition must fail. # So, demotion is OK.)
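# (Illustrative: ANDing rel_a__col=1 onto a query where rel_a was
# LOUTER-joined allows that join to be demoted back to INNER, since NULL
# rows can no longer match the new condition.)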
existing_inner = { a for a in self.alias_map if self.alias_map[a].join_type == INNER } clause, _ = self._add_q(q_object, self.used_aliases) if clause: self.where.add(clause, AND) self.demote_joins(existing_inner) def build_where(self, filter_expr): return self.build_filter(filter_expr, allow_joins=False)[0] def clear_where(self): self.where = WhereNode() def _add_q( self, q_object, used_aliases, branch_negated=False, current_negated=False, allow_joins=True, split_subq=True, check_filterable=True, ): """Add a Q-object to the current filter.""" connector = q_object.connector current_negated = current_negated ^ q_object.negated branch_negated = branch_negated or q_object.negated target_clause = WhereNode(connector=connector, negated=q_object.negated) joinpromoter = JoinPromoter( q_object.connector, len(q_object.children), current_negated ) for child in q_object.children: child_clause, needed_inner = self.build_filter( child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) joinpromoter.add_votes(needed_inner) if child_clause: target_clause.add(child_clause, connector) needed_inner = joinpromoter.update_join_types(self) return target_clause, needed_inner def build_filtered_relation_q( self, q_object, reuse, branch_negated=False, current_negated=False ): """Add a FilteredRelation object to the current filter.""" connector = q_object.connector current_negated ^= q_object.negated branch_negated = branch_negated or q_object.negated target_clause = WhereNode(connector=connector, negated=q_object.negated) for child in q_object.children: if isinstance(child, Node): child_clause = self.build_filtered_relation_q( child, reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, ) else: child_clause, _ = self.build_filter( child, can_reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, allow_joins=True, split_subq=False, reuse_with_filtered_relation=True, ) target_clause.add(child_clause, connector) return target_clause def add_filtered_relation(self, filtered_relation, alias): filtered_relation.alias = alias lookups = dict(get_children_from_q(filtered_relation.condition)) relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type( filtered_relation.relation_name ) if relation_lookup_parts: raise ValueError( "FilteredRelation's relation_name cannot contain lookups " "(got %r)." % filtered_relation.relation_name ) for lookup in chain(lookups): lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup) shift = 2 if not lookup_parts else 1 lookup_field_path = lookup_field_parts[:-shift] for idx, lookup_field_part in enumerate(lookup_field_path): if len(relation_field_parts) > idx: if relation_field_parts[idx] != lookup_field_part: raise ValueError( "FilteredRelation's condition doesn't support " "relations outside the %r (got %r)." % (filtered_relation.relation_name, lookup) ) else: raise ValueError( "FilteredRelation's condition doesn't support nested " "relations deeper than the relation_name (got %r for " "%r)." % (lookup, filtered_relation.relation_name) ) self._filtered_relations[filtered_relation.alias] = filtered_relation def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False): """ Walk the list of names and turn them into PathInfo tuples. A single name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we start the name resolving from, 'allow_many' is as for setup_joins(). If fail_on_missing is set to True, then a name that can't be resolved will generate a FieldError. Return a list of PathInfo tuples. In addition return the final field (the last used join field) and target (which is a field guaranteed to contain the same value as the final field). Finally, return those names that weren't found (which are likely transforms and the final lookup). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == "pk": name = opts.pk.name field = None filtered_relation = None try: if opts is None: raise FieldDoesNotExist field = opts.get_field(name) except FieldDoesNotExist: if name in self.annotation_select: field = self.annotation_select[name].output_field elif name in self._filtered_relations and pos == 0: filtered_relation = self._filtered_relations[name] if LOOKUP_SEP in filtered_relation.relation_name: parts = filtered_relation.relation_name.split(LOOKUP_SEP) filtered_relation_path, field, _, _ = self.names_to_path( parts, opts, allow_many, fail_on_missing, ) path.extend(filtered_relation_path[:-1]) else: field = opts.get_field(filtered_relation.relation_name) if field is not None: # Fields that contain one-to-many relations with a generic # model (like a GenericForeignKey) cannot generate reverse # relations and therefore cannot be used for reverse querying. if field.is_relation and not field.related_model: raise FieldError( "Field %r does not generate an automatic reverse " "relation and therefore cannot be used for reverse " "querying. If it is a GenericForeignKey, consider " "adding a GenericRelation." % name ) try: model = field.model._meta.concrete_model except AttributeError: # QuerySet.annotate() may introduce fields that aren't # attached to a model. model = None else: # We didn't find the current field, so move position back # one step. pos -= 1 if pos == -1 or fail_on_missing: available = sorted( [ *get_field_names_from_opts(opts), *self.annotation_select, *self._filtered_relations, ] ) raise FieldError( "Cannot resolve keyword '%s' into field. " "Choices are: %s" % (name, ", ".join(available)) ) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if opts is not None and model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, "path_infos"): if filtered_relation: pathinfos = field.get_path_info(filtered_relation) else: pathinfos = field.path_infos if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. Join on '%s'" " not permitted." 
% (names[pos + 1], name) ) break return path, final_field, targets, names[pos + 1 :] def setup_joins( self, names, opts, alias, can_reuse=None, allow_many=True, reuse_with_filtered_relation=False, ): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). The 'reuse_with_filtered_relation' can be used to force 'can_reuse' parameter and force the relation on the given connections. If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function that takes a field and alias and is equivalent to `field.get_col(alias)` in the simple case but wraps field transforms if they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to that value. Final field is needed for example in some value conversions (convert 'obj' in fk__id=obj to pk val using the foreign key field for example). """ joins = [alias] # The transform can't be applied yet, as joins must be trimmed later. # To avoid making every caller of this method look up transforms # directly, compute transforms here and create a partial that converts # fields to the appropriate wrapped version. def final_transformer(field, alias): if not self.alias_cols: alias = None return field.get_col(alias) # Try resolving all the names as fields first. If there's an error, # treat trailing names as lookups until a field can be resolved. last_field_exception = None for pivot in range(len(names), 0, -1): try: path, final_field, targets, rest = self.names_to_path( names[:pivot], opts, allow_many, fail_on_missing=True, ) except FieldError as exc: if pivot == 1: # The first item cannot be a lookup, so it's safe # to raise the field error here. raise else: last_field_exception = exc else: # The transforms are the remaining items that couldn't be # resolved into fields. transforms = names[pivot:] break for name in transforms: def transform(field, alias, *, name, previous): try: wrapped = previous(field, alias) return self.try_transform(wrapped, name) except FieldError: # FieldError is raised if the transform doesn't exist. if isinstance(final_field, Field) and last_field_exception: raise last_field_exception else: raise final_transformer = functools.partial( transform, name=name, previous=final_transformer ) final_transformer.has_transforms = True # Then, add the path to the query's joins. Note that we can't trim # joins at this stage - we will need the information about join type # of the trimmed joins. 
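        # Illustrative sketch (hypothetical names, not part of this module):
        # for names == ['author', 'name', 'lower'], the pivot loop above
        # resolves ['author', 'name'] into fields and leaves ['lower'] as a
        # transform, so final_transformer ends up wrapping
        # field.get_col(alias) in the registered Lower() transform.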
        for join in path:
            if join.filtered_relation:
                filtered_relation = join.filtered_relation.clone()
                table_alias = filtered_relation.alias
            else:
                filtered_relation = None
                table_alias = None
            opts = join.to_opts
            if join.direct:
                nullable = self.is_nullable(join.join_field)
            else:
                nullable = True
            connection = self.join_class(
                opts.db_table,
                alias,
                table_alias,
                INNER,
                join.join_field,
                nullable,
                filtered_relation=filtered_relation,
            )
            reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
            alias = self.join(
                connection,
                reuse=reuse,
                reuse_with_filtered_relation=reuse_with_filtered_relation,
            )
            joins.append(alias)
            if filtered_relation:
                filtered_relation.path = joins[:]
        return JoinInfo(final_field, targets, opts, joins, path, final_transformer)

    def trim_joins(self, targets, joins, path):
        """
        The 'target' parameter is the final field being joined to, 'joins'
        is the full list of join aliases. The 'path' contains the PathInfos
        used to create the joins.

        Return the final target field and table alias and the new active
        joins.

        Always trim any direct join if the target column is already in the
        previous table. Can't trim reverse joins as it's unknown if there's
        anything on the other side of the join.
        """
        joins = joins[:]
        for pos, info in enumerate(reversed(path)):
            if len(joins) == 1 or not info.direct:
                break
            if info.filtered_relation:
                break
            join_targets = {t.column for t in info.join_field.foreign_related_fields}
            cur_targets = {t.column for t in targets}
            if not cur_targets.issubset(join_targets):
                break
            targets_dict = {
                r[1].column: r[0]
                for r in info.join_field.related_fields
                if r[1].column in cur_targets
            }
            targets = tuple(targets_dict[t.column] for t in targets)
            self.unref_alias(joins.pop())
        return targets, joins[-1], joins

    @classmethod
    def _gen_cols(cls, exprs, include_external=False):
        for expr in exprs:
            if isinstance(expr, Col):
                yield expr
            elif include_external and callable(
                getattr(expr, "get_external_cols", None)
            ):
                yield from expr.get_external_cols()
            elif hasattr(expr, "get_source_expressions"):
                yield from cls._gen_cols(
                    expr.get_source_expressions(),
                    include_external=include_external,
                )

    @classmethod
    def _gen_col_aliases(cls, exprs):
        yield from (expr.alias for expr in cls._gen_cols(exprs))

    def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
        annotation = self.annotations.get(name)
        if annotation is not None:
            if not allow_joins:
                for alias in self._gen_col_aliases([annotation]):
                    if isinstance(self.alias_map[alias], Join):
                        raise FieldError(
                            "Joined field references are not permitted in this query"
                        )
            if summarize:
                # Summarize currently means we are doing an aggregate() query
                # which is executed as a wrapped subquery if any of the
                # aggregate() elements reference an existing annotation. In
                # that case we need to return a Ref to the subquery's annotation.
                if name not in self.annotation_select:
                    raise FieldError(
                        "Cannot aggregate over the '%s' alias. Use annotate() "
                        "to promote it."
% name ) return Ref(name, self.annotation_select[name]) else: return annotation else: field_list = name.split(LOOKUP_SEP) annotation = self.annotations.get(field_list[0]) if annotation is not None: for transform in field_list[1:]: annotation = self.try_transform(annotation, transform) return annotation join_info = self.setup_joins( field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse ) targets, final_alias, join_list = self.trim_joins( join_info.targets, join_info.joins, join_info.path ) if not allow_joins and len(join_list) > 1: raise FieldError( "Joined field references are not permitted in this query" ) if len(targets) > 1: raise FieldError( "Referencing multicolumn fields with F() objects isn't supported" ) # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. transform = join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) return transform def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT EXISTS( SELECT 1 FROM child WHERE name = 'foo' AND child.parent_id = parent.id LIMIT 1 ) """ # Generate the inner query. query = self.__class__(self.model) query._filtered_relations = self._filtered_relations filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, OuterRef): filter_rhs = OuterRef(filter_rhs) elif isinstance(filter_rhs, F): filter_rhs = OuterRef(filter_rhs.name) query.add_filter(filter_lhs, filter_rhs) query.clear_ordering(force=True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) col = query.select[0] select_field = col.target alias = col.alias if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup("exact") # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases[alias] = True lookup_class = select_field.get_lookup("exact") lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix)) query.where.add(lookup, AND) condition, needed_inner = self.build_filter(Exists(query)) if contains_louter: or_null_condition, _ = self.build_filter( ("%s__isnull" % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse, ) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. 
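        # A hedged sketch tying this together (hypothetical Parent/Child
        # models): for Parent.objects.exclude(child__name='foo') the
        # condition built above is roughly
        #   EXISTS(SELECT 1 FROM child
        #          WHERE child.parent_id = parent.id AND child.name = 'foo')
        #   [OR trimmed_prefix IS NULL, when the prefix contains a LOUTER]
        # and the exclude()'s negation is applied in the caller's layer.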
        return condition, needed_inner

    def set_empty(self):
        self.where.add(NothingNode(), AND)
        for query in self.combined_queries:
            query.set_empty()

    def is_empty(self):
        return any(isinstance(c, NothingNode) for c in self.where.children)

    def set_limits(self, low=None, high=None):
        """
        Adjust the limits on the rows retrieved. Use low/high to set these,
        as it makes it more Pythonic to read and write. When the SQL query is
        created, convert them to the appropriate offset and limit values.

        Apply any limits passed in here to the existing constraints. Add low
        to the current low value and clamp both to any existing high value.
        """
        if high is not None:
            if self.high_mark is not None:
                self.high_mark = min(self.high_mark, self.low_mark + high)
            else:
                self.high_mark = self.low_mark + high
        if low is not None:
            if self.high_mark is not None:
                self.low_mark = min(self.high_mark, self.low_mark + low)
            else:
                self.low_mark = self.low_mark + low

        if self.low_mark == self.high_mark:
            self.set_empty()

    def clear_limits(self):
        """Clear any existing limits."""
        self.low_mark, self.high_mark = 0, None

    @property
    def is_sliced(self):
        return self.low_mark != 0 or self.high_mark is not None

    def has_limit_one(self):
        return self.high_mark is not None and (self.high_mark - self.low_mark) == 1

    def can_filter(self):
        """
        Return True if adding filters to this instance is still possible.

        Typically, this means no limits or offsets have been put on the
        results.
        """
        return not self.is_sliced

    def clear_select_clause(self):
        """Remove all fields from SELECT clause."""
        self.select = ()
        self.default_cols = False
        self.select_related = False
        self.set_extra_mask(())
        self.set_annotation_mask(())

    def clear_select_fields(self):
        """
        Clear the list of fields to select (but not extra_select columns).
        Some queryset types completely replace any existing list of select
        columns.
        """
        self.select = ()
        self.values_select = ()

    def add_select_col(self, col, name):
        self.select += (col,)
        self.values_select += (name,)

    def set_select(self, cols):
        self.default_cols = False
        self.select = tuple(cols)

    def add_distinct_fields(self, *field_names):
        """
        Add and resolve the given fields to the query's "distinct on" clause.
        """
        self.distinct_fields = field_names
        self.distinct = True

    def add_fields(self, field_names, allow_m2m=True):
        """
        Add the given (model) fields to the select set. Add the field names in
        the order specified.
        """
        alias = self.get_initial_alias()
        opts = self.get_meta()

        try:
            cols = []
            for name in field_names:
                # Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use outer join.
                join_info = self.setup_joins(
                    name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m
                )
                targets, final_alias, joins = self.trim_joins(
                    join_info.targets,
                    join_info.joins,
                    join_info.path,
                )
                for target in targets:
                    cols.append(join_info.transform_function(target, final_alias))
            if cols:
                self.set_select(cols)
        except MultiJoin:
            raise FieldError("Invalid field name: '%s'" % name)
        except FieldError:
            if LOOKUP_SEP in name:
                # For lookups spanning over relationships, show the error
                # from the model on which the lookup failed.
                raise
            elif name in self.annotations:
                raise FieldError(
                    "Cannot select the '%s' alias. Use annotate() to promote "
                    "it." % name
                )
            else:
                names = sorted(
                    [
                        *get_field_names_from_opts(opts),
                        *self.extra,
                        *self.annotation_select,
                        *self._filtered_relations,
                    ]
                )
                raise FieldError(
                    "Cannot resolve keyword %r into field. "
                    "Choices are: %s" % (name, ", ".join(names))
                )

    def add_ordering(self, *ordering):
        """
        Add items from the 'ordering' sequence to the query's "order by"
        clause. These items are either field names (not column names) --
        possibly with a direction prefix ('-' or '?') -- or OrderBy
        expressions.

        If 'ordering' is empty, clear all ordering from the query.
        """
        errors = []
        for item in ordering:
            if isinstance(item, str):
                if item == "?":
                    continue
                if item.startswith("-"):
                    item = item[1:]
                if item in self.annotations:
                    continue
                if self.extra and item in self.extra:
                    continue
                # names_to_path() validates the lookup. A descriptive
                # FieldError will be raised if it's not.
                self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
            elif not hasattr(item, "resolve_expression"):
                errors.append(item)
            if getattr(item, "contains_aggregate", False):
                raise FieldError(
                    "Using an aggregate in order_by() without also including "
                    "it in annotate() is not allowed: %s" % item
                )
        if errors:
            raise FieldError("Invalid order_by arguments: %s" % errors)
        if ordering:
            self.order_by += ordering
        else:
            self.default_ordering = False

    def clear_ordering(self, force=False, clear_default=True):
        """
        Remove any ordering settings if the current query allows it without
        side effects; set 'force' to True to clear the ordering regardless.
        If 'clear_default' is True, there will be no ordering in the
        resulting query (not even the model's default).
        """
        if not force and (
            self.is_sliced or self.distinct_fields or self.select_for_update
        ):
            return
        self.order_by = ()
        self.extra_order_by = ()
        if clear_default:
            self.default_ordering = False

    def set_group_by(self, allow_aliases=True):
        """
        Expand the GROUP BY clause required by the query.

        This will usually be the set of all non-aggregate fields in the
        return data. If the database backend supports grouping by the
        primary key, and the query would be equivalent, the optimization
        will be made automatically.
        """
        # Column names from JOINs to check collisions with aliases.
        if allow_aliases:
            column_names = set()
            seen_models = set()
            for join in list(self.alias_map.values())[1:]:  # Skip base table.
                model = join.join_field.related_model
                if model not in seen_models:
                    column_names.update(
                        {field.column for field in model._meta.local_concrete_fields}
                    )
                    seen_models.add(model)
        group_by = list(self.select)
        if self.annotation_select:
            for alias, annotation in self.annotation_select.items():
                if not allow_aliases or alias in column_names:
                    alias = None
                group_by_cols = annotation.get_group_by_cols(alias=alias)
                group_by.extend(group_by_cols)
        self.group_by = tuple(group_by)

    def add_select_related(self, fields):
        """
        Set up the select_related data structure so that we only select
        certain related models (as opposed to all models, when
        self.select_related=True).
        """
        if isinstance(self.select_related, bool):
            field_dict = {}
        else:
            field_dict = self.select_related
        for field in fields:
            d = field_dict
            for part in field.split(LOOKUP_SEP):
                d = d.setdefault(part, {})
        self.select_related = field_dict

    def add_extra(self, select, select_params, where, params, tables, order_by):
        """
        Add data to the various extra_* attributes for user-created additions
        to the query.
        """
        if select:
            # We need to pair any placeholder markers in the 'select'
            # dictionary with their parameters in 'select_params' so that
            # subsequent updates to the select dictionary also adjust the
            # parameters appropriately.
            select_pairs = {}
            if select_params:
                param_iter = iter(select_params)
            else:
                param_iter = iter([])
            for name, entry in select.items():
                self.check_alias(name)
                entry = str(entry)
                entry_params = []
                pos = entry.find("%s")
                while pos != -1:
                    if pos == 0 or entry[pos - 1] != "%":
                        entry_params.append(next(param_iter))
                    pos = entry.find("%s", pos + 2)
                select_pairs[name] = (entry, entry_params)
            self.extra.update(select_pairs)
        if where or params:
            self.where.add(ExtraWhere(where, params), AND)
        if tables:
            self.extra_tables += tuple(tables)
        if order_by:
            self.extra_order_by = order_by

    def clear_deferred_loading(self):
        """Remove any fields from the deferred loading set."""
        self.deferred_loading = (frozenset(), True)

    def add_deferred_loading(self, field_names):
        """
        Add the given list of model field names to the set of fields to
        exclude from loading from the database when automatic column selection
        is done. Add the new field names to any existing field names that
        are deferred (or removed from any existing field names that are marked
        as the only ones for immediate loading).
        """
        # Fields on related models are stored in the literal double-underscore
        # format, so that we can use a set data structure. We do the foo__bar
        # splitting and handling when computing the SQL column names (as part
        # of get_columns()).
        existing, defer = self.deferred_loading
        if defer:
            # Add to existing deferred names.
            self.deferred_loading = existing.union(field_names), True
        else:
            # Remove names from the set of any existing "immediate load" names.
            if new_existing := existing.difference(field_names):
                self.deferred_loading = new_existing, False
            else:
                self.clear_deferred_loading()
                if new_only := set(field_names).difference(existing):
                    self.deferred_loading = new_only, True

    def add_immediate_loading(self, field_names):
        """
        Add the given list of model field names to the set of fields to
        retrieve when the SQL is executed ("immediate loading" fields). The
        field names replace any existing immediate loading field names. If
        there are field names already specified for deferred loading, remove
        those names from the new field_names before storing the new names
        for immediate loading. (That is, immediate loading overrides any
        existing immediate values, but respects existing deferrals.)
        """
        existing, defer = self.deferred_loading
        field_names = set(field_names)
        if "pk" in field_names:
            field_names.remove("pk")
            field_names.add(self.get_meta().pk.name)

        if defer:
            # Remove any existing deferred names from the current set before
            # setting the new names.
            self.deferred_loading = field_names.difference(existing), False
        else:
            # Replace any existing "immediate load" field names.
            self.deferred_loading = frozenset(field_names), False

    def set_annotation_mask(self, names):
        """Set the mask of annotations that will be returned by the SELECT."""
        if names is None:
            self.annotation_select_mask = None
        else:
            self.annotation_select_mask = set(names)
        self._annotation_select_cache = None

    def append_annotation_mask(self, names):
        if self.annotation_select_mask is not None:
            self.set_annotation_mask(self.annotation_select_mask.union(names))

    def set_extra_mask(self, names):
        """
        Set the mask of extra select items that will be returned by SELECT.
        Don't remove them from the Query since they might be used later.
        """
        if names is None:
            self.extra_select_mask = None
        else:
            self.extra_select_mask = set(names)
        self._extra_select_cache = None

    def set_values(self, fields):
        self.select_related = False
        self.clear_deferred_loading()
        self.clear_select_fields()
        self.has_select_fields = True

        if fields:
            field_names = []
            extra_names = []
            annotation_names = []
            if not self.extra and not self.annotations:
                # Shortcut - if there are no extra or annotations, then
                # the values() clause must be just field names.
                field_names = list(fields)
            else:
                self.default_cols = False
                for f in fields:
                    if f in self.extra_select:
                        extra_names.append(f)
                    elif f in self.annotation_select:
                        annotation_names.append(f)
                    else:
                        field_names.append(f)
            self.set_extra_mask(extra_names)
            self.set_annotation_mask(annotation_names)
            selected = frozenset(field_names + extra_names + annotation_names)
        else:
            field_names = [f.attname for f in self.model._meta.concrete_fields]
            selected = frozenset(field_names)
        # Selected annotations must be known before setting the GROUP BY
        # clause.
        if self.group_by is True:
            self.add_fields(
                (f.attname for f in self.model._meta.concrete_fields), False
            )
            # Disable GROUP BY aliases to avoid orphaning references to the
            # SELECT clause which is about to be cleared.
            self.set_group_by(allow_aliases=False)
            self.clear_select_fields()
        elif self.group_by:
            # Resolve GROUP BY annotation references if they are not part of
            # the selected fields anymore.
            group_by = []
            for expr in self.group_by:
                if isinstance(expr, Ref) and expr.refs not in selected:
                    expr = self.annotations[expr.refs]
                group_by.append(expr)
            self.group_by = tuple(group_by)

        self.values_select = tuple(field_names)
        self.add_fields(field_names, True)

    @property
    def annotation_select(self):
        """
        Return the dictionary of aggregate columns that are not masked and
        should be used in the SELECT clause. Cache this result for
        performance.
        """
        if self._annotation_select_cache is not None:
            return self._annotation_select_cache
        elif not self.annotations:
            return {}
        elif self.annotation_select_mask is not None:
            self._annotation_select_cache = {
                k: v
                for k, v in self.annotations.items()
                if k in self.annotation_select_mask
            }
            return self._annotation_select_cache
        else:
            return self.annotations

    @property
    def extra_select(self):
        if self._extra_select_cache is not None:
            return self._extra_select_cache
        if not self.extra:
            return {}
        elif self.extra_select_mask is not None:
            self._extra_select_cache = {
                k: v for k, v in self.extra.items() if k in self.extra_select_mask
            }
            return self._extra_select_cache
        else:
            return self.extra

    def trim_start(self, names_with_path):
        """
        Trim joins from the start of the join path. The candidates for trim
        are the PathInfos in the names_with_path structure up to the first
        m2m join. Also set the select column so the start matches the join.

        This method is meant to be used for generating the subquery joins &
        cols in split_exclude().

        Return a lookup usable for doing outerq.filter(lookup=self) and a
        boolean indicating if the joins in the prefix contain a LEFT OUTER
        join.
        """
        all_paths = []
        for _, paths in names_with_path:
            all_paths.extend(paths)
        contains_louter = False
        # Trim and operate only on tables that were generated for
        # the lookup part of the query. That is, avoid trimming
        # joins generated for F() expressions.
        lookup_tables = [
            t for t in self.alias_map if t in self._lookup_joins or t == self.base_table
        ]
        for trimmed_paths, path in enumerate(all_paths):
            if path.m2m:
                break
            if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
                contains_louter = True
            alias = lookup_tables[trimmed_paths]
            self.unref_alias(alias)
        # The path.join_field is a Rel, let's get the other side's field.
        join_field = path.join_field.field
        # Build the filter prefix.
        paths_in_prefix = trimmed_paths
        trimmed_prefix = []
        for name, path in names_with_path:
            if paths_in_prefix - len(path) < 0:
                break
            trimmed_prefix.append(name)
            paths_in_prefix -= len(path)
        trimmed_prefix.append(join_field.foreign_related_fields[0].name)
        trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
        # (that is, self). We can't do this for:
        # - LEFT JOINs because we would miss those rows that have nothing on
        #   the outer side,
        # - INNER JOINs from filtered relations because we would miss their
        #   filters.
        first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
        if first_join.join_type != LOUTER and not first_join.filtered_relation:
            select_fields = [r[0] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths + 1]
            self.unref_alias(lookup_tables[trimmed_paths])
            extra_restriction = join_field.get_extra_restriction(
                None, lookup_tables[trimmed_paths + 1]
            )
            if extra_restriction:
                self.where.add(extra_restriction, AND)
        else:
            # TODO: It might be possible to trim more joins from the start of
            # the inner query if it happens to have a longer join chain
            # containing the values in select_fields. Let's punt this one for
            # now.
            select_fields = [r[1] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths]
        # The found starting point is likely a join_class instead of a
        # base_table_class reference. But the first entry in the query's FROM
        # clause must not be a JOIN.
        for table in self.alias_map:
            if self.alias_refcount[table] > 0:
                self.alias_map[table] = self.base_table_class(
                    self.alias_map[table].table_name,
                    table,
                )
                break
        self.set_select([f.get_col(select_alias) for f in select_fields])
        return trimmed_prefix, contains_louter

    def is_nullable(self, field):
        """
        Check if the given field should be treated as nullable.

        Some backends treat '' as null and Django treats such fields as
        nullable for those backends. In such situations field.null can be
        False even if we should treat the field as nullable.
        """
        # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
        # (nor should it have) knowledge of which connection is going to be
        # used. The proper fix would be to defer all decisions where
        # is_nullable() is needed to the compiler stage, but that is not easy
        # to do currently.
        return field.null or (
            field.empty_strings_allowed
            and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
        )


def get_order_dir(field, default="ASC"):
    """
    Return the field name and direction for an order specification. For
    example, '-foo' is returned as ('foo', 'DESC').

    The 'default' param is used to indicate which way no prefix (or a '+'
    prefix) should sort. The '-' prefix always sorts the opposite way.
    """
    dirn = ORDER_DIR[default]
    if field[0] == "-":
        return field[1:], dirn[1]
    return field, dirn[0]


class JoinPromoter:
    """
    A class to abstract away join promotion problems for complex filter
    conditions.
    """

    def __init__(self, connector, num_children, negated):
        self.connector = connector
        self.negated = negated
        if self.negated:
            if connector == AND:
                self.effective_connector = OR
            else:
                self.effective_connector = AND
        else:
            self.effective_connector = self.connector
        self.num_children = num_children
        # Maps of table alias to how many times it is seen as required for
        # inner and/or outer joins.
        self.votes = Counter()

    def __repr__(self):
        return (
            f"{self.__class__.__qualname__}(connector={self.connector!r}, "
            f"num_children={self.num_children!r}, negated={self.negated!r})"
        )

    def add_votes(self, votes):
        """
        Add single vote per item to self.votes. Parameter can be any
        iterable.
        """
        self.votes.update(votes)

    def update_join_types(self, query):
        """
        Change join types so that the generated query is as efficient as
        possible, but still correct. So, change as many joins as possible
        to INNER, but don't make OUTER joins INNER if that could remove
        results from the query.
        """
        to_promote = set()
        to_demote = set()
        # The effective_connector is used so that NOT (a AND b) is treated
        # similarly to (a OR b) for join promotion.
        for table, votes in self.votes.items():
            # We must use outer joins in OR case when the join isn't contained
            # in all of the joins. Otherwise the INNER JOIN itself could remove
            # valid results. Consider the case where a model with rel_a and
            # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any rows (for example, a
            # reverse foreign key with no matching rows, or a NULL value in a
            # direct foreign key), and there is a matching row in rel_b with
            # col=2, then an INNER join to rel_a would remove a valid match
            # from the query. So, we need to promote any existing INNER to
            # LOUTER (it is possible this promotion in turn will be demoted
            # later on).
            if self.effective_connector == OR and votes < self.num_children:
                to_promote.add(table)
            # If connector is AND and there is a filter that can match only
            # when there is a joinable row, then use INNER. For example, in
            # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
            # as join output, then the col=1 or col=2 can't match (as
            # NULL=anything is always false).
            # For the OR case, if all children voted for a join to be inner,
            # then we can use INNER for the join. For example:
            # (rel_a__col__icontains="Alex" | rel_a__col__icontains="Russell")
            # then if rel_a doesn't produce any rows, the whole condition
            # can't match. Hence we can safely use INNER join.
            if self.effective_connector == AND or (
                self.effective_connector == OR and votes == self.num_children
            ):
                to_demote.add(table)
            # Finally, what happens in cases where we have:
            #    (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
            # Now, we first generate the OR clause, and promote joins for it
            # in the first if branch above. Both rel_a and rel_b are promoted
            # to LOUTER joins. After that we do the AND case. The OR case
            # voted no inner joins but the rel_a__col__gte=0 votes inner join
            # for rel_a. We demote it back to INNER join (in AND case a single
            # vote is enough). The demotion is OK: if rel_a doesn't produce
            # rows, then the rel_a__col__gte=0 clause can't be true, and thus
            # the whole clause must be false. So, it is safe to use INNER
            # join.
            # Note that in this example we could just as well have the __gte
            # clause and the OR clause swapped. Or we could replace the __gte
            # clause with an OR clause containing rel_a__col=1|rel_a__col=2,
            # and again we could safely demote to INNER.
        query.promote_joins(to_promote)
        query.demote_joins(to_demote)
        return to_demote
230b93a96be39dabb1404955e79ca2fc55af74f199b923b928d07181617b1746
import collections
import json
import re
from functools import partial
from itertools import chain

from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.lookups import Lookup
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
    CURSOR,
    GET_ITERATOR_CHUNK_SIZE,
    MULTI,
    NO_RESULTS,
    ORDER_DIR,
    SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.models.sql.where import AND
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile


class SQLCompiler:
    # Multiline ordering SQL clause may appear from RawSQL.
    ordering_parts = _lazy_re_compile(
        r"^(.*)\s(?:ASC|DESC).*",
        re.MULTILINE | re.DOTALL,
    )

    def __init__(self, query, connection, using, elide_empty=True):
        self.query = query
        self.connection = connection
        self.using = using
        # Some queries, e.g. coalesced aggregation, need to be executed even
        # if they would return an empty result set.
        self.elide_empty = elide_empty
        self.quote_cache = {"*": "*"}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); these are set as a side-effect of executing
        # the query. Note that we calculate separately a list of extra select
        # columns needed for grammatical correctness of the query, but these
        # columns are not included in self.select.
        self.select = None
        self.annotation_col_map = None
        self.klass_info = None
        self._meta_ordering = None

    def __repr__(self):
        return (
            f"<{self.__class__.__qualname__} "
            f"model={self.query.model.__qualname__} "
            f"connection={self.connection!r} using={self.using!r}>"
        )

    def setup_query(self, with_col_aliases=False):
        if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
            self.query.get_initial_alias()
        self.select, self.klass_info, self.annotation_col_map = self.get_select(
            with_col_aliases=with_col_aliases,
        )
        self.col_count = len(self.select)

    def pre_sql_setup(self, with_col_aliases=False):
        """
        Do any necessary class setup immediately prior to producing SQL. This
        is for things that can't necessarily be done in __init__ because we
        might not have all the pieces in place at that time.
        """
        self.setup_query(with_col_aliases=with_col_aliases)
        order_by = self.get_order_by()
        self.where, self.having, self.qualify = self.query.where.split_having_qualify(
            must_group_by=self.query.group_by is not None
        )
        extra_select = self.get_extra_select(order_by, self.select)
        self.has_extra_select = bool(extra_select)
        group_by = self.get_group_by(self.select + extra_select, order_by)
        return extra_select, order_by, group_by

    def get_group_by(self, select, order_by):
        """
        Return a list of 2-tuples of form (sql, params).

        The logic of what exactly the GROUP BY clause contains is hard
        to describe in other words than "if it passes the test suite,
        then it is correct".
""" # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk') # .annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. if self.query.group_by is None: return [] expressions = [] if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, "as_sql"): expressions.append(self.query.resolve_ref(expr)) else: expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)} for expr, _, _ in select: # Skip members of the select clause that are already included # by reference. if expr in ref_sources: continue cols = expr.get_group_by_cols() for col in cols: expressions.append(col) if not self._meta_ordering: for expr, (sql, params, is_ref) in order_by: # Skip references to the SELECT clause, as all expressions in # the SELECT clause are already part of the GROUP BY. if not is_ref: expressions.extend(expr.get_group_by_cols()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) for expr in expressions: try: sql, params = self.compile(expr) except EmptyResultSet: continue sql, params = expr.select_format(self, sql, params) params_hash = make_hashable(params) if (sql, params_hash) not in seen: result.append((sql, params)) seen.add((sql, params_hash)) return result def collapse_group_by(self, expressions, having): # If the DB can group by primary key, then group by the primary key of # query's main model. Note that for PostgreSQL the GROUP BY clause must # include the primary key of every table, but for MySQL it is enough to # have the main table's primary key. if self.connection.features.allows_group_by_pk: # Determine if the main model's primary key is in the query. pk = None for expr in expressions: # Is this a reference to query's base table primary key? If the # expression isn't a Col-like, then skip the expression. 
                if (
                    getattr(expr, "target", None) == self.query.model._meta.pk
                    and getattr(expr, "alias", None) == self.query.base_table
                ):
                    pk = expr
                    break
            # If the main model's primary key is in the query, group by that
            # field, HAVING expressions, and expressions associated with
            # tables that don't have a primary key included in the grouped
            # columns.
            if pk:
                pk_aliases = {
                    expr.alias
                    for expr in expressions
                    if hasattr(expr, "target") and expr.target.primary_key
                }
                expressions = [pk] + [
                    expr
                    for expr in expressions
                    if expr in having
                    or (
                        getattr(expr, "alias", None) is not None
                        and expr.alias not in pk_aliases
                    )
                ]
        elif self.connection.features.allows_group_by_selected_pks:
            # Filter out all expressions associated with a table's primary
            # key present in the grouped columns. This is done by identifying
            # all tables that have their primary key included in the grouped
            # columns and removing non-primary key columns referring to them.
            # Unmanaged models are excluded because they could be representing
            # database views on which the optimization might not be allowed.
            pks = {
                expr
                for expr in expressions
                if (
                    hasattr(expr, "target")
                    and expr.target.primary_key
                    and self.connection.features.allows_group_by_selected_pks_on_model(
                        expr.target.model
                    )
                )
            }
            aliases = {expr.alias for expr in pks}
            expressions = [
                expr
                for expr in expressions
                if expr in pks or getattr(expr, "alias", None) not in aliases
            ]
        return expressions

    def get_select(self, with_col_aliases=False):
        """
        Return three values:
        - a list of 3-tuples of (expression, (sql, params), alias)
        - a klass_info structure,
        - a dictionary of annotations

        The (sql, params) is what the expression will produce, and alias is
        the "AS alias" for the column (possibly None).

        The klass_info structure contains the following information:
        - The base model of the query.
        - Which columns for that model are present in the query (by position
          of the select clause).
        - related_klass_infos: [f, klass_info] to descend into

        'annotations' is a dictionary of {'attname': column position} values.
        """
        select = []
        klass_info = None
        annotations = {}
        select_idx = 0
        for alias, (sql, params) in self.query.extra_select.items():
            annotations[alias] = select_idx
            select.append((RawSQL(sql, params), alias))
            select_idx += 1
        assert not (self.query.select and self.query.default_cols)
        select_mask = self.query.get_select_mask()
        if self.query.default_cols:
            cols = self.get_default_columns(select_mask)
        else:
            # self.query.select is a special case. These columns never go to
            # any model.
            cols = self.query.select
        if cols:
            select_list = []
            for col in cols:
                select_list.append(select_idx)
                select.append((col, None))
                select_idx += 1
            klass_info = {
                "model": self.query.model,
                "select_fields": select_list,
            }
        for alias, annotation in self.query.annotation_select.items():
            annotations[alias] = select_idx
            select.append((annotation, alias))
            select_idx += 1

        if self.query.select_related:
            related_klass_infos = self.get_related_selections(select, select_mask)
            klass_info["related_klass_infos"] = related_klass_infos

            def get_select_from_parent(klass_info):
                for ki in klass_info["related_klass_infos"]:
                    if ki["from_parent"]:
                        ki["select_fields"] = (
                            klass_info["select_fields"] + ki["select_fields"]
                        )
                    get_select_from_parent(ki)

            get_select_from_parent(klass_info)

        ret = []
        col_idx = 1
        for col, alias in select:
            try:
                sql, params = self.compile(col)
            except EmptyResultSet:
                empty_result_set_value = getattr(
                    col, "empty_result_set_value", NotImplemented
                )
                if empty_result_set_value is NotImplemented:
                    # Select a predicate that's always False.
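                    # (A hedged note: the constant below simply keeps the
                    # column count stable, e.g. rendering as "SELECT 0", so
                    # combined queries still line up positionally.)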
sql, params = "0", () else: sql, params = self.compile(Value(empty_result_set_value)) else: sql, params = col.select_format(self, sql, params) if alias is None and with_col_aliases: alias = f"col{col_idx}" col_idx += 1 ret.append((col, (sql, params), alias)) return ret, klass_info, annotations def _order_by_pairs(self): if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by elif self.query.order_by: ordering = self.query.order_by elif (meta := self.query.get_meta()) and meta.ordering: ordering = meta.ordering self._meta_ordering = ordering else: ordering = [] if self.query.standard_ordering: default_order, _ = ORDER_DIR["ASC"] else: default_order, _ = ORDER_DIR["DESC"] for field in ordering: if hasattr(field, "resolve_expression"): if isinstance(field, Value): # output_field must be resolved for constants. field = Cast(field, field.output_field) if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field = field.copy() field.reverse_ordering() yield field, False continue if field == "?": # random yield OrderBy(Random()), False continue col, order = get_order_dir(field, default_order) descending = order == "DESC" if col in self.query.annotation_select: # Reference to expression in SELECT clause yield ( OrderBy( Ref(col, self.query.annotation_select[col]), descending=descending, ), True, ) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT # clause. if self.query.combinator and self.select: # Don't use the resolved annotation because other # combinated queries might define it differently. expr = F(col) else: expr = self.query.annotations[col] if isinstance(expr, Value): # output_field must be resolved for constants. expr = Cast(expr, expr.output_field) yield OrderBy(expr, descending=descending), False continue if "." in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split(".", 1) yield ( OrderBy( RawSQL( "%s.%s" % (self.quote_name_unless_alias(table), col), [] ), descending=descending, ), False, ) continue if self.query.extra and col in self.query.extra: if col in self.query.extra_select: yield ( OrderBy( Ref(col, RawSQL(*self.query.extra[col])), descending=descending, ), True, ) else: yield ( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False, ) else: if self.query.combinator and self.select: # Don't use the first model's field because other # combinated queries might define it differently. yield OrderBy(F(col), descending=descending), False else: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. yield from self.find_ordering_name( field, self.query.get_meta(), default_order=default_order, ) def get_order_by(self): """ Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). 
""" result = [] seen = set() for expr, is_ref in self._order_by_pairs(): resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None) if self.query.combinator and self.select: src = resolved.get_source_expressions()[0] expr_src = expr.get_source_expressions()[0] # Relabel order by columns to raw numbers if this is a combined # query; necessary since the columns can't be referenced by the # fully qualified name and the simple column names may collide. for idx, (sel_expr, _, col_alias) in enumerate(self.select): if is_ref and col_alias == src.refs: src = src.source elif col_alias and not ( isinstance(expr_src, F) and col_alias == expr_src.name ): continue if src == sel_expr: resolved.set_source_expressions([RawSQL("%d" % (idx + 1), ())]) break else: if col_alias: raise DatabaseError( "ORDER BY term does not match any column in the result set." ) # Add column used in ORDER BY clause to the selected # columns and to each combined query. order_by_idx = len(self.query.select) + 1 col_name = f"__orderbycol{order_by_idx}" for q in self.query.combined_queries: q.add_annotation(expr_src, col_name) self.query.add_select_col(resolved, col_name) resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())]) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. without_ordering = self.ordering_parts.search(sql)[1] params_hash = make_hashable(params) if (without_ordering, params_hash) in seen: continue seen.add((without_ordering, params_hash)) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql)[1] if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). """ if name in self.quote_cache: return self.quote_cache[name] if ( (name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( self.query.external_aliases.get(name) and name not in self.query.table_map ) ): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node): vendor_impl = getattr(node, "as_" + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection, self.elide_empty) for query in self.query.combined_queries if not query.is_empty() ] if not features.supports_slicing_ordering_in_compound: for query, compiler in zip(self.query.combined_queries, compilers): if query.low_mark or query.high_mark: raise DatabaseError( "LIMIT/OFFSET not allowed in subqueries of compound statements." 
) if compiler.get_order_by(): raise DatabaseError( "ORDER BY not allowed in subqueries of compound statements." ) parts = () for compiler in compilers: try: # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. if not compiler.query.values_select and self.query.values_select: compiler.query = compiler.query.clone() compiler.query.set_values( ( *self.query.extra_select, *self.query.values_select, *self.query.annotation_select, ) ) part_sql, part_args = compiler.as_sql() if compiler.query.combinator: # Wrap in a subquery if wrapping in parentheses isn't # supported. if not features.supports_parentheses_in_compound: part_sql = "SELECT * FROM ({})".format(part_sql) # Add parentheses when combining with compound query if not # already added for all compound queries. elif ( self.query.subquery or not features.supports_slicing_ordering_in_compound ): part_sql = "({})".format(part_sql) elif ( self.query.subquery and features.supports_slicing_ordering_in_compound ): part_sql = "({})".format(part_sql) parts += ((part_sql, part_args),) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. if combinator == "union" or (combinator == "difference" and parts): continue raise if not parts: raise EmptyResultSet combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == "union": combinator_sql += " ALL" braces = "{}" if not self.query.subquery and features.supports_slicing_ordering_in_compound: braces = "({})" sql_parts, args_parts = zip( *((braces.format(sql), args) for sql, args in parts) ) result = [" {} ".format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def get_qualify_sql(self): where_parts = [] if self.where: where_parts.append(self.where) if self.having: where_parts.append(self.having) inner_query = self.query.clone() inner_query.subquery = True inner_query.where = inner_query.where.__class__(where_parts) # Augment the inner query with any window function references that # might have been masked via values() and alias(). If any masked # aliases are added they'll be masked again to avoid fetching # the data in the `if qual_aliases` branch below. 
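        # Illustrative sketch (hypothetical window annotation, not part of
        # this module):
        #   Employee.objects.annotate(
        #       rank=Window(Rank(), order_by=F("salary").desc())
        #   ).filter(rank__lte=3)
        # can't put rank in WHERE, so the window expression is computed in
        # the inner query and filtered by the wrapping
        # "SELECT * FROM (...) qualify WHERE ..." statement built below.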
select = { expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0] } select_aliases = set(select.values()) qual_aliases = set() replacements = {} def collect_replacements(expressions): while expressions: expr = expressions.pop() if expr in replacements: continue elif select_alias := select.get(expr): replacements[expr] = select_alias elif isinstance(expr, Lookup): expressions.extend(expr.get_source_expressions()) elif isinstance(expr, Ref): if expr.refs not in select_aliases: expressions.extend(expr.get_source_expressions()) else: num_qual_alias = len(qual_aliases) select_alias = f"qual{num_qual_alias}" qual_aliases.add(select_alias) inner_query.add_annotation(expr, select_alias) replacements[expr] = select_alias collect_replacements(list(self.qualify.leaves())) self.qualify = self.qualify.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) order_by = [] for order_by_expr, *_ in self.get_order_by(): collect_replacements(order_by_expr.get_source_expressions()) order_by.append( order_by_expr.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) ) inner_query_compiler = inner_query.get_compiler( self.using, elide_empty=self.elide_empty ) inner_sql, inner_params = inner_query_compiler.as_sql( # The limits must be applied to the outer query to avoid pruning # results too eagerly. with_limits=False, # Force unique aliasing of selected columns to avoid collisions # and make rhs predicates referencing easier. with_col_aliases=True, ) qualify_sql, qualify_params = self.compile(self.qualify) result = [ "SELECT * FROM (", inner_sql, ")", self.connection.ops.quote_name("qualify"), "WHERE", qualify_sql, ] if qual_aliases: # If some select aliases were unmasked for filtering purposes they # must be masked back. cols = [self.connection.ops.quote_name(alias) for alias in select.values()] result = [ "SELECT", ", ".join(cols), "FROM (", *result, ")", self.connection.ops.quote_name("qualify_mask"), ] params = list(inner_params) + qualify_params # As the SQL spec is unclear on whether or not derived tables # ordering must propagate it has to be explicitly repeated on the # outer-most query to ensure it's preserved. if order_by: ordering_sqls = [] for ordering in order_by: ordering_sql, ordering_params = self.compile(ordering) ordering_sqls.append(ordering_sql) params.extend(ordering_params) result.extend(["ORDER BY", ", ".join(ordering_sqls)]) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup( with_col_aliases=with_col_aliases, ) for_update_part = None # Is a LIMIT/OFFSET clause needed? 
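            # A minimal example (assumption): qs[5:15] stores low_mark=5 and
            # high_mark=15, later rendered by the backend as
            # "LIMIT 10 OFFSET 5" where that syntax applies.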
with_limit_offset = with_limits and ( self.query.high_mark is not None or self.query.low_mark ) combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, "supports_select_{}".format(combinator)): raise NotSupportedError( "{} is not supported on this database backend.".format( combinator ) ) result, params = self.get_combinator_sql( combinator, self.query.combinator_all ) elif self.qualify: result, params = self.get_qualify_sql() order_by = None else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). from_, f_params = self.get_from_clause() try: where, w_params = ( self.compile(self.where) if self.where is not None else ("", []) ) except EmptyResultSet: if self.elide_empty: raise # Use a predicate that's always False. where, w_params = "0 = 1", [] having, h_params = ( self.compile(self.having) if self.having is not None else ("", []) ) result = ["SELECT"] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = "%s AS %s" % ( s_sql, self.connection.ops.quote_name(alias), ) params.extend(s_params) out_cols.append(s_sql) result += [", ".join(out_cols)] if from_: result += ["FROM", *from_] elif self.connection.features.bare_select_suffix: result += [self.connection.features.bare_select_suffix] params.extend(f_params) if self.query.select_for_update and features.has_select_for_update: if ( self.connection.get_autocommit() # Don't raise an exception when database doesn't # support transactions, as it's a noop. and features.supports_transactions ): raise TransactionManagementError( "select_for_update cannot be used outside of a transaction." ) if ( with_limit_offset and not features.supports_select_for_update_with_limit ): raise NotSupportedError( "LIMIT/OFFSET is not supported with " "select_for_update on this database backend." ) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of no_key = self.query.select_for_no_key_update # If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the # backend doesn't support it, raise NotSupportedError to # prevent a possible deadlock. if nowait and not features.has_select_for_update_nowait: raise NotSupportedError( "NOWAIT is not supported on this database backend." ) elif skip_locked and not features.has_select_for_update_skip_locked: raise NotSupportedError( "SKIP LOCKED is not supported on this database backend." ) elif of and not features.has_select_for_update_of: raise NotSupportedError( "FOR UPDATE OF is not supported on this database backend." ) elif no_key and not features.has_select_for_no_key_update: raise NotSupportedError( "FOR NO KEY UPDATE is not supported on this " "database backend." 
) for_update_part = self.connection.ops.for_update_sql( nowait=nowait, skip_locked=skip_locked, of=self.get_select_for_update_of_arguments(), no_key=no_key, ) if for_update_part and features.for_update_after_from: result.append(for_update_part) if where: result.append("WHERE %s" % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError( "annotate() + distinct(fields) is not implemented." ) order_by = order_by or self.connection.ops.force_no_ordering() result.append("GROUP BY %s" % ", ".join(grouping)) if self._meta_ordering: order_by = None if having: result.append("HAVING %s" % having) params.extend(h_params) if self.query.explain_info: result.insert( 0, self.connection.ops.explain_query_prefix( self.query.explain_info.format, **self.query.explain_info.options, ), ) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) result.append("ORDER BY %s" % ", ".join(ordering)) if with_limit_offset: result.append( self.connection.ops.limit_offset_sql( self.query.low_mark, self.query.high_mark ) ) if for_update_part and not features.for_update_after_from: result.append(for_update_part) if self.query.subquery and extra_select: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination # of order_by() and distinct(), forcing the ordering expressions # to be selected as well. Wrap the query in another subquery # to exclude extraneous selects. sub_selects = [] sub_params = [] for index, (select, _, alias) in enumerate(self.select, start=1): if alias: sub_selects.append( "%s.%s" % ( self.connection.ops.quote_name("subquery"), self.connection.ops.quote_name(alias), ) ) else: select_clone = select.relabeled_clone( {select.alias: "subquery"} ) subselect, subparams = select_clone.as_sql( self, self.connection ) sub_selects.append(subselect) sub_params.extend(subparams) return "SELECT %s FROM (%s) subquery" % ( ", ".join(sub_selects), " ".join(result), ), tuple(sub_params + params) return " ".join(result), tuple(params) finally: # Finally do cleanup - get rid of the joins we created above. self.query.reset_refcounts(refcounts_before) def get_default_columns( self, select_mask, start_alias=None, opts=None, from_parent=None ): """ Compute the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Return a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, return a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). """ result = [] if opts is None: if (opts := self.query.get_meta()) is None: return result start_alias = start_alias or self.query.get_initial_alias() # The 'seen_models' is used to optimize checking the needed parent # alias for a given field. This also includes None -> start_alias to # be used by local fields. seen_models = {None: start_alias} for field in opts.concrete_fields: model = field.model._meta.concrete_model # A proxy model will have a different model and concrete_model. We # will assign None if the field belongs to this model. 
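            # (join_parent_model() below then resolves a None model to
            # start_alias via the seen_models mapping.)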
if model == opts.model: model = None if ( from_parent and model is not None and issubclass( from_parent._meta.concrete_model, model._meta.concrete_model ) ): # Avoid loading data for already loaded parents. # We end up here in the case select_related() resolution # proceeds from parent model to child model. In that case the # parent model data is already present in the SELECT clause, # and we want to avoid reloading the same data again. continue if select_mask and field not in select_mask: continue alias = self.query.join_parent_model(opts, model, start_alias, seen_models) column = field.get_col(alias) result.append(column) return result def get_distinct(self): """ Return a quoted list of fields to use in DISTINCT ON part of the query. This method can alter the tables in the query, and thus it must be called before get_from_clause(). """ result = [] params = [] opts = self.query.get_meta() for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) _, targets, alias, joins, path, _, transform_function = self._setup_joins( parts, opts, None ) targets, alias, _ = self.query.trim_joins(targets, joins, path) for target in targets: if name in self.query.annotation_select: result.append(self.connection.ops.quote_name(name)) else: r, p = self.compile(transform_function(target, alias)) result.append(r) params.append(p) return result, params def find_ordering_name( self, name, opts, alias=None, default_order="ASC", already_seen=None ): """ Return the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) descending = order == "DESC" pieces = name.split(LOOKUP_SEP) ( field, targets, alias, joins, path, opts, transform_function, ) = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model unless it is the pk # shortcut or the attribute name of the field that is specified or # there are transforms to process. if ( field.is_relation and opts.ordering and getattr(field, "attname", None) != pieces[-1] and name != "pk" and not getattr(transform_function, "has_transforms", False) ): # Firstly, avoid infinite loops. already_seen = already_seen or set() join_tuple = tuple( getattr(self.query.alias_map[j], "join_cols", None) for j in joins ) if join_tuple in already_seen: raise FieldError("Infinite loop caused by ordering.") already_seen.add(join_tuple) results = [] for item in opts.ordering: if hasattr(item, "resolve_expression") and not isinstance( item, OrderBy ): item = item.desc() if descending else item.asc() if isinstance(item, OrderBy): results.append( (item.prefix_references(f"{name}{LOOKUP_SEP}"), False) ) continue results.extend( (expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref) for expr, is_ref in self.find_ordering_name( item, opts, alias, order, already_seen ) ) return results targets, alias, _ = self.query.trim_joins(targets, joins, path) return [ (OrderBy(transform_function(t, alias), descending=descending), False) for t in targets ] def _setup_joins(self, pieces, opts, alias): """ Helper method for get_order_by() and get_distinct(). get_ordering() and get_distinct() must produce same target columns on same input, as the prefixes of get_ordering() and get_distinct() must match. Executing SQL where this is not true is an error. 
""" alias = alias or self.query.get_initial_alias() field, targets, opts, joins, path, transform_function = self.query.setup_joins( pieces, opts, alias ) alias = joins[-1] return field, targets, alias, joins, path, opts, transform_function def get_from_clause(self): """ Return a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Subclasses, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables that are needed. This means the select columns, ordering, and distinct must be done first. """ result = [] params = [] for alias in tuple(self.query.alias_map): if not self.query.alias_refcount[alias]: continue try: from_clause = self.query.alias_map[alias] except KeyError: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue clause_sql, clause_params = self.compile(from_clause) result.append(clause_sql) params.extend(clause_params) for t in self.query.extra_tables: alias, _ = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # call increments the refcount, so an alias refcount of one means # this is the only reference). if ( alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1 ): result.append(", %s" % self.quote_name_unless_alias(alias)) return result, params def get_related_selections( self, select, select_mask, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None, ): """ Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ def _get_field_choices(): direct_choices = (f.name for f in opts.fields if f.is_relation) reverse_choices = ( f.field.related_query_name() for f in opts.related_objects if f.field.unique ) return chain( direct_choices, reverse_choices, self.query._filtered_relations ) related_klass_infos = [] if not restricted and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() # Setup for the case when only particular related fields should be # included in the related selection. fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info["related_klass_infos"] = related_klass_infos for f in opts.fields: fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. 
" "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or "(none)", ) ) else: next = False if not select_related_descend(f, restricted, requested, select_mask): continue related_select_mask = select_mask.get(f) or {} klass_info = { "model": f.remote_field.model, "field": f, "reverse": False, "local_setter": f.set_cached_value, "remote_setter": f.remote_field.set_cached_value if f.unique else lambda x, y: None, "from_parent": False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=f.remote_field.model._meta ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_klass_infos = self.get_related_selections( select, related_select_mask, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: related_select_mask = select_mask.get(f) or {} if not select_related_descend( f, restricted, requested, related_select_mask, reverse=True ): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins( [related_field_name], opts, root_alias ) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { "model": model, "field": f, "reverse": True, "local_setter": f.remote_field.set_cached_value, "remote_setter": f.set_cached_value, "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, related_select_mask, model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) def local_setter(obj, from_obj): # Set a reverse fk object when relation is non-empty. if from_obj: f.remote_field.set_cached_value(from_obj, obj) def remote_setter(name, obj, from_obj): setattr(from_obj, name, obj) for name in list(requested): # Filtered relations work only on the topmost level. 
if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) f, _, join_opts, joins, _, _ = self.query.setup_joins( [name], opts, root_alias ) model = join_opts.model alias = joins[-1] from_parent = ( issubclass(model, opts.model) and model is not opts.model ) klass_info = { "model": model, "field": f, "reverse": True, "local_setter": local_setter, "remote_setter": partial(remote_setter, name), "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] field_select_mask = select_mask.get((name, f)) or {} columns = self.get_default_columns( field_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, field_select_mask, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( "Invalid field name(s) given in select_related: %s. " "Choices are: %s" % ( ", ".join(invalid_fields), ", ".join(_get_field_choices()) or "(none)", ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_parent_klass_info(klass_info): concrete_model = klass_info["model"]._meta.concrete_model for parent_model, parent_link in concrete_model._meta.parents.items(): parent_list = parent_model._meta.get_parent_list() yield { "model": parent_model, "field": parent_link, "reverse": False, "select_fields": [ select_index for select_index in klass_info["select_fields"] # Selected columns from a model or its parents. if ( self.select[select_index][0].target.model == parent_model or self.select[select_index][0].target.model in parent_list ) ], } def _get_first_selected_col_from_model(klass_info): """ Find the first selected column from a model. If it doesn't exist, don't lock a model. select_fields is filled recursively, so it also contains fields from the parent models. 
""" concrete_model = klass_info["model"]._meta.concrete_model for select_index in klass_info["select_fields"]: if self.select[select_index][0].target.model == concrete_model: return self.select[select_index][0] def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield "self" else: field = klass_info["field"] if klass_info["reverse"]: field = field.remote_field path = parent_path + [field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in _get_parent_klass_info(klass_info) ) queue.extend( (path, klass_info) for klass_info in klass_info.get("related_klass_infos", []) ) if not self.klass_info: return [] result = [] invalid_names = [] for name in self.query.select_for_update_of: klass_info = self.klass_info if name == "self": col = _get_first_selected_col_from_model(klass_info) else: for part in name.split(LOOKUP_SEP): klass_infos = ( *klass_info.get("related_klass_infos", []), *_get_parent_klass_info(klass_info), ) for related_klass_info in klass_infos: field = related_klass_info["field"] if related_klass_info["reverse"]: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue col = _get_first_selected_col_from_model(klass_info) if col is not None: if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( "Invalid field name(s) given in select_for_update(of=(...)): %s. " "Only relational fields followed in the query are allowed. " "Choices are: %s." % ( ", ".join(invalid_names), ", ".join(_get_field_choices()), ) ) return result def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def results_iter( self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE, ): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql( MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size ) fields = [s[0] for s in self.select[0 : self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ return bool(self.execute_sql(SINGLE)) def execute_sql( self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE ): """ Run the query against the database and return the result(s). 
        The return value is a single data item if result_type is SINGLE, or an
        iterator over the results if the result_type is MULTI.

        result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
        is needed, as the filters describe an empty set. In that case, None is
        returned, to avoid any unnecessary database interaction.
        """
        result_type = result_type or NO_RESULTS
        try:
            sql, params = self.as_sql()
            if not sql:
                raise EmptyResultSet
        except EmptyResultSet:
            if result_type == MULTI:
                return iter([])
            else:
                return
        if chunked_fetch:
            cursor = self.connection.chunked_cursor()
        else:
            cursor = self.connection.cursor()
        try:
            cursor.execute(sql, params)
        except Exception:
            # Might fail for server-side cursors (e.g. connection closed)
            cursor.close()
            raise

        if result_type == CURSOR:
            # Give the caller the cursor to process and close.
            return cursor
        if result_type == SINGLE:
            try:
                val = cursor.fetchone()
                if val:
                    return val[0 : self.col_count]
                return val
            finally:
                # done with the cursor
                cursor.close()
        if result_type == NO_RESULTS:
            cursor.close()
            return

        result = cursor_iter(
            cursor,
            self.connection.features.empty_fetchmany_value,
            self.col_count if self.has_extra_select else None,
            chunk_size,
        )
        if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
            # If we are using non-chunked reads, we return the same data
            # structure as normally, but ensure it is all read into memory
            # before going any further. Use chunked_fetch if requested,
            # unless the database doesn't support it.
            return list(result)
        return result

    def as_subquery_condition(self, alias, columns, compiler):
        qn = compiler.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name

        for index, select_col in enumerate(self.query.select):
            lhs_sql, lhs_params = self.compile(select_col)
            rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
            self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND)

        sql, params = self.as_sql()
        return "EXISTS (%s)" % sql, params

    def explain_query(self):
        result = list(self.execute_sql())
        # Some backends return 1 item tuples with strings, and others return
        # tuples with integers and strings. Flatten them out into strings.
        format_ = self.query.explain_info.format
        output_formatter = json.dumps if format_ and format_.lower() == "json" else str
        for row in result[0]:
            if not isinstance(row, str):
                yield " ".join(output_formatter(c) for c in row)
            else:
                yield row


class SQLInsertCompiler(SQLCompiler):
    returning_fields = None
    returning_params = ()

    def field_as_sql(self, field, val):
        """
        Take a field and a value intended to be saved on that field, and
        return placeholder SQL and accompanying params. Check for raw values,
        expressions, and fields with get_placeholder() defined in that order.

        When field is None, consider the value raw and use it as the
        placeholder, with no corresponding parameters returned.
        """
        if field is None:
            # A field value of None means the value is raw.
            sql, params = val, []
        elif hasattr(val, "as_sql"):
            # This is an expression, let's compile it.
            sql, params = self.compile(val)
        elif hasattr(field, "get_placeholder"):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
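            # get_placeholder() supplies custom placeholder SQL (for example,
            # a function call wrapped around %s) while the raw value is still
            # passed separately as a query parameter.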
sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = "%s", [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, "resolve_expression"): value = value.resolve_expression( self.query, allow_joins=False, for_save=True ) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' "can only be used to update, not to insert." % (value, field) ) if value.contains_aggregate: raise FieldError( "Aggregate functions are not allowed in this query " "(%s=%r)." % (field.name, value) ) if value.contains_over_clause: raise FieldError( "Window expressions are not allowed in this query (%s=%r)." % (field.name, value) ) else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. """ if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). 
qn = self.connection.ops.quote_name opts = self.query.get_meta() insert_statement = self.connection.ops.insert_statement( on_conflict=self.query.on_conflict, ) result = ["%s %s" % (insert_statement, qn(opts.db_table))] fields = self.query.fields or [opts.pk] result.append("(%s)" % ", ".join(qn(f.column) for f in fields)) if self.query.fields: value_rows = [ [ self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields ] for obj in self.query.objs ] else: # An empty object. value_rows = [ [self.connection.ops.pk_default_value()] for _ in self.query.objs ] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = ( not self.returning_fields and self.connection.features.has_bulk_insert ) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql( fields, self.query.on_conflict, self.query.update_fields, self.query.unique_fields, ) if ( self.returning_fields and self.connection.features.can_return_columns_from_insert ): if self.connection.features.can_return_rows_from_bulk_insert: result.append( self.connection.ops.bulk_insert_sql(fields, placeholder_rows) ) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) # Skip empty r_sql to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. r_sql, self.returning_params = self.connection.ops.return_insert_columns( self.returning_fields ) if r_sql: result.append(r_sql) params += [self.returning_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, returning_fields=None): assert not ( returning_fields and len(self.query.objs) != 1 and not self.connection.features.can_return_rows_from_bulk_insert ) opts = self.query.get_meta() self.returning_fields = returning_fields with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not self.returning_fields: return [] if ( self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1 ): rows = self.connection.ops.fetch_returned_insert_rows(cursor) elif self.connection.features.can_return_columns_from_insert: assert len(self.query.objs) == 1 rows = [ self.connection.ops.fetch_returned_insert_columns( cursor, self.returning_params, ) ] else: rows = [ ( self.connection.ops.last_insert_id( cursor, opts.db_table, opts.pk.column, ), ) ] cols = [field.get_col(opts.db_table) for field in self.returning_fields] converters = self.get_converters(cols) if converters: rows = list(self.apply_converters(rows, converters)) return rows class SQLDeleteCompiler(SQLCompiler): @cached_property def single_alias(self): # Ensure base table is in aliases. 
self.query.get_initial_alias() return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1 @classmethod def _expr_refs_base_model(cls, expr, base_model): if isinstance(expr, Query): return expr.model == base_model if not hasattr(expr, "get_source_expressions"): return False return any( cls._expr_refs_base_model(source_expr, base_model) for source_expr in expr.get_source_expressions() ) @cached_property def contains_self_reference_subquery(self): return any( self._expr_refs_base_model(expr, self.query.model) for expr in chain( self.query.annotations.values(), self.query.where.children ) ) def _as_sql(self, query): result = ["DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)] where, params = self.compile(query.where) if where: result.append("WHERE %s" % where) return " ".join(result), tuple(params) def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ if self.single_alias and not self.contains_self_reference_subquery: return self._as_sql(self.query) innerq = self.query.clone() innerq.__class__ = Query innerq.clear_select_clause() pk = self.query.model._meta.pk innerq.select = [pk.get_col(self.query.get_initial_alias())] outerq = Query(self.query.model) if not self.connection.features.update_can_self_select: # Force the materialization of the inner query to allow reference # to the target table on MySQL. sql, params = innerq.get_compiler(connection=self.connection).as_sql() innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params) outerq.add_filter("pk__in", innerq) return self._as_sql(outerq) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return "", () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, "resolve_expression"): val = val.resolve_expression( self.query, allow_joins=False, for_save=True ) if val.contains_aggregate: raise FieldError( "Aggregate functions are not allowed in this query " "(%s=%r)." % (field.name, val) ) if val.contains_over_clause: raise FieldError( "Window expressions are not allowed in this query " "(%s=%r)." % (field.name, val) ) elif hasattr(val, "prepare_database_save"): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. if hasattr(field, "get_placeholder"): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = "%s" name = field.column if hasattr(val, "as_sql"): sql, params = self.compile(val) values.append("%s = %s" % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append("%s = %s" % (qn(name), placeholder)) update_params.append(val) else: values.append("%s = NULL" % qn(name)) table = self.query.base_table result = [ "UPDATE %s SET" % qn(table), ", ".join(values), ] where, params = self.compile(self.query.where) if where: result.append("WHERE %s" % where) return " ".join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. 
The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(force=True) query.extra = {} query.select = [] meta = query.get_meta() fields = [meta.pk.name] related_ids_index = [] for related in self.query.related_updates: if all( path.join_field.primary_key for path in meta.get_path_to_parent(related) ): # If a primary key chain exists to the targeted related update, # then the meta.pk value can be used for it. related_ids_index.append((related, 0)) else: # This branch will only be reached when updating a field of an # ancestor that is not part of the primary key chain of a MTI # tree. related_ids_index.append((related, len(fields))) fields.append(related._meta.pk.name) query.add_fields(fields) super().pre_sql_setup() must_pre_select = ( count > 1 and not self.connection.features.update_can_self_select ) # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.clear_where() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] related_ids = collections.defaultdict(list) for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) for parent, index in related_ids_index: related_ids[parent].extend(r[index] for r in rows) self.query.add_filter("pk__in", idents) self.query.related_ids = related_ids else: # The fast path. Filters and updates in one query. self.query.add_filter("pk__in", query) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. 
""" sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation) ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ", ".join(sql) params = tuple(params) inner_query_sql, inner_query_params = self.query.inner_query.get_compiler( self.using, elide_empty=self.elide_empty, ).as_sql(with_col_aliases=True) sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql) params = params + inner_query_params return sql, params def cursor_iter(cursor, sentinel, col_count, itersize): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. """ try: for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): yield rows if col_count is None else [r[:col_count] for r in rows] finally: cursor.close()
424ef410cad8daf704a87dfc697bfad5d3af436edde679493fa1f9bd0c905191
import asyncio import collections import logging import os import re import sys import time import warnings from contextlib import contextmanager from functools import wraps from io import StringIO from itertools import chain from types import SimpleNamespace from unittest import TestCase, skipIf, skipUnless from xml.dom.minidom import Node, parseString from django.apps import apps from django.apps.registry import Apps from django.conf import UserSettingsHolder, settings from django.core import mail from django.core.exceptions import ImproperlyConfigured from django.core.signals import request_started, setting_changed from django.db import DEFAULT_DB_ALIAS, connections, reset_queries from django.db.models.options import Options from django.template import Template from django.test.signals import template_rendered from django.urls import get_script_prefix, set_script_prefix from django.utils.deprecation import RemovedInDjango50Warning from django.utils.translation import deactivate try: import jinja2 except ImportError: jinja2 = None __all__ = ( "Approximate", "ContextList", "isolate_lru_cache", "get_runner", "CaptureQueriesContext", "ignore_warnings", "isolate_apps", "modify_settings", "override_settings", "override_system_checks", "tag", "requires_tz_support", "setup_databases", "setup_test_environment", "teardown_test_environment", ) TZ_SUPPORT = hasattr(time, "tzset") class Approximate: def __init__(self, val, places=7): self.val = val self.places = places def __repr__(self): return repr(self.val) def __eq__(self, other): return self.val == other or round(abs(self.val - other), self.places) == 0 class ContextList(list): """ A wrapper that provides direct key access to context items contained in a list of context objects. """ def __getitem__(self, key): if isinstance(key, str): for subcontext in self: if key in subcontext: return subcontext[key] raise KeyError(key) else: return super().__getitem__(key) def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default def __contains__(self, key): try: self[key] except KeyError: return False return True def keys(self): """ Flattened keys of subcontexts. """ return set(chain.from_iterable(d for subcontext in self for d in subcontext)) def instrumented_test_render(self, context): """ An instrumented Template render method, providing a signal that can be intercepted by the test Client. """ template_rendered.send(sender=self, template=self, context=context) return self.nodelist.render(context) class _TestState: pass def setup_test_environment(debug=None): """ Perform global pre-test setup, such as installing the instrumented template renderer and setting the email backend to the locmem email backend. """ if hasattr(_TestState, "saved_data"): # Executing this function twice would overwrite the saved values. raise RuntimeError( "setup_test_environment() was already called and can't be called " "again without first calling teardown_test_environment()." ) if debug is None: debug = settings.DEBUG saved_data = SimpleNamespace() _TestState.saved_data = saved_data saved_data.allowed_hosts = settings.ALLOWED_HOSTS # Add the default host of the test client. 
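    # (teardown_test_environment() restores the value saved just above.)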
    settings.ALLOWED_HOSTS = [*settings.ALLOWED_HOSTS, "testserver"]
    saved_data.debug = settings.DEBUG
    settings.DEBUG = debug
    saved_data.email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
    saved_data.template_render = Template._render
    Template._render = instrumented_test_render
    mail.outbox = []
    deactivate()


def teardown_test_environment():
    """
    Perform any global post-test teardown, such as restoring the original
    template renderer and restoring the email sending functions.
    """
    saved_data = _TestState.saved_data
    settings.ALLOWED_HOSTS = saved_data.allowed_hosts
    settings.DEBUG = saved_data.debug
    settings.EMAIL_BACKEND = saved_data.email_backend
    Template._render = saved_data.template_render

    del _TestState.saved_data
    del mail.outbox


def setup_databases(
    verbosity,
    interactive,
    *,
    time_keeper=None,
    keepdb=False,
    debug_sql=False,
    parallel=0,
    aliases=None,
    serialized_aliases=None,
    **kwargs,
):
    """Create the test databases."""
    if time_keeper is None:
        time_keeper = NullTimeKeeper()

    test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases)

    old_names = []

    for db_name, aliases in test_databases.values():
        first_alias = None
        for alias in aliases:
            connection = connections[alias]
            old_names.append((connection, db_name, first_alias is None))

            # Actually create the database for the first connection
            if first_alias is None:
                first_alias = alias
                with time_keeper.timed("  Creating '%s'" % alias):
                    # RemovedInDjango50Warning: when the deprecation ends,
                    # replace with:
                    # serialize_alias = (
                    #     serialized_aliases is None
                    #     or alias in serialized_aliases
                    # )
                    try:
                        serialize_alias = connection.settings_dict["TEST"]["SERIALIZE"]
                    except KeyError:
                        serialize_alias = (
                            serialized_aliases is None or alias in serialized_aliases
                        )
                    else:
                        warnings.warn(
                            "The SERIALIZE test database setting is "
                            "deprecated as it can be inferred from the "
                            "TestCase/TransactionTestCase.databases that "
                            "enable the serialized_rollback feature.",
                            category=RemovedInDjango50Warning,
                        )
                    connection.creation.create_test_db(
                        verbosity=verbosity,
                        autoclobber=not interactive,
                        keepdb=keepdb,
                        serialize=serialize_alias,
                    )
                if parallel > 1:
                    for index in range(parallel):
                        with time_keeper.timed("  Cloning '%s'" % alias):
                            connection.creation.clone_test_db(
                                suffix=str(index + 1),
                                verbosity=verbosity,
                                keepdb=keepdb,
                            )
            # Configure all other connections as mirrors of the first one
            else:
                connections[alias].creation.set_as_test_mirror(
                    connections[first_alias].settings_dict
                )

    # Configure the test mirrors.
    for alias, mirror_alias in mirrored_aliases.items():
        connections[alias].creation.set_as_test_mirror(
            connections[mirror_alias].settings_dict
        )

    if debug_sql:
        for alias in connections:
            connections[alias].force_debug_cursor = True

    return old_names


def iter_test_cases(tests):
    """
    Return an iterator over a test suite's unittest.TestCase objects.

    The tests argument can also be an iterable of TestCase objects.
    """
    for test in tests:
        if isinstance(test, str):
            # Prevent an unfriendly RecursionError that can happen with
            # strings.
            raise TypeError(
                f"Test {test!r} must be a test case or test suite, not a "
                f"string (was found in {tests!r})."
            )
        if isinstance(test, TestCase):
            yield test
        else:
            # Otherwise, assume it is a test suite.
            yield from iter_test_cases(test)


def dependency_ordered(test_databases, dependencies):
    """
    Reorder test_databases into an order that honors the dependencies
    described in TEST[DEPENDENCIES].
""" ordered_test_databases = [] resolved_databases = set() # Maps db signature to dependencies of all its aliases dependencies_map = {} # Check that no database depends on its own alias for sig, (_, aliases) in test_databases: all_deps = set() for alias in aliases: all_deps.update(dependencies.get(alias, [])) if not all_deps.isdisjoint(aliases): raise ImproperlyConfigured( "Circular dependency: databases %r depend on each other, " "but are aliases." % aliases ) dependencies_map[sig] = all_deps while test_databases: changed = False deferred = [] # Try to find a DB that has all its dependencies met for signature, (db_name, aliases) in test_databases: if dependencies_map[signature].issubset(resolved_databases): resolved_databases.update(aliases) ordered_test_databases.append((signature, (db_name, aliases))) changed = True else: deferred.append((signature, (db_name, aliases))) if not changed: raise ImproperlyConfigured("Circular dependency in TEST[DEPENDENCIES]") test_databases = deferred return ordered_test_databases def get_unique_databases_and_mirrors(aliases=None): """ Figure out which databases actually need to be created. Deduplicate entries in DATABASES that correspond the same database or are configured as test mirrors. Return two values: - test_databases: ordered mapping of signatures to (name, list of aliases) where all aliases share the same underlying database. - mirrored_aliases: mapping of mirror aliases to original aliases. """ if aliases is None: aliases = connections mirrored_aliases = {} test_databases = {} dependencies = {} default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature() for alias in connections: connection = connections[alias] test_settings = connection.settings_dict["TEST"] if test_settings["MIRROR"]: # If the database is marked as a test mirror, save the alias. mirrored_aliases[alias] = test_settings["MIRROR"] elif alias in aliases: # Store a tuple with DB parameters that uniquely identify it. # If we have two aliases with the same values for that tuple, # we only need to create the test database once. item = test_databases.setdefault( connection.creation.test_db_signature(), (connection.settings_dict["NAME"], []), ) # The default database must be the first because data migrations # use the default alias by default. if alias == DEFAULT_DB_ALIAS: item[1].insert(0, alias) else: item[1].append(alias) if "DEPENDENCIES" in test_settings: dependencies[alias] = test_settings["DEPENDENCIES"] else: if ( alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig ): dependencies[alias] = test_settings.get( "DEPENDENCIES", [DEFAULT_DB_ALIAS] ) test_databases = dict(dependency_ordered(test_databases.items(), dependencies)) return test_databases, mirrored_aliases def teardown_databases(old_config, verbosity, parallel=0, keepdb=False): """Destroy all the non-mirror databases.""" for connection, old_name, destroy in old_config: if destroy: if parallel > 1: for index in range(parallel): connection.creation.destroy_test_db( suffix=str(index + 1), verbosity=verbosity, keepdb=keepdb, ) connection.creation.destroy_test_db(old_name, verbosity, keepdb) def get_runner(settings, test_runner_class=None): test_runner_class = test_runner_class or settings.TEST_RUNNER test_path = test_runner_class.split(".") # Allow for relative paths if len(test_path) > 1: test_module_name = ".".join(test_path[:-1]) else: test_module_name = "." 
test_module = __import__(test_module_name, {}, {}, test_path[-1]) return getattr(test_module, test_path[-1]) class TestContextDecorator: """ A base class that can either be used as a context manager during tests or as a test function or unittest.TestCase subclass decorator to perform temporary alterations. `attr_name`: attribute assigned the return value of enable() if used as a class decorator. `kwarg_name`: keyword argument passing the return value of enable() if used as a function decorator. """ def __init__(self, attr_name=None, kwarg_name=None): self.attr_name = attr_name self.kwarg_name = kwarg_name def enable(self): raise NotImplementedError def disable(self): raise NotImplementedError def __enter__(self): return self.enable() def __exit__(self, exc_type, exc_value, traceback): self.disable() def decorate_class(self, cls): if issubclass(cls, TestCase): decorated_setUp = cls.setUp def setUp(inner_self): context = self.enable() inner_self.addCleanup(self.disable) if self.attr_name: setattr(inner_self, self.attr_name, context) decorated_setUp(inner_self) cls.setUp = setUp return cls raise TypeError("Can only decorate subclasses of unittest.TestCase") def decorate_callable(self, func): if asyncio.iscoroutinefunction(func): # If the inner function is an async function, we must execute async # as well so that the `with` statement executes at the right time. @wraps(func) async def inner(*args, **kwargs): with self as context: if self.kwarg_name: kwargs[self.kwarg_name] = context return await func(*args, **kwargs) else: @wraps(func) def inner(*args, **kwargs): with self as context: if self.kwarg_name: kwargs[self.kwarg_name] = context return func(*args, **kwargs) return inner def __call__(self, decorated): if isinstance(decorated, type): return self.decorate_class(decorated) elif callable(decorated): return self.decorate_callable(decorated) raise TypeError("Cannot decorate object of type %s" % type(decorated)) class override_settings(TestContextDecorator): """ Act as either a decorator or a context manager. If it's a decorator, take a function and return a wrapped function. If it's a contextmanager, use it with the ``with`` statement. In either event, entering/exiting are called before and after, respectively, the function/block is executed. """ enable_exception = None def __init__(self, **kwargs): self.options = kwargs super().__init__() def enable(self): # Keep this code at the beginning to leave the settings unchanged # in case it raises an exception because INSTALLED_APPS is invalid. 
if "INSTALLED_APPS" in self.options: try: apps.set_installed_apps(self.options["INSTALLED_APPS"]) except Exception: apps.unset_installed_apps() raise override = UserSettingsHolder(settings._wrapped) for key, new_value in self.options.items(): setattr(override, key, new_value) self.wrapped = settings._wrapped settings._wrapped = override for key, new_value in self.options.items(): try: setting_changed.send( sender=settings._wrapped.__class__, setting=key, value=new_value, enter=True, ) except Exception as exc: self.enable_exception = exc self.disable() def disable(self): if "INSTALLED_APPS" in self.options: apps.unset_installed_apps() settings._wrapped = self.wrapped del self.wrapped responses = [] for key in self.options: new_value = getattr(settings, key, None) responses_for_setting = setting_changed.send_robust( sender=settings._wrapped.__class__, setting=key, value=new_value, enter=False, ) responses.extend(responses_for_setting) if self.enable_exception is not None: exc = self.enable_exception self.enable_exception = None raise exc for _, response in responses: if isinstance(response, Exception): raise response def save_options(self, test_func): if test_func._overridden_settings is None: test_func._overridden_settings = self.options else: # Duplicate dict to prevent subclasses from altering their parent. test_func._overridden_settings = { **test_func._overridden_settings, **self.options, } def decorate_class(self, cls): from django.test import SimpleTestCase if not issubclass(cls, SimpleTestCase): raise ValueError( "Only subclasses of Django SimpleTestCase can be decorated " "with override_settings" ) self.save_options(cls) return cls class modify_settings(override_settings): """ Like override_settings, but makes it possible to append, prepend, or remove items instead of redefining the entire list. """ def __init__(self, *args, **kwargs): if args: # Hack used when instantiating from SimpleTestCase.setUpClass. assert not kwargs self.operations = args[0] else: assert not args self.operations = list(kwargs.items()) super(override_settings, self).__init__() def save_options(self, test_func): if test_func._modified_settings is None: test_func._modified_settings = self.operations else: # Duplicate list to prevent subclasses from altering their parent. test_func._modified_settings = ( list(test_func._modified_settings) + self.operations ) def enable(self): self.options = {} for name, operations in self.operations: try: # When called from SimpleTestCase.setUpClass, values may be # overridden several times; cumulate changes. value = self.options[name] except KeyError: value = list(getattr(settings, name, [])) for action, items in operations.items(): # items my be a single value or an iterable. if isinstance(items, str): items = [items] if action == "append": value += [item for item in items if item not in value] elif action == "prepend": value = [item for item in items if item not in value] + value elif action == "remove": value = [item for item in value if item not in items] else: raise ValueError("Unsupported action: %s" % action) self.options[name] = value super().enable() class override_system_checks(TestContextDecorator): """ Act as a decorator. Override list of registered system checks. Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app, you also need to exclude its system checks. 
""" def __init__(self, new_checks, deployment_checks=None): from django.core.checks.registry import registry self.registry = registry self.new_checks = new_checks self.deployment_checks = deployment_checks super().__init__() def enable(self): self.old_checks = self.registry.registered_checks self.registry.registered_checks = set() for check in self.new_checks: self.registry.register(check, *getattr(check, "tags", ())) self.old_deployment_checks = self.registry.deployment_checks if self.deployment_checks is not None: self.registry.deployment_checks = set() for check in self.deployment_checks: self.registry.register(check, *getattr(check, "tags", ()), deploy=True) def disable(self): self.registry.registered_checks = self.old_checks self.registry.deployment_checks = self.old_deployment_checks def compare_xml(want, got): """ Try to do a 'xml-comparison' of want and got. Plain string comparison doesn't always work because, for example, attribute ordering should not be important. Ignore comment nodes, processing instructions, document type node, and leading and trailing whitespaces. Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py """ _norm_whitespace_re = re.compile(r"[ \t\n][ \t\n]+") def norm_whitespace(v): return _norm_whitespace_re.sub(" ", v) def child_text(element): return "".join( c.data for c in element.childNodes if c.nodeType == Node.TEXT_NODE ) def children(element): return [c for c in element.childNodes if c.nodeType == Node.ELEMENT_NODE] def norm_child_text(element): return norm_whitespace(child_text(element)) def attrs_dict(element): return dict(element.attributes.items()) def check_element(want_element, got_element): if want_element.tagName != got_element.tagName: return False if norm_child_text(want_element) != norm_child_text(got_element): return False if attrs_dict(want_element) != attrs_dict(got_element): return False want_children = children(want_element) got_children = children(got_element) if len(want_children) != len(got_children): return False return all( check_element(want, got) for want, got in zip(want_children, got_children) ) def first_node(document): for node in document.childNodes: if node.nodeType not in ( Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE, Node.PROCESSING_INSTRUCTION_NODE, ): return node want = want.strip().replace("\\n", "\n") got = got.strip().replace("\\n", "\n") # If the string is not a complete xml document, we may need to add a # root element. This allow us to compare fragments, like "<foo/><bar/>" if not want.startswith("<?xml"): wrapper = "<root>%s</root>" want = wrapper % want got = wrapper % got # Parse the want and got strings, and compare the parsings. want_root = first_node(parseString(want)) got_root = first_node(parseString(got)) return check_element(want_root, got_root) class CaptureQueriesContext: """ Context manager that captures queries executed by the specified connection. """ def __init__(self, connection): self.connection = connection def __iter__(self): return iter(self.captured_queries) def __getitem__(self, index): return self.captured_queries[index] def __len__(self): return len(self.captured_queries) @property def captured_queries(self): return self.connection.queries[self.initial_queries : self.final_queries] def __enter__(self): self.force_debug_cursor = self.connection.force_debug_cursor self.connection.force_debug_cursor = True # Run any initialization queries if needed so that they won't be # included as part of the count. 
        self.connection.ensure_connection()
        self.initial_queries = len(self.connection.queries_log)
        self.final_queries = None
        request_started.disconnect(reset_queries)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.force_debug_cursor = self.force_debug_cursor
        request_started.connect(reset_queries)
        if exc_type is not None:
            return
        self.final_queries = len(self.connection.queries_log)


class ignore_warnings(TestContextDecorator):
    def __init__(self, **kwargs):
        self.ignore_kwargs = kwargs
        if "message" in self.ignore_kwargs or "module" in self.ignore_kwargs:
            self.filter_func = warnings.filterwarnings
        else:
            self.filter_func = warnings.simplefilter
        super().__init__()

    def enable(self):
        self.catch_warnings = warnings.catch_warnings()
        self.catch_warnings.__enter__()
        self.filter_func("ignore", **self.ignore_kwargs)

    def disable(self):
        self.catch_warnings.__exit__(*sys.exc_info())


# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(
    TZ_SUPPORT,
    "This test relies on the ability to run a program in an arbitrary "
    "time zone, but your operating system isn't able to do that.",
)


@contextmanager
def extend_sys_path(*paths):
    """Context manager to temporarily add paths to sys.path."""
    _orig_sys_path = sys.path[:]
    sys.path.extend(paths)
    try:
        yield
    finally:
        sys.path = _orig_sys_path


@contextmanager
def isolate_lru_cache(lru_cache_object):
    """Clear the cache of an LRU cache object on entering and exiting."""
    lru_cache_object.cache_clear()
    try:
        yield
    finally:
        lru_cache_object.cache_clear()


@contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout/stdin/stderr that
    temporarily replaces the sys stream *stream_name* with a StringIO.

    Note: This function and the following ``captured_std*`` are copied
    from CPython's ``test.support`` module."""
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, orig_stdout)


def captured_stdout():
    """Capture the output of sys.stdout:

    with captured_stdout() as stdout:
        print("hello")
    self.assertEqual(stdout.getvalue(), "hello\n")
    """
    return captured_output("stdout")


def captured_stderr():
    """Capture the output of sys.stderr:

    with captured_stderr() as stderr:
        print("hello", file=sys.stderr)
    self.assertEqual(stderr.getvalue(), "hello\n")
    """
    return captured_output("stderr")


def captured_stdin():
    """Capture the input to sys.stdin:

    with captured_stdin() as stdin:
        stdin.write('hello\n')
        stdin.seek(0)
        # call test code that consumes from sys.stdin
        captured = input()
    self.assertEqual(captured, "hello")
    """
    return captured_output("stdin")


@contextmanager
def freeze_time(t):
    """
    Context manager to temporarily freeze time.time(). This temporarily
    modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected.
    This isn't meant as a public API, but helps reduce some repetitive code
    in Django's test suite.
    """
    _real_time = time.time
    time.time = lambda: t
    try:
        yield
    finally:
        time.time = _real_time


def require_jinja2(test_func):
    """
    Decorator to enable a Jinja2 template engine in addition to the regular
    Django template engine for a test or skip it if Jinja2 isn't available.
""" test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func) return override_settings( TEMPLATES=[ { "BACKEND": "django.template.backends.django.DjangoTemplates", "APP_DIRS": True, }, { "BACKEND": "django.template.backends.jinja2.Jinja2", "APP_DIRS": True, "OPTIONS": {"keep_trailing_newline": True}, }, ] )(test_func) class override_script_prefix(TestContextDecorator): """Decorator or context manager to temporary override the script prefix.""" def __init__(self, prefix): self.prefix = prefix super().__init__() def enable(self): self.old_prefix = get_script_prefix() set_script_prefix(self.prefix) def disable(self): set_script_prefix(self.old_prefix) class LoggingCaptureMixin: """ Capture the output from the 'django' logger and store it on the class's logger_output attribute. """ def setUp(self): self.logger = logging.getLogger("django") self.old_stream = self.logger.handlers[0].stream self.logger_output = StringIO() self.logger.handlers[0].stream = self.logger_output def tearDown(self): self.logger.handlers[0].stream = self.old_stream class isolate_apps(TestContextDecorator): """ Act as either a decorator or a context manager to register models defined in its wrapped context to an isolated registry. The list of installed apps the isolated registry should contain must be passed as arguments. Two optional keyword arguments can be specified: `attr_name`: attribute assigned the isolated registry if used as a class decorator. `kwarg_name`: keyword argument passing the isolated registry if used as a function decorator. """ def __init__(self, *installed_apps, **kwargs): self.installed_apps = installed_apps super().__init__(**kwargs) def enable(self): self.old_apps = Options.default_apps apps = Apps(self.installed_apps) setattr(Options, "default_apps", apps) return apps def disable(self): setattr(Options, "default_apps", self.old_apps) class TimeKeeper: def __init__(self): self.records = collections.defaultdict(list) @contextmanager def timed(self, name): self.records[name] start_time = time.perf_counter() try: yield finally: end_time = time.perf_counter() - start_time self.records[name].append(end_time) def print_results(self): for name, end_times in self.records.items(): for record_time in end_times: record = "%s took %.3fs" % (name, record_time) sys.stderr.write(record + os.linesep) class NullTimeKeeper: @contextmanager def timed(self, name): yield def print_results(self): pass def tag(*tags): """Decorator to add tags to a test class or method.""" def decorator(obj): if hasattr(obj, "tags"): obj.tags = obj.tags.union(tags) else: setattr(obj, "tags", set(tags)) return obj return decorator @contextmanager def register_lookup(field, *lookups, lookup_name=None): """ Context manager to temporarily register lookups on a model field using lookup_name (or the lookup's lookup_name if not provided). """ try: for lookup in lookups: field.register_lookup(lookup, lookup_name) yield finally: for lookup in lookups: field._unregister_lookup(lookup, lookup_name)
# These classes override date and datetime to ensure that strftime('%Y') # returns four digits (with leading zeros) on years < 1000. # https://bugs.python.org/issue13305 # # Based on code submitted to comp.lang.python by Andrew Dalke # # >>> datetime_safe.date(10, 8, 2).strftime("%Y/%m/%d was a %A") # '0010/08/02 was a Monday' import time import warnings from datetime import date as real_date from datetime import datetime as real_datetime from django.utils.deprecation import RemovedInDjango50Warning from django.utils.regex_helper import _lazy_re_compile warnings.warn( "The django.utils.datetime_safe module is deprecated.", category=RemovedInDjango50Warning, stacklevel=2, ) class date(real_date): def strftime(self, fmt): return strftime(self, fmt) class datetime(real_datetime): def strftime(self, fmt): return strftime(self, fmt) @classmethod def combine(cls, date, time): return cls( date.year, date.month, date.day, time.hour, time.minute, time.second, time.microsecond, time.tzinfo, ) def date(self): return date(self.year, self.month, self.day) def new_date(d): "Generate a safe date from a datetime.date object." return date(d.year, d.month, d.day) def new_datetime(d): """ Generate a safe datetime from a datetime.date or datetime.datetime object. """ kw = [d.year, d.month, d.day] if isinstance(d, real_datetime): kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo]) return datetime(*kw) # This library does not support strftime's "%s" or "%y" format strings. # Allowed if there's an even number of "%"s because they are escaped. _illegal_formatting = _lazy_re_compile(r"((^|[^%])(%%)*%[sy])") def _findall(text, substr): # Also finds overlaps sites = [] i = 0 while True: i = text.find(substr, i) if i == -1: break sites.append(i) i += 1 return sites def strftime(dt, fmt): if dt.year >= 1000: return super(type(dt), dt).strftime(fmt) illegal_formatting = _illegal_formatting.search(fmt) if illegal_formatting: raise TypeError( "strftime of dates before 1000 does not handle " + illegal_formatting[0] ) year = dt.year # For every non-leap year century, advance by # 6 years to get into the 28-year repeat cycle delta = 2000 - year off = 6 * (delta // 100 + delta // 400) year += off # Move to around the year 2000 year += ((2000 - year) // 28) * 28 timetuple = dt.timetuple() s1 = time.strftime(fmt, (year,) + timetuple[1:]) sites1 = _findall(s1, str(year)) s2 = time.strftime(fmt, (year + 28,) + timetuple[1:]) sites2 = _findall(s2, str(year + 28)) sites = [] for site in sites1: if site in sites2: sites.append(site) s = s1 syear = "%04d" % dt.year for site in sites: s = s[:site] + syear + s[site + 4 :] return s
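
# Usage sketch (illustrative): the subclasses above exist so that strftime()
# zero-pads years below 1000 instead of failing or dropping digits.
if __name__ == "__main__":
    print(date(10, 8, 2).strftime("%Y-%m-%d"))  # 0010-08-02
    print(new_datetime(real_datetime(5, 1, 1, 12, 30)).strftime("%Y %H:%M"))
    # 0005 12:30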
from decimal import Decimal from django.conf import settings from django.utils.safestring import mark_safe def format( number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep="", force_grouping=False, use_l10n=None, ): """ Get a number (as a number or string), and return it as a string, using formats defined as arguments: * decimal_sep: Decimal separator symbol (for example ".") * decimal_pos: Number of decimal positions * grouping: Number of digits in every group limited by thousand separator. For non-uniform digit grouping, it can be a sequence with the number of digit group sizes following the format used by the Python locale module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)). * thousand_sep: Thousand separator symbol (for example ",") """ if number is None or number == "": return mark_safe(number) use_grouping = ( use_l10n or (use_l10n is None and settings.USE_L10N) ) and settings.USE_THOUSAND_SEPARATOR use_grouping = use_grouping or force_grouping use_grouping = use_grouping and grouping != 0 # Make the common case fast if isinstance(number, int) and not use_grouping and not decimal_pos: return mark_safe(number) # sign sign = "" # Treat potentially very large/small floats as Decimals. if isinstance(number, float) and "e" in str(number).lower(): number = Decimal(str(number)) if isinstance(number, Decimal): if decimal_pos is not None: # If the provided number is too small to affect any of the visible # decimal places, consider it equal to '0'. cutoff = Decimal("0." + "1".rjust(decimal_pos, "0")) if abs(number) < cutoff: number = Decimal("0") # Format values with more than 200 digits (an arbitrary cutoff) using # scientific notation to avoid high memory usage in {:f}'.format(). _, digits, exponent = number.as_tuple() if abs(exponent) + len(digits) > 200: number = "{:e}".format(number) coefficient, exponent = number.split("e") # Format the coefficient. coefficient = format( coefficient, decimal_sep, decimal_pos, grouping, thousand_sep, force_grouping, use_l10n, ) return "{}e{}".format(coefficient, exponent) else: str_number = "{:f}".format(number) else: str_number = str(number) if str_number[0] == "-": sign = "-" str_number = str_number[1:] # decimal part if "." in str_number: int_part, dec_part = str_number.split(".") if decimal_pos is not None: dec_part = dec_part[:decimal_pos] else: int_part, dec_part = str_number, "" if decimal_pos is not None: dec_part += "0" * (decimal_pos - len(dec_part)) dec_part = dec_part and decimal_sep + dec_part # grouping if use_grouping: try: # if grouping is a sequence intervals = list(grouping) except TypeError: # grouping is a single value intervals = [grouping, 0] active_interval = intervals.pop(0) int_part_gd = "" cnt = 0 for digit in int_part[::-1]: if cnt and cnt == active_interval: if intervals: active_interval = intervals.pop(0) or active_interval int_part_gd += thousand_sep[::-1] cnt = 0 int_part_gd += digit cnt += 1 int_part = int_part_gd[::-1] return sign + int_part + dec_part
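
# Usage sketch (illustrative): passing use_l10n=False keeps the function from
# consulting settings, and force_grouping=True turns grouping on explicitly,
# so the formatter can be exercised without a configured Django project.
if __name__ == "__main__":
    print(format(1234567.891, ".", decimal_pos=2, grouping=3,
                 thousand_sep=",", force_grouping=True, use_l10n=False))
    # 1,234,567.89
    print(format(1234567, ".", grouping=(3, 2, 0), thousand_sep=",",
                 force_grouping=True, use_l10n=False))
    # 12,34,567 -- non-uniform grouping, as in locale.localeconv()
    print(format(Decimal("0.00001"), ".", decimal_pos=2, use_l10n=False))
    # 0.00 -- too small to affect the visible decimal places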
""" Timezone-related classes and functions. """ import functools import sys import warnings try: import zoneinfo except ImportError: from backports import zoneinfo from contextlib import ContextDecorator from datetime import datetime, timedelta, timezone, tzinfo from asgiref.local import Local from django.conf import settings from django.utils.deprecation import RemovedInDjango50Warning __all__ = [ # noqa for utc RemovedInDjango50Warning. "utc", "get_fixed_timezone", "get_default_timezone", "get_default_timezone_name", "get_current_timezone", "get_current_timezone_name", "activate", "deactivate", "override", "localtime", "localdate", "now", "is_aware", "is_naive", "make_aware", "make_naive", ] # RemovedInDjango50Warning: sentinel for deprecation of is_dst parameters. NOT_PASSED = object() def __getattr__(name): if name != "utc": raise AttributeError(f"module {__name__!r} has no attribute {name!r}") warnings.warn( "The django.utils.timezone.utc alias is deprecated. " "Please update your code to use datetime.timezone.utc instead.", RemovedInDjango50Warning, stacklevel=2, ) return timezone.utc def get_fixed_timezone(offset): """Return a tzinfo instance with a fixed offset from UTC.""" if isinstance(offset, timedelta): offset = offset.total_seconds() // 60 sign = "-" if offset < 0 else "+" hhmm = "%02d%02d" % divmod(abs(offset), 60) name = sign + hhmm return timezone(timedelta(minutes=offset), name) # In order to avoid accessing settings at compile time, # wrap the logic in a function and cache the result. @functools.lru_cache def get_default_timezone(): """ Return the default time zone as a tzinfo instance. This is the time zone defined by settings.TIME_ZONE. """ if settings.USE_DEPRECATED_PYTZ: import pytz return pytz.timezone(settings.TIME_ZONE) return zoneinfo.ZoneInfo(settings.TIME_ZONE) # This function exists for consistency with get_current_timezone_name def get_default_timezone_name(): """Return the name of the default time zone.""" return _get_timezone_name(get_default_timezone()) _active = Local() def get_current_timezone(): """Return the currently active time zone as a tzinfo instance.""" return getattr(_active, "value", get_default_timezone()) def get_current_timezone_name(): """Return the name of the currently active time zone.""" return _get_timezone_name(get_current_timezone()) def _get_timezone_name(timezone): """ Return the offset for fixed offset timezones, or the name of timezone if not set. """ return timezone.tzname(None) or str(timezone) # Timezone selection functions. # These functions don't change os.environ['TZ'] and call time.tzset() # because it isn't thread safe. def activate(timezone): """ Set the time zone for the current thread. The ``timezone`` argument must be an instance of a tzinfo subclass or a time zone name. """ if isinstance(timezone, tzinfo): _active.value = timezone elif isinstance(timezone, str): if settings.USE_DEPRECATED_PYTZ: import pytz _active.value = pytz.timezone(timezone) else: _active.value = zoneinfo.ZoneInfo(timezone) else: raise ValueError("Invalid timezone: %r" % timezone) def deactivate(): """ Unset the time zone for the current thread. Django will then use the time zone defined by settings.TIME_ZONE. """ if hasattr(_active, "value"): del _active.value class override(ContextDecorator): """ Temporarily set the time zone for the current thread. This is a context manager that uses django.utils.timezone.activate() to set the timezone on entry and restores the previously active timezone on exit. 
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a time zone name, or ``None``. If it is ``None``, Django enables the default time zone. """ def __init__(self, timezone): self.timezone = timezone def __enter__(self): self.old_timezone = getattr(_active, "value", None) if self.timezone is None: deactivate() else: activate(self.timezone) def __exit__(self, exc_type, exc_value, traceback): if self.old_timezone is None: deactivate() else: _active.value = self.old_timezone # Templates def template_localtime(value, use_tz=None): """ Check if value is a datetime and converts it to local time if necessary. If use_tz is provided and is not None, that will force the value to be converted (or not), overriding the value of settings.USE_TZ. This function is designed for use by the template engine. """ should_convert = ( isinstance(value, datetime) and (settings.USE_TZ if use_tz is None else use_tz) and not is_naive(value) and getattr(value, "convert_to_local_time", True) ) return localtime(value) if should_convert else value # Utilities def localtime(value=None, timezone=None): """ Convert an aware datetime.datetime to local time. Only aware datetimes are allowed. When value is omitted, it defaults to now(). Local time is defined by the current time zone, unless another time zone is specified. """ if value is None: value = now() if timezone is None: timezone = get_current_timezone() # Emulate the behavior of astimezone() on Python < 3.6. if is_naive(value): raise ValueError("localtime() cannot be applied to a naive datetime") return value.astimezone(timezone) def localdate(value=None, timezone=None): """ Convert an aware datetime to local time and return the value's date. Only aware datetimes are allowed. When value is omitted, it defaults to now(). Local time is defined by the current time zone, unless another time zone is specified. """ return localtime(value, timezone).date() def now(): """ Return an aware or naive datetime.datetime, depending on settings.USE_TZ. """ return datetime.now(tz=timezone.utc if settings.USE_TZ else None) # By design, these four functions don't perform any checks on their arguments. # The caller should ensure that they don't receive an invalid value like None. def is_aware(value): """ Determine if a given datetime.datetime is aware. The concept is defined in Python's docs: https://docs.python.org/library/datetime.html#datetime.tzinfo Assuming value.tzinfo is either None or a proper datetime.tzinfo, value.utcoffset() implements the appropriate logic. """ return value.utcoffset() is not None def is_naive(value): """ Determine if a given datetime.datetime is naive. The concept is defined in Python's docs: https://docs.python.org/library/datetime.html#datetime.tzinfo Assuming value.tzinfo is either None or a proper datetime.tzinfo, value.utcoffset() implements the appropriate logic. """ return value.utcoffset() is None def make_aware(value, timezone=None, is_dst=NOT_PASSED): """Make a naive datetime.datetime in a given time zone aware.""" if is_dst is NOT_PASSED: is_dst = None else: warnings.warn( "The is_dst argument to make_aware(), used by the Trunc() " "database functions and QuerySet.datetimes(), is deprecated as it " "has no effect with zoneinfo time zones.", RemovedInDjango50Warning, ) if timezone is None: timezone = get_current_timezone() if _is_pytz_zone(timezone): # This method is available for pytz time zones. return timezone.localize(value, is_dst=is_dst) else: # Check that we won't overwrite the timezone of an aware datetime. 
if is_aware(value): raise ValueError("make_aware expects a naive datetime, got %s" % value) # This may be wrong around DST changes! return value.replace(tzinfo=timezone) def make_naive(value, timezone=None): """Make an aware datetime.datetime naive in a given time zone.""" if timezone is None: timezone = get_current_timezone() # Emulate the behavior of astimezone() on Python < 3.6. if is_naive(value): raise ValueError("make_naive() cannot be applied to a naive datetime") return value.astimezone(timezone).replace(tzinfo=None) _PYTZ_IMPORTED = False def _pytz_imported(): """ Detects whether or not pytz has been imported without importing pytz. Copied from pytz_deprecation_shim with thanks to Paul Ganssle. """ global _PYTZ_IMPORTED if not _PYTZ_IMPORTED and "pytz" in sys.modules: _PYTZ_IMPORTED = True return _PYTZ_IMPORTED def _is_pytz_zone(tz): """Checks if a zone is a pytz zone.""" # See if pytz was already imported rather than checking # settings.USE_DEPRECATED_PYTZ to *allow* manually passing a pytz timezone, # which some of the test cases (at least) rely on. if not _pytz_imported(): return False # If tz could be pytz, then pytz is needed here. import pytz _PYTZ_BASE_CLASSES = (pytz.tzinfo.BaseTzInfo, pytz._FixedOffset) # In releases prior to 2018.4, pytz.UTC was not a subclass of BaseTzInfo if not isinstance(pytz.UTC, pytz._FixedOffset): _PYTZ_BASE_CLASSES += (type(pytz.UTC),) return isinstance(tz, _PYTZ_BASE_CLASSES) def _datetime_ambiguous_or_imaginary(dt, tz): if _is_pytz_zone(tz): import pytz try: tz.utcoffset(dt) except (pytz.AmbiguousTimeError, pytz.NonExistentTimeError): return True else: return False return tz.utcoffset(dt.replace(fold=not dt.fold)) != tz.utcoffset(dt) # RemovedInDjango50Warning. _DIR = dir() def __dir__(): return sorted([*_DIR, "utc"])
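
# Usage sketch (illustrative): with an explicit tzinfo argument, the helpers
# above never touch settings. "America/Chicago" is just a demo zone and needs
# the system (or tzdata) time zone database to be available.
if __name__ == "__main__":
    tz = zoneinfo.ZoneInfo("America/Chicago")
    naive = datetime(2023, 3, 12, 2, 30)  # falls in the DST "spring forward" gap
    aware = make_aware(naive, tz)
    print(is_aware(aware))                              # True
    print(_datetime_ambiguous_or_imaginary(naive, tz))  # True (imaginary time)
    print(get_fixed_timezone(-330))                     # -0530
    print(make_naive(aware, timezone.utc))              # 2023-03-12 08:30:00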
import datetime def _get_duration_components(duration): days = duration.days seconds = duration.seconds microseconds = duration.microseconds minutes = seconds // 60 seconds %= 60 hours = minutes // 60 minutes %= 60 return days, hours, minutes, seconds, microseconds def duration_string(duration): """Version of str(timedelta) which is not English specific.""" days, hours, minutes, seconds, microseconds = _get_duration_components(duration) string = "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds) if days: string = "{} ".format(days) + string if microseconds: string += ".{:06d}".format(microseconds) return string def duration_iso_string(duration): if duration < datetime.timedelta(0): sign = "-" duration *= -1 else: sign = "" days, hours, minutes, seconds, microseconds = _get_duration_components(duration) ms = ".{:06d}".format(microseconds) if microseconds else "" return "{}P{}DT{:02d}H{:02d}M{:02d}{}S".format( sign, days, hours, minutes, seconds, ms ) def duration_microseconds(delta): return (24 * 60 * 60 * delta.days + delta.seconds) * 1000000 + delta.microseconds
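
# Usage sketch (illustrative); this module has no Django dependencies, so it
# can be run directly.
if __name__ == "__main__":
    d = datetime.timedelta(days=1, hours=2, minutes=3, seconds=4, microseconds=5)
    print(duration_string(d))        # 1 02:03:04.000005
    print(duration_iso_string(-d))   # -P1DT02H03M04.000005S
    print(duration_microseconds(d))  # 93784000005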
import re from django.conf import settings from django.http import HttpResponsePermanentRedirect from django.utils.deprecation import MiddlewareMixin class SecurityMiddleware(MiddlewareMixin): def __init__(self, get_response): super().__init__(get_response) self.sts_seconds = settings.SECURE_HSTS_SECONDS self.sts_include_subdomains = settings.SECURE_HSTS_INCLUDE_SUBDOMAINS self.sts_preload = settings.SECURE_HSTS_PRELOAD self.content_type_nosniff = settings.SECURE_CONTENT_TYPE_NOSNIFF self.redirect = settings.SECURE_SSL_REDIRECT self.redirect_host = settings.SECURE_SSL_HOST self.redirect_exempt = [re.compile(r) for r in settings.SECURE_REDIRECT_EXEMPT] self.referrer_policy = settings.SECURE_REFERRER_POLICY self.cross_origin_opener_policy = settings.SECURE_CROSS_ORIGIN_OPENER_POLICY def process_request(self, request): path = request.path.lstrip("/") if ( self.redirect and not request.is_secure() and not any(pattern.search(path) for pattern in self.redirect_exempt) ): host = self.redirect_host or request.get_host() return HttpResponsePermanentRedirect( "https://%s%s" % (host, request.get_full_path()) ) def process_response(self, request, response): if ( self.sts_seconds and request.is_secure() and "Strict-Transport-Security" not in response ): sts_header = "max-age=%s" % self.sts_seconds if self.sts_include_subdomains: sts_header += "; includeSubDomains" if self.sts_preload: sts_header += "; preload" response.headers["Strict-Transport-Security"] = sts_header if self.content_type_nosniff: response.headers.setdefault("X-Content-Type-Options", "nosniff") if self.referrer_policy: # Support a comma-separated string or iterable of values to allow # fallback. response.headers.setdefault( "Referrer-Policy", ",".join( [v.strip() for v in self.referrer_policy.split(",")] if isinstance(self.referrer_policy, str) else self.referrer_policy ), ) if self.cross_origin_opener_policy: response.setdefault( "Cross-Origin-Opener-Policy", self.cross_origin_opener_policy, ) return response
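
# Usage sketch (illustrative, assuming a process where settings are not yet
# configured): build the middleware around a trivial view and inspect the
# headers it adds to a secure request.
if __name__ == "__main__":
    from django.conf import settings

    settings.configure(
        SECURE_HSTS_SECONDS=3600,
        SECURE_HSTS_INCLUDE_SUBDOMAINS=True,
    )
    from django.http import HttpResponse
    from django.test import RequestFactory

    middleware = SecurityMiddleware(lambda request: HttpResponse())
    response = middleware(RequestFactory().get("/", secure=True))
    print(response.headers["Strict-Transport-Security"])
    # max-age=3600; includeSubDomains
    print(response.headers["X-Content-Type-Options"])  # nosniff (the default)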
from urllib.parse import unquote, urlsplit, urlunsplit from asgiref.local import Local from django.utils.functional import lazy from django.utils.translation import override from .exceptions import NoReverseMatch, Resolver404 from .resolvers import _get_cached_resolver, get_ns_resolver, get_resolver from .utils import get_callable # SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for # the current thread (which is the only one we ever access), it is assumed to # be empty. _prefixes = Local() # Overridden URLconfs for each thread are stored here. _urlconfs = Local() def resolve(path, urlconf=None): if urlconf is None: urlconf = get_urlconf() return get_resolver(urlconf).resolve(path) def reverse(viewname, urlconf=None, args=None, kwargs=None, current_app=None): if urlconf is None: urlconf = get_urlconf() resolver = get_resolver(urlconf) args = args or [] kwargs = kwargs or {} prefix = get_script_prefix() if not isinstance(viewname, str): view = viewname else: *path, view = viewname.split(":") if current_app: current_path = current_app.split(":") current_path.reverse() else: current_path = None resolved_path = [] ns_pattern = "" ns_converters = {} for ns in path: current_ns = current_path.pop() if current_path else None # Lookup the name to see if it could be an app identifier. try: app_list = resolver.app_dict[ns] # Yes! Path part matches an app in the current Resolver. if current_ns and current_ns in app_list: # If we are reversing for a particular app, use that # namespace. ns = current_ns elif ns not in app_list: # The name isn't shared by one of the instances (i.e., # the default) so pick the first instance as the default. ns = app_list[0] except KeyError: pass if ns != current_ns: current_path = None try: extra, resolver = resolver.namespace_dict[ns] resolved_path.append(ns) ns_pattern += extra ns_converters.update(resolver.pattern.converters) except KeyError as key: if resolved_path: raise NoReverseMatch( "%s is not a registered namespace inside '%s'" % (key, ":".join(resolved_path)) ) else: raise NoReverseMatch("%s is not a registered namespace" % key) if ns_pattern: resolver = get_ns_resolver( ns_pattern, resolver, tuple(ns_converters.items()) ) return resolver._reverse_with_prefix(view, prefix, *args, **kwargs) reverse_lazy = lazy(reverse, str) def clear_url_caches(): get_callable.cache_clear() _get_cached_resolver.cache_clear() get_ns_resolver.cache_clear() def set_script_prefix(prefix): """ Set the script prefix for the current thread. """ if not prefix.endswith("/"): prefix += "/" _prefixes.value = prefix def get_script_prefix(): """ Return the currently active script prefix. Useful for client code that wishes to construct their own URLs manually (although accessing the request instance is normally going to be a lot cleaner). """ return getattr(_prefixes, "value", "/") def clear_script_prefix(): """ Unset the script prefix for the current thread. """ try: del _prefixes.value except AttributeError: pass def set_urlconf(urlconf_name): """ Set the URLconf for the current thread (overriding the default one in settings). If urlconf_name is None, revert back to the default. """ if urlconf_name: _urlconfs.value = urlconf_name else: if hasattr(_urlconfs, "value"): del _urlconfs.value def get_urlconf(default=None): """ Return the root URLconf to use for the current thread if it has been changed from the default one. 
""" return getattr(_urlconfs, "value", default) def is_valid_path(path, urlconf=None): """ Return the ResolverMatch if the given path resolves against the default URL resolver, False otherwise. This is a convenience method to make working with "is this a match?" cases easier, avoiding try...except blocks. """ try: return resolve(path, urlconf) except Resolver404: return False def translate_url(url, lang_code): """ Given a URL (absolute or relative), try to get its translated version in the `lang_code` language (either by i18n_patterns or by translated regex). Return the original URL if no translated version is found. """ parsed = urlsplit(url) try: # URL may be encoded. match = resolve(unquote(parsed.path)) except Resolver404: pass else: to_be_reversed = ( "%s:%s" % (match.namespace, match.url_name) if match.namespace else match.url_name ) with override(lang_code): try: url = reverse(to_be_reversed, args=match.args, kwargs=match.kwargs) except NoReverseMatch: pass else: url = urlunsplit( (parsed.scheme, parsed.netloc, url, parsed.query, parsed.fragment) ) return url
""" Helper functions for creating Form classes from Django models and database field objects. """ from itertools import chain from django.core.exceptions import ( NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError, ) from django.db.models.utils import AltersData from django.forms.fields import ChoiceField, Field from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass from django.forms.formsets import BaseFormSet, formset_factory from django.forms.utils import ErrorList from django.forms.widgets import ( HiddenInput, MultipleHiddenInput, RadioSelect, SelectMultiple, ) from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext from django.utils.translation import gettext_lazy as _ __all__ = ( "ModelForm", "BaseModelForm", "model_to_dict", "fields_for_model", "ModelChoiceField", "ModelMultipleChoiceField", "ALL_FIELDS", "BaseModelFormSet", "modelformset_factory", "BaseInlineFormSet", "inlineformset_factory", "modelform_factory", ) ALL_FIELDS = "__all__" def construct_instance(form, instance, fields=None, exclude=None): """ Construct and return a model instance from the bound ``form``'s ``cleaned_data``, but do not save the returned instance to the database. """ from django.db import models opts = instance._meta cleaned_data = form.cleaned_data file_field_list = [] for f in opts.fields: if ( not f.editable or isinstance(f, models.AutoField) or f.name not in cleaned_data ): continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue # Leave defaults for fields that aren't in POST data, except for # checkbox inputs because they don't appear in POST data if not checked. if ( f.has_default() and form[f.name].field.widget.value_omitted_from_data( form.data, form.files, form.add_prefix(f.name) ) and cleaned_data.get(f.name) in form[f.name].field.empty_values ): continue # Defer saving file-type fields until after the other fields, so a # callable upload_to can use the values from other fields. if isinstance(f, models.FileField): file_field_list.append(f) else: f.save_form_data(instance, cleaned_data[f.name]) for f in file_field_list: f.save_form_data(instance, cleaned_data[f.name]) return instance # ModelForms ################################################################# def model_to_dict(instance, fields=None, exclude=None): """ Return a dict containing the data in ``instance`` suitable for passing as a Form's ``initial`` keyword argument. ``fields`` is an optional list of field names. If provided, return only the named. ``exclude`` is an optional list of field names. If provided, exclude the named from the returned dict, even if they are listed in the ``fields`` argument. 
""" opts = instance._meta data = {} for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many): if not getattr(f, "editable", False): continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue data[f.name] = f.value_from_object(instance) return data def apply_limit_choices_to_to_formfield(formfield): """Apply limit_choices_to to the formfield's queryset if needed.""" from django.db.models import Exists, OuterRef, Q if hasattr(formfield, "queryset") and hasattr(formfield, "get_limit_choices_to"): limit_choices_to = formfield.get_limit_choices_to() if limit_choices_to: complex_filter = limit_choices_to if not isinstance(complex_filter, Q): complex_filter = Q(**limit_choices_to) complex_filter &= Q(pk=OuterRef("pk")) # Use Exists() to avoid potential duplicates. formfield.queryset = formfield.queryset.filter( Exists(formfield.queryset.model._base_manager.filter(complex_filter)), ) def fields_for_model( model, fields=None, exclude=None, widgets=None, formfield_callback=None, localized_fields=None, labels=None, help_texts=None, error_messages=None, field_classes=None, *, apply_limit_choices_to=True, ): """ Return a dictionary containing form fields for the given model. ``fields`` is an optional list of field names. If provided, return only the named fields. ``exclude`` is an optional list of field names. If provided, exclude the named fields from the returned fields, even if they are listed in the ``fields`` argument. ``widgets`` is a dictionary of model field names mapped to a widget. ``formfield_callback`` is a callable that takes a model field and returns a form field. ``localized_fields`` is a list of names of fields which should be localized. ``labels`` is a dictionary of model field names mapped to a label. ``help_texts`` is a dictionary of model field names mapped to a help text. ``error_messages`` is a dictionary of model field names mapped to a dictionary of error messages. ``field_classes`` is a dictionary of model field names mapped to a form field class. ``apply_limit_choices_to`` is a boolean indicating if limit_choices_to should be applied to a field's queryset. 
""" field_dict = {} ignored = [] opts = model._meta # Avoid circular import from django.db.models import Field as ModelField sortable_private_fields = [ f for f in opts.private_fields if isinstance(f, ModelField) ] for f in sorted( chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many) ): if not getattr(f, "editable", False): if ( fields is not None and f.name in fields and (exclude is None or f.name not in exclude) ): raise FieldError( "'%s' cannot be specified for %s model form as it is a " "non-editable field" % (f.name, model.__name__) ) continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue kwargs = {} if widgets and f.name in widgets: kwargs["widget"] = widgets[f.name] if localized_fields == ALL_FIELDS or ( localized_fields and f.name in localized_fields ): kwargs["localize"] = True if labels and f.name in labels: kwargs["label"] = labels[f.name] if help_texts and f.name in help_texts: kwargs["help_text"] = help_texts[f.name] if error_messages and f.name in error_messages: kwargs["error_messages"] = error_messages[f.name] if field_classes and f.name in field_classes: kwargs["form_class"] = field_classes[f.name] if formfield_callback is None: formfield = f.formfield(**kwargs) elif not callable(formfield_callback): raise TypeError("formfield_callback must be a function or callable") else: formfield = formfield_callback(f, **kwargs) if formfield: if apply_limit_choices_to: apply_limit_choices_to_to_formfield(formfield) field_dict[f.name] = formfield else: ignored.append(f.name) if fields: field_dict = { f: field_dict.get(f) for f in fields if (not exclude or f not in exclude) and f not in ignored } return field_dict class ModelFormOptions: def __init__(self, options=None): self.model = getattr(options, "model", None) self.fields = getattr(options, "fields", None) self.exclude = getattr(options, "exclude", None) self.widgets = getattr(options, "widgets", None) self.localized_fields = getattr(options, "localized_fields", None) self.labels = getattr(options, "labels", None) self.help_texts = getattr(options, "help_texts", None) self.error_messages = getattr(options, "error_messages", None) self.field_classes = getattr(options, "field_classes", None) self.formfield_callback = getattr(options, "formfield_callback", None) class ModelFormMetaclass(DeclarativeFieldsMetaclass): def __new__(mcs, name, bases, attrs): new_class = super().__new__(mcs, name, bases, attrs) if bases == (BaseModelForm,): return new_class opts = new_class._meta = ModelFormOptions(getattr(new_class, "Meta", None)) # We check if a string was passed to `fields` or `exclude`, # which is likely to be a mistake where the user typed ('foo') instead # of ('foo',) for opt in ["fields", "exclude", "localized_fields"]: value = getattr(opts, opt) if isinstance(value, str) and value != ALL_FIELDS: msg = ( "%(model)s.Meta.%(opt)s cannot be a string. " "Did you mean to type: ('%(value)s',)?" % { "model": new_class.__name__, "opt": opt, "value": value, } ) raise TypeError(msg) if opts.model: # If a model is defined, extract form fields from it. if opts.fields is None and opts.exclude is None: raise ImproperlyConfigured( "Creating a ModelForm without either the 'fields' attribute " "or the 'exclude' attribute is prohibited; form %s " "needs updating." 
% name ) if opts.fields == ALL_FIELDS: # Sentinel for fields_for_model to indicate "get the list of # fields from the model" opts.fields = None fields = fields_for_model( opts.model, opts.fields, opts.exclude, opts.widgets, opts.formfield_callback, opts.localized_fields, opts.labels, opts.help_texts, opts.error_messages, opts.field_classes, # limit_choices_to will be applied during ModelForm.__init__(). apply_limit_choices_to=False, ) # make sure opts.fields doesn't specify an invalid field none_model_fields = {k for k, v in fields.items() if not v} missing_fields = none_model_fields.difference(new_class.declared_fields) if missing_fields: message = "Unknown field(s) (%s) specified for %s" message %= (", ".join(missing_fields), opts.model.__name__) raise FieldError(message) # Override default model fields with any custom declared ones # (plus, include all the other declared fields). fields.update(new_class.declared_fields) else: fields = new_class.declared_fields new_class.base_fields = fields return new_class class BaseModelForm(BaseForm, AltersData): def __init__( self, data=None, files=None, auto_id="id_%s", prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, instance=None, use_required_attribute=None, renderer=None, ): opts = self._meta if opts.model is None: raise ValueError("ModelForm has no model class specified.") if instance is None: # if we didn't get an instance, instantiate a new one self.instance = opts.model() object_data = {} else: self.instance = instance object_data = model_to_dict(instance, opts.fields, opts.exclude) # if initial was provided, it should override the values from instance if initial is not None: object_data.update(initial) # self._validate_unique will be set to True by BaseModelForm.clean(). # It is False by default so overriding self.clean() and failing to call # super will stop validate_unique from being called. self._validate_unique = False super().__init__( data, files, auto_id, prefix, object_data, error_class, label_suffix, empty_permitted, use_required_attribute=use_required_attribute, renderer=renderer, ) for formfield in self.fields.values(): apply_limit_choices_to_to_formfield(formfield) def _get_validation_exclusions(self): """ For backwards-compatibility, exclude several types of fields from model validation. See tickets #12507, #12521, #12553. """ exclude = set() # Build up a list of fields that should be excluded from model field # validation and unique checks. for f in self.instance._meta.fields: field = f.name # Exclude fields that aren't on the form. The developer may be # adding these values to the model after form validation. if field not in self.fields: exclude.add(f.name) # Don't perform model validation on fields that were defined # manually on the form and excluded via the ModelForm's Meta # class. See #12901. elif self._meta.fields and field not in self._meta.fields: exclude.add(f.name) elif self._meta.exclude and field in self._meta.exclude: exclude.add(f.name) # Exclude fields that failed form validation. There's no need for # the model fields to validate them as well. elif field in self._errors: exclude.add(f.name) # Exclude empty fields that are not required by the form, if the # underlying model field is required. This keeps the model field # from raising a required error. Note: don't exclude the field from # validation if the model field allows blanks. If it does, the blank # value may be included in a unique check, so cannot be excluded # from validation. 
else: form_field = self.fields[field] field_value = self.cleaned_data.get(field) if ( not f.blank and not form_field.required and field_value in form_field.empty_values ): exclude.add(f.name) return exclude def clean(self): self._validate_unique = True return self.cleaned_data def _update_errors(self, errors): # Override any validation error messages defined at the model level # with those defined at the form level. opts = self._meta # Allow the model generated by construct_instance() to raise # ValidationError and have them handled in the same way as others. if hasattr(errors, "error_dict"): error_dict = errors.error_dict else: error_dict = {NON_FIELD_ERRORS: errors} for field, messages in error_dict.items(): if ( field == NON_FIELD_ERRORS and opts.error_messages and NON_FIELD_ERRORS in opts.error_messages ): error_messages = opts.error_messages[NON_FIELD_ERRORS] elif field in self.fields: error_messages = self.fields[field].error_messages else: continue for message in messages: if ( isinstance(message, ValidationError) and message.code in error_messages ): message.message = error_messages[message.code] self.add_error(None, errors) def _post_clean(self): opts = self._meta exclude = self._get_validation_exclusions() # Foreign Keys being used to represent inline relationships # are excluded from basic field value validation. This is for two # reasons: firstly, the value may not be supplied (#12507; the # case of providing new values to the admin); secondly the # object being referred to may not yet fully exist (#12749). # However, these fields *must* be included in uniqueness checks, # so this can't be part of _get_validation_exclusions(). for name, field in self.fields.items(): if isinstance(field, InlineForeignKeyField): exclude.add(name) try: self.instance = construct_instance( self, self.instance, opts.fields, opts.exclude ) except ValidationError as e: self._update_errors(e) try: self.instance.full_clean(exclude=exclude, validate_unique=False) except ValidationError as e: self._update_errors(e) # Validate uniqueness if needed. if self._validate_unique: self.validate_unique() def validate_unique(self): """ Call the instance's validate_unique() method and update the form's validation errors if any were raised. """ exclude = self._get_validation_exclusions() try: self.instance.validate_unique(exclude=exclude) except ValidationError as e: self._update_errors(e) def _save_m2m(self): """ Save the many-to-many fields and generic relations for this form. """ cleaned_data = self.cleaned_data exclude = self._meta.exclude fields = self._meta.fields opts = self.instance._meta # Note that for historical reasons we want to include also # private_fields here. (GenericRelation was previously a fake # m2m field). for f in chain(opts.many_to_many, opts.private_fields): if not hasattr(f, "save_form_data"): continue if fields and f.name not in fields: continue if exclude and f.name in exclude: continue if f.name in cleaned_data: f.save_form_data(self.instance, cleaned_data[f.name]) def save(self, commit=True): """ Save this form's self.instance object if commit=True. Otherwise, add a save_m2m() method to the form which can be called after the instance is saved manually at a later time. Return the model instance. """ if self.errors: raise ValueError( "The %s could not be %s because the data didn't validate." % ( self.instance._meta.object_name, "created" if self.instance._state.adding else "changed", ) ) if commit: # If committing, save the instance and the m2m data immediately. 
self.instance.save() self._save_m2m() else: # If not committing, add a method to the form to allow deferred # saving of m2m data. self.save_m2m = self._save_m2m return self.instance save.alters_data = True class ModelForm(BaseModelForm, metaclass=ModelFormMetaclass): pass def modelform_factory( model, form=ModelForm, fields=None, exclude=None, formfield_callback=None, widgets=None, localized_fields=None, labels=None, help_texts=None, error_messages=None, field_classes=None, ): """ Return a ModelForm containing form fields for the given model. You can optionally pass a `form` argument to use as a starting point for constructing the ModelForm. ``fields`` is an optional list of field names. If provided, include only the named fields in the returned fields. If omitted or '__all__', use all fields. ``exclude`` is an optional list of field names. If provided, exclude the named fields from the returned fields, even if they are listed in the ``fields`` argument. ``widgets`` is a dictionary of model field names mapped to a widget. ``localized_fields`` is a list of names of fields which should be localized. ``formfield_callback`` is a callable that takes a model field and returns a form field. ``labels`` is a dictionary of model field names mapped to a label. ``help_texts`` is a dictionary of model field names mapped to a help text. ``error_messages`` is a dictionary of model field names mapped to a dictionary of error messages. ``field_classes`` is a dictionary of model field names mapped to a form field class. """ # Create the inner Meta class. FIXME: ideally, we should be able to # construct a ModelForm without creating and passing in a temporary # inner class. # Build up a list of attributes that the Meta object will have. attrs = {"model": model} if fields is not None: attrs["fields"] = fields if exclude is not None: attrs["exclude"] = exclude if widgets is not None: attrs["widgets"] = widgets if localized_fields is not None: attrs["localized_fields"] = localized_fields if labels is not None: attrs["labels"] = labels if help_texts is not None: attrs["help_texts"] = help_texts if error_messages is not None: attrs["error_messages"] = error_messages if field_classes is not None: attrs["field_classes"] = field_classes # If parent form class already has an inner Meta, the Meta we're # creating needs to inherit from the parent's inner meta. bases = (form.Meta,) if hasattr(form, "Meta") else () Meta = type("Meta", bases, attrs) if formfield_callback: Meta.formfield_callback = staticmethod(formfield_callback) # Give this new form class a reasonable name. class_name = model.__name__ + "Form" # Class attributes for the new form class. form_class_attrs = {"Meta": Meta} if getattr(Meta, "fields", None) is None and getattr(Meta, "exclude", None) is None: raise ImproperlyConfigured( "Calling modelform_factory without defining 'fields' or " "'exclude' explicitly is prohibited." ) # Instantiate type(form) in order to use the same metaclass as form. return type(form)(class_name, (form,), form_class_attrs) # ModelFormSets ############################################################## class BaseModelFormSet(BaseFormSet, AltersData): """ A ``FormSet`` for editing a queryset and/or adding new objects to it. """ model = None edit_only = False # Set of fields that must be unique among forms of this set. 
unique_fields = set() def __init__( self, data=None, files=None, auto_id="id_%s", prefix=None, queryset=None, *, initial=None, **kwargs, ): self.queryset = queryset self.initial_extra = initial super().__init__( **{ "data": data, "files": files, "auto_id": auto_id, "prefix": prefix, **kwargs, } ) def initial_form_count(self): """Return the number of forms that are required in this FormSet.""" if not self.is_bound: return len(self.get_queryset()) return super().initial_form_count() def _existing_object(self, pk): if not hasattr(self, "_object_dict"): self._object_dict = {o.pk: o for o in self.get_queryset()} return self._object_dict.get(pk) def _get_to_python(self, field): """ If the field is a related field, fetch the concrete field's (that is, the ultimate pointed-to field's) to_python. """ while field.remote_field is not None: field = field.remote_field.get_related_field() return field.to_python def _construct_form(self, i, **kwargs): pk_required = i < self.initial_form_count() if pk_required: if self.is_bound: pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name) try: pk = self.data[pk_key] except KeyError: # The primary key is missing. The user may have tampered # with POST data. pass else: to_python = self._get_to_python(self.model._meta.pk) try: pk = to_python(pk) except ValidationError: # The primary key exists but is an invalid value. The # user may have tampered with POST data. pass else: kwargs["instance"] = self._existing_object(pk) else: kwargs["instance"] = self.get_queryset()[i] elif self.initial_extra: # Set initial values for extra forms try: kwargs["initial"] = self.initial_extra[i - self.initial_form_count()] except IndexError: pass form = super()._construct_form(i, **kwargs) if pk_required: form.fields[self.model._meta.pk.name].required = True return form def get_queryset(self): if not hasattr(self, "_queryset"): if self.queryset is not None: qs = self.queryset else: qs = self.model._default_manager.get_queryset() # If the queryset isn't already ordered we need to add an # artificial ordering here to make sure that all formsets # constructed from this queryset have the same form order. if not qs.ordered: qs = qs.order_by(self.model._meta.pk.name) # Removed queryset limiting here. As per discussion re: #13023 # on django-dev, max_num should not prevent existing # related objects/inlines from being displayed. self._queryset = qs return self._queryset def save_new(self, form, commit=True): """Save and return a new model instance for the given form.""" return form.save(commit=commit) def save_existing(self, form, instance, commit=True): """Save and return an existing model instance for the given form.""" return form.save(commit=commit) def delete_existing(self, obj, commit=True): """Deletes an existing model instance.""" if commit: obj.delete() def save(self, commit=True): """ Save model instances for every form, adding and changing instances as necessary, and return the list of instances. """ if not commit: self.saved_forms = [] def save_m2m(): for form in self.saved_forms: form.save_m2m() self.save_m2m = save_m2m if self.edit_only: return self.save_existing_objects(commit) else: return self.save_existing_objects(commit) + self.save_new_objects(commit) save.alters_data = True def clean(self): self.validate_unique() def validate_unique(self): # Collect unique_checks and date_checks to run from all the forms. 
all_unique_checks = set() all_date_checks = set() forms_to_delete = self.deleted_forms valid_forms = [ form for form in self.forms if form.is_valid() and form not in forms_to_delete ] for form in valid_forms: exclude = form._get_validation_exclusions() unique_checks, date_checks = form.instance._get_unique_checks( exclude=exclude, include_meta_constraints=True, ) all_unique_checks.update(unique_checks) all_date_checks.update(date_checks) errors = [] # Do each of the unique checks (unique and unique_together) for uclass, unique_check in all_unique_checks: seen_data = set() for form in valid_forms: # Get the data for the set of fields that must be unique among # the forms. row_data = ( field if field in self.unique_fields else form.cleaned_data[field] for field in unique_check if field in form.cleaned_data ) # Reduce Model instances to their primary key values row_data = tuple( d._get_pk_val() if hasattr(d, "_get_pk_val") # Prevent "unhashable type: list" errors later on. else tuple(d) if isinstance(d, list) else d for d in row_data ) if row_data and None not in row_data: # if we've already seen it then we have a uniqueness failure if row_data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_unique_error_message(unique_check)) form._errors[NON_FIELD_ERRORS] = self.error_class( [self.get_form_error()], renderer=self.renderer, ) # Remove the data from the cleaned_data dict since it # was invalid. for field in unique_check: if field in form.cleaned_data: del form.cleaned_data[field] # mark the data as seen seen_data.add(row_data) # iterate over each of the date checks now for date_check in all_date_checks: seen_data = set() uclass, lookup, field, unique_for = date_check for form in valid_forms: # see if we have data for both fields if ( form.cleaned_data and form.cleaned_data[field] is not None and form.cleaned_data[unique_for] is not None ): # if it's a date lookup we need to get the data for all the fields if lookup == "date": date = form.cleaned_data[unique_for] date_data = (date.year, date.month, date.day) # otherwise it's just the attribute on the date/datetime # object else: date_data = (getattr(form.cleaned_data[unique_for], lookup),) data = (form.cleaned_data[field],) + date_data # if we've already seen it then we have a uniqueness failure if data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_date_error_message(date_check)) form._errors[NON_FIELD_ERRORS] = self.error_class( [self.get_form_error()], renderer=self.renderer, ) # Remove the data from the cleaned_data dict since it # was invalid. del form.cleaned_data[field] # mark the data as seen seen_data.add(data) if errors: raise ValidationError(errors) def get_unique_error_message(self, unique_check): if len(unique_check) == 1: return gettext("Please correct the duplicate data for %(field)s.") % { "field": unique_check[0], } else: return gettext( "Please correct the duplicate data for %(field)s, which must be unique." ) % { "field": get_text_list(unique_check, _("and")), } def get_date_error_message(self, date_check): return gettext( "Please correct the duplicate data for %(field_name)s " "which must be unique for the %(lookup)s in %(date_field)s." 
) % { "field_name": date_check[2], "date_field": date_check[3], "lookup": str(date_check[1]), } def get_form_error(self): return gettext("Please correct the duplicate values below.") def save_existing_objects(self, commit=True): self.changed_objects = [] self.deleted_objects = [] if not self.initial_forms: return [] saved_instances = [] forms_to_delete = self.deleted_forms for form in self.initial_forms: obj = form.instance # If the pk is None, it means either: # 1. The object is an unexpected empty model, created by invalid # POST data such as an object outside the formset's queryset. # 2. The object was already deleted from the database. if obj.pk is None: continue if form in forms_to_delete: self.deleted_objects.append(obj) self.delete_existing(obj, commit=commit) elif form.has_changed(): self.changed_objects.append((obj, form.changed_data)) saved_instances.append(self.save_existing(form, obj, commit=commit)) if not commit: self.saved_forms.append(form) return saved_instances def save_new_objects(self, commit=True): self.new_objects = [] for form in self.extra_forms: if not form.has_changed(): continue # If someone has marked an add form for deletion, don't save the # object. if self.can_delete and self._should_delete_form(form): continue self.new_objects.append(self.save_new(form, commit=commit)) if not commit: self.saved_forms.append(form) return self.new_objects def add_fields(self, form, index): """Add a hidden field for the object's primary key.""" from django.db.models import AutoField, ForeignKey, OneToOneField self._pk_field = pk = self.model._meta.pk # If a pk isn't editable, then it won't be on the form, so we need to # add it here so we can tell which object is which when we get the # data back. Generally, pk.editable should be false, but for some # reason, auto_created pk fields and AutoField's editable attribute is # True, so check for that as well. def pk_is_not_editable(pk): return ( (not pk.editable) or (pk.auto_created or isinstance(pk, AutoField)) or ( pk.remote_field and pk.remote_field.parent_link and pk_is_not_editable(pk.remote_field.model._meta.pk) ) ) if pk_is_not_editable(pk) or pk.name not in form.fields: if form.is_bound: # If we're adding the related instance, ignore its primary key # as it could be an auto-generated default which isn't actually # in the database. 
pk_value = None if form.instance._state.adding else form.instance.pk else: try: if index is not None: pk_value = self.get_queryset()[index].pk else: pk_value = None except IndexError: pk_value = None if isinstance(pk, (ForeignKey, OneToOneField)): qs = pk.remote_field.model._default_manager.get_queryset() else: qs = self.model._default_manager.get_queryset() qs = qs.using(form.instance._state.db) if form._meta.widgets: widget = form._meta.widgets.get(self._pk_field.name, HiddenInput) else: widget = HiddenInput form.fields[self._pk_field.name] = ModelChoiceField( qs, initial=pk_value, required=False, widget=widget ) super().add_fields(form, index) def modelformset_factory( model, form=ModelForm, formfield_callback=None, formset=BaseModelFormSet, extra=1, can_delete=False, can_order=False, max_num=None, fields=None, exclude=None, widgets=None, validate_max=False, localized_fields=None, labels=None, help_texts=None, error_messages=None, min_num=None, validate_min=False, field_classes=None, absolute_max=None, can_delete_extra=True, renderer=None, edit_only=False, ): """Return a FormSet class for the given Django model class.""" meta = getattr(form, "Meta", None) if ( getattr(meta, "fields", fields) is None and getattr(meta, "exclude", exclude) is None ): raise ImproperlyConfigured( "Calling modelformset_factory without defining 'fields' or " "'exclude' explicitly is prohibited." ) form = modelform_factory( model, form=form, fields=fields, exclude=exclude, formfield_callback=formfield_callback, widgets=widgets, localized_fields=localized_fields, labels=labels, help_texts=help_texts, error_messages=error_messages, field_classes=field_classes, ) FormSet = formset_factory( form, formset, extra=extra, min_num=min_num, max_num=max_num, can_order=can_order, can_delete=can_delete, validate_min=validate_min, validate_max=validate_max, absolute_max=absolute_max, can_delete_extra=can_delete_extra, renderer=renderer, ) FormSet.model = model FormSet.edit_only = edit_only return FormSet # InlineFormSets ############################################################# class BaseInlineFormSet(BaseModelFormSet): """A formset for child objects related to a parent.""" def __init__( self, data=None, files=None, instance=None, save_as_new=False, prefix=None, queryset=None, **kwargs, ): if instance is None: self.instance = self.fk.remote_field.model() else: self.instance = instance self.save_as_new = save_as_new if queryset is None: queryset = self.model._default_manager if self.instance.pk is not None: qs = queryset.filter(**{self.fk.name: self.instance}) else: qs = queryset.none() self.unique_fields = {self.fk.name} super().__init__(data, files, prefix=prefix, queryset=qs, **kwargs) # Add the generated field to form._meta.fields if it's defined to make # sure validation isn't skipped on that field. if self.form._meta.fields and self.fk.name not in self.form._meta.fields: if isinstance(self.form._meta.fields, tuple): self.form._meta.fields = list(self.form._meta.fields) self.form._meta.fields.append(self.fk.name) def initial_form_count(self): if self.save_as_new: return 0 return super().initial_form_count() def _construct_form(self, i, **kwargs): form = super()._construct_form(i, **kwargs) if self.save_as_new: mutable = getattr(form.data, "_mutable", None) # Allow modifying an immutable QueryDict. 
if mutable is not None: form.data._mutable = True # Remove the primary key from the form's data, we are only # creating new instances form.data[form.add_prefix(self._pk_field.name)] = None # Remove the foreign key from the form's data form.data[form.add_prefix(self.fk.name)] = None if mutable is not None: form.data._mutable = mutable # Set the fk value here so that the form can do its validation. fk_value = self.instance.pk if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name: fk_value = getattr(self.instance, self.fk.remote_field.field_name) fk_value = getattr(fk_value, "pk", fk_value) setattr(form.instance, self.fk.get_attname(), fk_value) return form @classmethod def get_default_prefix(cls): return cls.fk.remote_field.get_accessor_name(model=cls.model).replace("+", "") def save_new(self, form, commit=True): # Ensure the latest copy of the related instance is present on each # form (it may have been saved after the formset was originally # instantiated). setattr(form.instance, self.fk.name, self.instance) return super().save_new(form, commit=commit) def add_fields(self, form, index): super().add_fields(form, index) if self._pk_field == self.fk: name = self._pk_field.name kwargs = {"pk_field": True} else: # The foreign key field might not be on the form, so we poke at the # Model field to get the label, since we need that for error messages. name = self.fk.name kwargs = { "label": getattr( form.fields.get(name), "label", capfirst(self.fk.verbose_name) ) } # The InlineForeignKeyField assumes that the foreign key relation is # based on the parent model's pk. If this isn't the case, set to_field # to correctly resolve the initial form value. if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name: kwargs["to_field"] = self.fk.remote_field.field_name # If we're adding a new object, ignore a parent's auto-generated key # as it will be regenerated on the save request. if self.instance._state.adding: if kwargs.get("to_field") is not None: to_field = self.instance._meta.get_field(kwargs["to_field"]) else: to_field = self.instance._meta.pk if to_field.has_default(): setattr(self.instance, to_field.attname, None) form.fields[name] = InlineForeignKeyField(self.instance, **kwargs) def get_unique_error_message(self, unique_check): unique_check = [field for field in unique_check if field != self.fk.name] return super().get_unique_error_message(unique_check) def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False): """ Find and return the ForeignKey from model to parent if there is one (return None if can_fail is True and no such field exists). If fk_name is provided, assume it is the name of the ForeignKey field. Unless can_fail is True, raise an exception if there isn't a ForeignKey from model to parent_model. """ # avoid circular import from django.db.models import ForeignKey opts = model._meta if fk_name: fks_to_parent = [f for f in opts.fields if f.name == fk_name] if len(fks_to_parent) == 1: fk = fks_to_parent[0] parent_list = parent_model._meta.get_parent_list() if ( not isinstance(fk, ForeignKey) or ( # ForeignKey to proxy models. fk.remote_field.model._meta.proxy and fk.remote_field.model._meta.proxy_for_model not in parent_list ) or ( # ForeignKey to concrete models. not fk.remote_field.model._meta.proxy and fk.remote_field.model != parent_model and fk.remote_field.model not in parent_list ) ): raise ValueError( "fk_name '%s' is not a ForeignKey to '%s'." 
                    % (fk_name, parent_model._meta.label)
                )
        elif not fks_to_parent:
            raise ValueError(
                "'%s' has no field named '%s'." % (model._meta.label, fk_name)
            )
    else:
        # Try to discover what the ForeignKey from model to parent_model is.
        parent_list = parent_model._meta.get_parent_list()
        fks_to_parent = [
            f
            for f in opts.fields
            if isinstance(f, ForeignKey)
            and (
                f.remote_field.model == parent_model
                or f.remote_field.model in parent_list
                or (
                    f.remote_field.model._meta.proxy
                    and f.remote_field.model._meta.proxy_for_model in parent_list
                )
            )
        ]
        if len(fks_to_parent) == 1:
            fk = fks_to_parent[0]
        elif not fks_to_parent:
            if can_fail:
                return
            raise ValueError(
                "'%s' has no ForeignKey to '%s'."
                % (
                    model._meta.label,
                    parent_model._meta.label,
                )
            )
        else:
            raise ValueError(
                "'%s' has more than one ForeignKey to '%s'. You must specify "
                "a 'fk_name' attribute."
                % (
                    model._meta.label,
                    parent_model._meta.label,
                )
            )
    return fk


def inlineformset_factory(
    parent_model,
    model,
    form=ModelForm,
    formset=BaseInlineFormSet,
    fk_name=None,
    fields=None,
    exclude=None,
    extra=3,
    can_order=False,
    can_delete=True,
    max_num=None,
    formfield_callback=None,
    widgets=None,
    validate_max=False,
    localized_fields=None,
    labels=None,
    help_texts=None,
    error_messages=None,
    min_num=None,
    validate_min=False,
    field_classes=None,
    absolute_max=None,
    can_delete_extra=True,
    renderer=None,
    edit_only=False,
):
    """
    Return an ``InlineFormSet`` for the given kwargs.

    ``fk_name`` must be provided if ``model`` has more than one ``ForeignKey``
    to ``parent_model``.
    """
    fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
    # Enforce max_num=1 when the foreign key to the parent model is unique.
    if fk.unique:
        max_num = 1
    kwargs = {
        "form": form,
        "formfield_callback": formfield_callback,
        "formset": formset,
        "extra": extra,
        "can_delete": can_delete,
        "can_order": can_order,
        "fields": fields,
        "exclude": exclude,
        "min_num": min_num,
        "max_num": max_num,
        "widgets": widgets,
        "validate_min": validate_min,
        "validate_max": validate_max,
        "localized_fields": localized_fields,
        "labels": labels,
        "help_texts": help_texts,
        "error_messages": error_messages,
        "field_classes": field_classes,
        "absolute_max": absolute_max,
        "can_delete_extra": can_delete_extra,
        "renderer": renderer,
        "edit_only": edit_only,
    }
    FormSet = modelformset_factory(model, **kwargs)
    FormSet.fk = fk
    return FormSet


# Fields #####################################################################


class InlineForeignKeyField(Field):
    """
    A basic integer field that deals with validating the given value to a
    given parent instance in an inline.
    """

    widget = HiddenInput
    default_error_messages = {
        "invalid_choice": _("The inline value did not match the parent instance."),
    }

    def __init__(
        self, parent_instance, *args, pk_field=False, to_field=None, **kwargs
    ):
        self.parent_instance = parent_instance
        self.pk_field = pk_field
        self.to_field = to_field
        if self.parent_instance is not None:
            if self.to_field:
                kwargs["initial"] = getattr(self.parent_instance, self.to_field)
            else:
                kwargs["initial"] = self.parent_instance.pk
        kwargs["required"] = False
        super().__init__(*args, **kwargs)

    def clean(self, value):
        if value in self.empty_values:
            if self.pk_field:
                return None
            # If there is no value, act as we did before.
            return self.parent_instance
        # Ensure that we compare the values as equal types.
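        # (A hypothetical example: a POSTed value of "7" must compare equal
        # to an integer parent pk of 7, hence the str() on both sides below.)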
if self.to_field: orig = getattr(self.parent_instance, self.to_field) else: orig = self.parent_instance.pk if str(value) != str(orig): raise ValidationError( self.error_messages["invalid_choice"], code="invalid_choice" ) return self.parent_instance def has_changed(self, initial, data): return False class ModelChoiceIteratorValue: def __init__(self, value, instance): self.value = value self.instance = instance def __str__(self): return str(self.value) def __hash__(self): return hash(self.value) def __eq__(self, other): if isinstance(other, ModelChoiceIteratorValue): other = other.value return self.value == other class ModelChoiceIterator: def __init__(self, field): self.field = field self.queryset = field.queryset def __iter__(self): if self.field.empty_label is not None: yield ("", self.field.empty_label) queryset = self.queryset # Can't use iterator() when queryset uses prefetch_related() if not queryset._prefetch_related_lookups: queryset = queryset.iterator() for obj in queryset: yield self.choice(obj) def __len__(self): # count() adds a query but uses less memory since the QuerySet results # won't be cached. In most cases, the choices will only be iterated on, # and __len__() won't be called. return self.queryset.count() + (1 if self.field.empty_label is not None else 0) def __bool__(self): return self.field.empty_label is not None or self.queryset.exists() def choice(self, obj): return ( ModelChoiceIteratorValue(self.field.prepare_value(obj), obj), self.field.label_from_instance(obj), ) class ModelChoiceField(ChoiceField): """A ChoiceField whose choices are a model QuerySet.""" # This class is a subclass of ChoiceField for purity, but it doesn't # actually use any of ChoiceField's implementation. default_error_messages = { "invalid_choice": _( "Select a valid choice. That choice is not one of the available choices." ), } iterator = ModelChoiceIterator def __init__( self, queryset, *, empty_label="---------", required=True, widget=None, label=None, initial=None, help_text="", to_field_name=None, limit_choices_to=None, blank=False, **kwargs, ): # Call Field instead of ChoiceField __init__() because we don't need # ChoiceField.__init__(). Field.__init__( self, required=required, widget=widget, label=label, initial=initial, help_text=help_text, **kwargs, ) if (required and initial is not None) or ( isinstance(self.widget, RadioSelect) and not blank ): self.empty_label = None else: self.empty_label = empty_label self.queryset = queryset self.limit_choices_to = limit_choices_to # limit the queryset later. self.to_field_name = to_field_name def get_limit_choices_to(self): """ Return ``limit_choices_to`` for this form field. If it is a callable, invoke it and return the result. """ if callable(self.limit_choices_to): return self.limit_choices_to() return self.limit_choices_to def __deepcopy__(self, memo): result = super(ChoiceField, self).__deepcopy__(memo) # Need to force a new ModelChoiceIterator to be created, bug #11183 if self.queryset is not None: result.queryset = self.queryset.all() return result def _get_queryset(self): return self._queryset def _set_queryset(self, queryset): self._queryset = None if queryset is None else queryset.all() self.widget.choices = self.choices queryset = property(_get_queryset, _set_queryset) # this method will be used to create object labels by the QuerySetIterator. # Override it to customize the label. def label_from_instance(self, obj): """ Convert objects into strings and generate the labels for the choices presented by this object. 
        Subclasses can override this method to customize the display of
        the choices.
        """
        return str(obj)

    def _get_choices(self):
        # If self._choices is set, then somebody must have manually set
        # the property self.choices. In this case, just return self._choices.
        if hasattr(self, "_choices"):
            return self._choices

        # Otherwise, execute the QuerySet in self.queryset to determine the
        # choices dynamically. Return a fresh ModelChoiceIterator that has not been
        # consumed. Note that we're instantiating a new ModelChoiceIterator *each*
        # time _get_choices() is called (and, thus, each time self.choices is
        # accessed) so that we can ensure the QuerySet has not been consumed. This
        # construct might look complicated but it allows for lazy evaluation of
        # the queryset.
        return self.iterator(self)

    choices = property(_get_choices, ChoiceField._set_choices)

    def prepare_value(self, value):
        if hasattr(value, "_meta"):
            if self.to_field_name:
                return value.serializable_value(self.to_field_name)
            else:
                return value.pk
        return super().prepare_value(value)

    def to_python(self, value):
        if value in self.empty_values:
            return None
        try:
            key = self.to_field_name or "pk"
            if isinstance(value, self.queryset.model):
                value = getattr(value, key)
            value = self.queryset.get(**{key: value})
        except (ValueError, TypeError, self.queryset.model.DoesNotExist):
            raise ValidationError(
                self.error_messages["invalid_choice"],
                code="invalid_choice",
                params={"value": value},
            )
        return value

    def validate(self, value):
        return Field.validate(self, value)

    def has_changed(self, initial, data):
        if self.disabled:
            return False
        initial_value = initial if initial is not None else ""
        data_value = data if data is not None else ""
        return str(self.prepare_value(initial_value)) != str(data_value)


class ModelMultipleChoiceField(ModelChoiceField):
    """A MultipleChoiceField whose choices are a model QuerySet."""

    widget = SelectMultiple
    hidden_widget = MultipleHiddenInput
    default_error_messages = {
        "invalid_list": _("Enter a list of values."),
        "invalid_choice": _(
            "Select a valid choice. %(value)s is not one of the available choices."
        ),
        "invalid_pk_value": _("“%(pk)s” is not a valid value."),
    }

    def __init__(self, queryset, **kwargs):
        super().__init__(queryset, empty_label=None, **kwargs)

    def to_python(self, value):
        if not value:
            return []
        return list(self._check_values(value))

    def clean(self, value):
        value = self.prepare_value(value)
        if self.required and not value:
            raise ValidationError(self.error_messages["required"], code="required")
        elif not self.required and not value:
            return self.queryset.none()
        if not isinstance(value, (list, tuple)):
            raise ValidationError(
                self.error_messages["invalid_list"],
                code="invalid_list",
            )
        qs = self._check_values(value)
        # Since this overrides the inherited ModelChoiceField.clean(),
        # run custom validators here.
        self.run_validators(value)
        return qs

    def _check_values(self, value):
        """
        Given a list of possible PK values, return a QuerySet of the
        corresponding objects. Raise a ValidationError if a given value is
        invalid (not a valid PK, not in the queryset, etc.).
        """
        key = self.to_field_name or "pk"
        # Deduplicate given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
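        # (For example, ["1", "1", "2"] collapses to frozenset({"1", "2"})
        # before the single __in query below.)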
try: value = frozenset(value) except TypeError: # list of lists isn't hashable, for example raise ValidationError( self.error_messages["invalid_list"], code="invalid_list", ) for pk in value: try: self.queryset.filter(**{key: pk}) except (ValueError, TypeError): raise ValidationError( self.error_messages["invalid_pk_value"], code="invalid_pk_value", params={"pk": pk}, ) qs = self.queryset.filter(**{"%s__in" % key: value}) pks = {str(getattr(o, key)) for o in qs} for val in value: if str(val) not in pks: raise ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": val}, ) return qs def prepare_value(self, value): if ( hasattr(value, "__iter__") and not isinstance(value, str) and not hasattr(value, "_meta") ): prepare_value = super().prepare_value return [prepare_value(v) for v in value] return super().prepare_value(value) def has_changed(self, initial, data): if self.disabled: return False if initial is None: initial = [] if data is None: data = [] if len(initial) != len(data): return True initial_set = {str(value) for value in self.prepare_value(initial)} data_set = {str(value) for value in data} return data_set != initial_set def modelform_defines_fields(form_class): return hasattr(form_class, "_meta") and ( form_class._meta.fields is not None or form_class._meta.exclude is not None )
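
# ----------------------------------------------------------------------------
# Usage sketch (illustrative comments only, not executed on import). Assuming
# a hypothetical Author model and a Book model with a single ForeignKey to
# Author, the factories above are typically driven like this:
#
#     BookFormSet = modelformset_factory(Book, fields=["title"], extra=2)
#     formset = BookFormSet(queryset=Book.objects.all())
#
#     BookInlineFormSet = inlineformset_factory(
#         Author, Book, fields=["title"], extra=1
#     )
#     formset = BookInlineFormSet(instance=some_author)
#
# inlineformset_factory() locates the ForeignKey via _get_foreign_key() and
# raises ValueError if Book had zero or several ForeignKeys to Author and no
# fk_name was passed; omitting both 'fields' and 'exclude' raises
# ImproperlyConfigured, as enforced in modelformset_factory() above.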
""" HTML Widget classes """ import copy import datetime import warnings from collections import defaultdict from itertools import chain from django.forms.utils import to_current_timezone from django.templatetags.static import static from django.utils import formats from django.utils.datastructures import OrderedSet from django.utils.dates import MONTHS from django.utils.formats import get_format from django.utils.html import format_html, html_safe from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import mark_safe from django.utils.topological_sort import CyclicDependencyError, stable_topological_sort from django.utils.translation import gettext_lazy as _ from .renderers import get_default_renderer __all__ = ( "Media", "MediaDefiningClass", "Widget", "TextInput", "NumberInput", "EmailInput", "URLInput", "PasswordInput", "HiddenInput", "MultipleHiddenInput", "FileInput", "ClearableFileInput", "Textarea", "DateInput", "DateTimeInput", "TimeInput", "CheckboxInput", "Select", "NullBooleanSelect", "SelectMultiple", "RadioSelect", "CheckboxSelectMultiple", "MultiWidget", "SplitDateTimeWidget", "SplitHiddenDateTimeWidget", "SelectDateWidget", ) MEDIA_TYPES = ("css", "js") class MediaOrderConflictWarning(RuntimeWarning): pass @html_safe class Media: def __init__(self, media=None, css=None, js=None): if media is not None: css = getattr(media, "css", {}) js = getattr(media, "js", []) else: if css is None: css = {} if js is None: js = [] self._css_lists = [css] self._js_lists = [js] def __repr__(self): return "Media(css=%r, js=%r)" % (self._css, self._js) def __str__(self): return self.render() @property def _css(self): css = defaultdict(list) for css_list in self._css_lists: for medium, sublist in css_list.items(): css[medium].append(sublist) return {medium: self.merge(*lists) for medium, lists in css.items()} @property def _js(self): return self.merge(*self._js_lists) def render(self): return mark_safe( "\n".join( chain.from_iterable( getattr(self, "render_" + name)() for name in MEDIA_TYPES ) ) ) def render_js(self): return [ path.__html__() if hasattr(path, "__html__") else format_html('<script src="{}"></script>', self.absolute_path(path)) for path in self._js ] def render_css(self): # To keep rendering order consistent, we can't just iterate over items(). # We need to sort the keys, and iterate over the sorted list. media = sorted(self._css) return chain.from_iterable( [ path.__html__() if hasattr(path, "__html__") else format_html( '<link href="{}" media="{}" rel="stylesheet">', self.absolute_path(path), medium, ) for path in self._css[medium] ] for medium in media ) def absolute_path(self, path): """ Given a relative or absolute path to a static asset, return an absolute path. An absolute path will be returned unchanged while a relative path will be passed to django.templatetags.static.static(). """ if path.startswith(("http://", "https://", "/")): return path return static(path) def __getitem__(self, name): """Return a Media object that only contains media of the given type.""" if name in MEDIA_TYPES: return Media(**{str(name): getattr(self, "_" + name)}) raise KeyError('Unknown media type "%s"' % name) @staticmethod def merge(*lists): """ Merge lists while trying to keep the relative order of the elements. Warn if the lists have the same elements in a different relative order. For static assets it can be important to have them included in the DOM in a certain order. 
        In JavaScript you may not be able to reference a global or in CSS you
        might want to override a style.
        """
        dependency_graph = defaultdict(set)
        all_items = OrderedSet()
        for list_ in filter(None, lists):
            head = list_[0]
            # The first items depend on nothing but have to be part of the
            # dependency graph to be included in the result.
            dependency_graph.setdefault(head, set())
            for item in list_:
                all_items.add(item)
                # No self dependencies
                if head != item:
                    dependency_graph[item].add(head)
                head = item
        try:
            return stable_topological_sort(all_items, dependency_graph)
        except CyclicDependencyError:
            warnings.warn(
                "Detected duplicate Media files in an opposite order: {}".format(
                    ", ".join(repr(list_) for list_ in lists)
                ),
                MediaOrderConflictWarning,
            )
            return list(all_items)

    def __add__(self, other):
        combined = Media()
        combined._css_lists = self._css_lists[:]
        combined._js_lists = self._js_lists[:]
        for item in other._css_lists:
            if item and item not in self._css_lists:
                combined._css_lists.append(item)
        for item in other._js_lists:
            if item and item not in self._js_lists:
                combined._js_lists.append(item)
        return combined


def media_property(cls):
    def _media(self):
        # Get the media property of the superclass, if it exists
        sup_cls = super(cls, self)
        try:
            base = sup_cls.media
        except AttributeError:
            base = Media()

        # Get the media definition for this class
        definition = getattr(cls, "Media", None)
        if definition:
            extend = getattr(definition, "extend", True)
            if extend:
                if extend is True:
                    m = base
                else:
                    m = Media()
                    for medium in extend:
                        m += base[medium]
                return m + Media(definition)
            return Media(definition)
        return base

    return property(_media)


class MediaDefiningClass(type):
    """
    Metaclass for classes that can have media definitions.
    """

    def __new__(mcs, name, bases, attrs):
        new_class = super().__new__(mcs, name, bases, attrs)

        if "media" not in attrs:
            new_class.media = media_property(new_class)

        return new_class


class Widget(metaclass=MediaDefiningClass):
    needs_multipart_form = False  # Determines whether this widget needs a
    # multipart form.
    is_localized = False
    is_required = False
    supports_microseconds = True
    use_fieldset = False

    def __init__(self, attrs=None):
        self.attrs = {} if attrs is None else attrs.copy()

    def __deepcopy__(self, memo):
        obj = copy.copy(self)
        obj.attrs = self.attrs.copy()
        memo[id(self)] = obj
        return obj

    @property
    def is_hidden(self):
        return self.input_type == "hidden" if hasattr(self, "input_type") else False

    def subwidgets(self, name, value, attrs=None):
        context = self.get_context(name, value, attrs)
        yield context["widget"]

    def format_value(self, value):
        """
        Return a value as it should appear when rendered in a template.
""" if value == "" or value is None: return None if self.is_localized: return formats.localize_input(value) return str(value) def get_context(self, name, value, attrs): return { "widget": { "name": name, "is_hidden": self.is_hidden, "required": self.is_required, "value": self.format_value(value), "attrs": self.build_attrs(self.attrs, attrs), "template_name": self.template_name, }, } def render(self, name, value, attrs=None, renderer=None): """Render the widget as an HTML string.""" context = self.get_context(name, value, attrs) return self._render(self.template_name, context, renderer) def _render(self, template_name, context, renderer=None): if renderer is None: renderer = get_default_renderer() return mark_safe(renderer.render(template_name, context)) def build_attrs(self, base_attrs, extra_attrs=None): """Build an attribute dictionary.""" return {**base_attrs, **(extra_attrs or {})} def value_from_datadict(self, data, files, name): """ Given a dictionary of data and this widget's name, return the value of this widget or None if it's not provided. """ return data.get(name) def value_omitted_from_data(self, data, files, name): return name not in data def id_for_label(self, id_): """ Return the HTML ID attribute of this Widget for use by a <label>, given the ID of the field. Return an empty string if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. In that case, this method should return an ID value that corresponds to the first ID in the widget's tags. """ return id_ def use_required_attribute(self, initial): return not self.is_hidden class Input(Widget): """ Base class for all <input> widgets. """ input_type = None # Subclasses must define this. template_name = "django/forms/widgets/input.html" def __init__(self, attrs=None): if attrs is not None: attrs = attrs.copy() self.input_type = attrs.pop("type", self.input_type) super().__init__(attrs) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context["widget"]["type"] = self.input_type return context class TextInput(Input): input_type = "text" template_name = "django/forms/widgets/text.html" class NumberInput(Input): input_type = "number" template_name = "django/forms/widgets/number.html" class EmailInput(Input): input_type = "email" template_name = "django/forms/widgets/email.html" class URLInput(Input): input_type = "url" template_name = "django/forms/widgets/url.html" class PasswordInput(Input): input_type = "password" template_name = "django/forms/widgets/password.html" def __init__(self, attrs=None, render_value=False): super().__init__(attrs) self.render_value = render_value def get_context(self, name, value, attrs): if not self.render_value: value = None return super().get_context(name, value, attrs) class HiddenInput(Input): input_type = "hidden" template_name = "django/forms/widgets/hidden.html" class MultipleHiddenInput(HiddenInput): """ Handle <input type="hidden"> for fields that have a list of values. """ template_name = "django/forms/widgets/multiple_hidden.html" def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) final_attrs = context["widget"]["attrs"] id_ = context["widget"]["attrs"].get("id") subwidgets = [] for index, value_ in enumerate(context["widget"]["value"]): widget_attrs = final_attrs.copy() if id_: # An ID attribute was given. Add a numeric index as a suffix # so that the inputs don't all have the same ID attribute. 
widget_attrs["id"] = "%s_%s" % (id_, index) widget = HiddenInput() widget.is_required = self.is_required subwidgets.append(widget.get_context(name, value_, widget_attrs)["widget"]) context["widget"]["subwidgets"] = subwidgets return context def value_from_datadict(self, data, files, name): try: getter = data.getlist except AttributeError: getter = data.get return getter(name) def format_value(self, value): return [] if value is None else value class FileInput(Input): input_type = "file" needs_multipart_form = True template_name = "django/forms/widgets/file.html" def format_value(self, value): """File input never renders a value.""" return def value_from_datadict(self, data, files, name): "File widgets take data from FILES, not POST" return files.get(name) def value_omitted_from_data(self, data, files, name): return name not in files def use_required_attribute(self, initial): return super().use_required_attribute(initial) and not initial FILE_INPUT_CONTRADICTION = object() class ClearableFileInput(FileInput): clear_checkbox_label = _("Clear") initial_text = _("Currently") input_text = _("Change") template_name = "django/forms/widgets/clearable_file_input.html" def clear_checkbox_name(self, name): """ Given the name of the file input, return the name of the clear checkbox input. """ return name + "-clear" def clear_checkbox_id(self, name): """ Given the name of the clear checkbox input, return the HTML id for it. """ return name + "_id" def is_initial(self, value): """ Return whether value is considered to be initial value. """ return bool(value and getattr(value, "url", False)) def format_value(self, value): """ Return the file object if it has a defined url attribute. """ if self.is_initial(value): return value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) checkbox_name = self.clear_checkbox_name(name) checkbox_id = self.clear_checkbox_id(checkbox_name) context["widget"].update( { "checkbox_name": checkbox_name, "checkbox_id": checkbox_id, "is_initial": self.is_initial(value), "input_text": self.input_text, "initial_text": self.initial_text, "clear_checkbox_label": self.clear_checkbox_label, } ) context["widget"]["attrs"].setdefault("disabled", False) return context def value_from_datadict(self, data, files, name): upload = super().value_from_datadict(data, files, name) if not self.is_required and CheckboxInput().value_from_datadict( data, files, self.clear_checkbox_name(name) ): if upload: # If the user contradicts themselves (uploads a new file AND # checks the "clear" checkbox), we return a unique marker # object that FileField will turn into a ValidationError. 
return FILE_INPUT_CONTRADICTION # False signals to clear any existing value, as opposed to just None return False return upload def value_omitted_from_data(self, data, files, name): return ( super().value_omitted_from_data(data, files, name) and self.clear_checkbox_name(name) not in data ) class Textarea(Widget): template_name = "django/forms/widgets/textarea.html" def __init__(self, attrs=None): # Use slightly better defaults than HTML's 20x2 box default_attrs = {"cols": "40", "rows": "10"} if attrs: default_attrs.update(attrs) super().__init__(default_attrs) class DateTimeBaseInput(TextInput): format_key = "" supports_microseconds = False def __init__(self, attrs=None, format=None): super().__init__(attrs) self.format = format or None def format_value(self, value): return formats.localize_input( value, self.format or formats.get_format(self.format_key)[0] ) class DateInput(DateTimeBaseInput): format_key = "DATE_INPUT_FORMATS" template_name = "django/forms/widgets/date.html" class DateTimeInput(DateTimeBaseInput): format_key = "DATETIME_INPUT_FORMATS" template_name = "django/forms/widgets/datetime.html" class TimeInput(DateTimeBaseInput): format_key = "TIME_INPUT_FORMATS" template_name = "django/forms/widgets/time.html" # Defined at module level so that CheckboxInput is picklable (#17976) def boolean_check(v): return not (v is False or v is None or v == "") class CheckboxInput(Input): input_type = "checkbox" template_name = "django/forms/widgets/checkbox.html" def __init__(self, attrs=None, check_test=None): super().__init__(attrs) # check_test is a callable that takes a value and returns True # if the checkbox should be checked for that value. self.check_test = boolean_check if check_test is None else check_test def format_value(self, value): """Only return the 'value' attribute if value isn't empty.""" if value is True or value is False or value is None or value == "": return return str(value) def get_context(self, name, value, attrs): if self.check_test(value): attrs = {**(attrs or {}), "checked": True} return super().get_context(name, value, attrs) def value_from_datadict(self, data, files, name): if name not in data: # A missing value means False because HTML form submission does not # send results for unselected checkboxes. return False value = data.get(name) # Translate true and false strings to boolean values. values = {"true": True, "false": False} if isinstance(value, str): value = values.get(value.lower(), value) return bool(value) def value_omitted_from_data(self, data, files, name): # HTML checkboxes don't appear in POST data if not checked, so it's # never known if the value is actually omitted. return False class ChoiceWidget(Widget): allow_multiple_selected = False input_type = None template_name = None option_template_name = None add_id_index = True checked_attribute = {"checked": True} option_inherits_attrs = True def __init__(self, attrs=None, choices=()): super().__init__(attrs) # choices can be any iterable, but we may need to render this widget # multiple times. Thus, collapse it into a list so it can be consumed # more than once. self.choices = list(choices) def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() obj.choices = copy.copy(self.choices) memo[id(self)] = obj return obj def subwidgets(self, name, value, attrs=None): """ Yield all "subwidgets" of this widget. Used to enable iterating options from a BoundField for choice widgets. 
""" value = self.format_value(value) yield from self.options(name, value, attrs) def options(self, name, value, attrs=None): """Yield a flat list of options for this widget.""" for group in self.optgroups(name, value, attrs): yield from group[1] def optgroups(self, name, value, attrs=None): """Return a list of optgroups for this widget.""" groups = [] has_selected = False for index, (option_value, option_label) in enumerate(self.choices): if option_value is None: option_value = "" subgroup = [] if isinstance(option_label, (list, tuple)): group_name = option_value subindex = 0 choices = option_label else: group_name = None subindex = None choices = [(option_value, option_label)] groups.append((group_name, subgroup, index)) for subvalue, sublabel in choices: selected = (not has_selected or self.allow_multiple_selected) and str( subvalue ) in value has_selected |= selected subgroup.append( self.create_option( name, subvalue, sublabel, selected, index, subindex=subindex, attrs=attrs, ) ) if subindex is not None: subindex += 1 return groups def create_option( self, name, value, label, selected, index, subindex=None, attrs=None ): index = str(index) if subindex is None else "%s_%s" % (index, subindex) option_attrs = ( self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {} ) if selected: option_attrs.update(self.checked_attribute) if "id" in option_attrs: option_attrs["id"] = self.id_for_label(option_attrs["id"], index) return { "name": name, "value": value, "label": label, "selected": selected, "index": index, "attrs": option_attrs, "type": self.input_type, "template_name": self.option_template_name, "wrap_label": True, } def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context["widget"]["optgroups"] = self.optgroups( name, context["widget"]["value"], attrs ) return context def id_for_label(self, id_, index="0"): """ Use an incremented id for each option where the main widget references the zero index. """ if id_ and self.add_id_index: id_ = "%s_%s" % (id_, index) return id_ def value_from_datadict(self, data, files, name): getter = data.get if self.allow_multiple_selected: try: getter = data.getlist except AttributeError: pass return getter(name) def format_value(self, value): """Return selected values as a list.""" if value is None and self.allow_multiple_selected: return [] if not isinstance(value, (tuple, list)): value = [value] return [str(v) if v is not None else "" for v in value] class Select(ChoiceWidget): input_type = "select" template_name = "django/forms/widgets/select.html" option_template_name = "django/forms/widgets/select_option.html" add_id_index = False checked_attribute = {"selected": True} option_inherits_attrs = False def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) if self.allow_multiple_selected: context["widget"]["attrs"]["multiple"] = True return context @staticmethod def _choice_has_empty_value(choice): """Return True if the choice's value is empty string or None.""" value, _ = choice return value is None or value == "" def use_required_attribute(self, initial): """ Don't render 'required' if the first <option> has a value, as that's invalid HTML. """ use_required_attribute = super().use_required_attribute(initial) # 'required' is always okay for <select multiple>. 
        if self.allow_multiple_selected:
            return use_required_attribute
        first_choice = next(iter(self.choices), None)
        return (
            use_required_attribute
            and first_choice is not None
            and self._choice_has_empty_value(first_choice)
        )


class NullBooleanSelect(Select):
    """
    A Select Widget intended to be used with NullBooleanField.
    """

    def __init__(self, attrs=None):
        choices = (
            ("unknown", _("Unknown")),
            ("true", _("Yes")),
            ("false", _("No")),
        )
        super().__init__(attrs, choices)

    def format_value(self, value):
        try:
            return {
                True: "true",
                False: "false",
                "true": "true",
                "false": "false",
                # For backwards compatibility with Django < 2.2.
                "2": "true",
                "3": "false",
            }[value]
        except KeyError:
            return "unknown"

    def value_from_datadict(self, data, files, name):
        value = data.get(name)
        return {
            True: True,
            "True": True,
            "False": False,
            False: False,
            "true": True,
            "false": False,
            # For backwards compatibility with Django < 2.2.
            "2": True,
            "3": False,
        }.get(value)


class SelectMultiple(Select):
    allow_multiple_selected = True

    def value_from_datadict(self, data, files, name):
        try:
            getter = data.getlist
        except AttributeError:
            getter = data.get
        return getter(name)

    def value_omitted_from_data(self, data, files, name):
        # An unselected <select multiple> doesn't appear in POST data, so it's
        # never known if the value is actually omitted.
        return False


class RadioSelect(ChoiceWidget):
    input_type = "radio"
    template_name = "django/forms/widgets/radio.html"
    option_template_name = "django/forms/widgets/radio_option.html"
    use_fieldset = True

    def id_for_label(self, id_, index=None):
        """
        Don't include for="field_0" in <label> to improve accessibility when
        using a screen reader. In addition, clicking such a label would toggle
        the first input.
        """
        if index is None:
            return ""
        return super().id_for_label(id_, index)


class CheckboxSelectMultiple(RadioSelect):
    allow_multiple_selected = True
    input_type = "checkbox"
    template_name = "django/forms/widgets/checkbox_select.html"
    option_template_name = "django/forms/widgets/checkbox_option.html"

    def use_required_attribute(self, initial):
        # Don't use the 'required' attribute because browser validation would
        # require all checkboxes to be checked instead of at least one.
        return False

    def value_omitted_from_data(self, data, files, name):
        # HTML checkboxes don't appear in POST data if not checked, so it's
        # never known if the value is actually omitted.
        return False


class MultiWidget(Widget):
    """
    A widget that is composed of multiple widgets.

    In addition to the values added by Widget.get_context(), this widget
    adds a list of subwidgets to the context as widget['subwidgets'].
    These can be looped over and rendered like normal widgets.

    You'll probably want to use this class with MultiValueField.
    """

    template_name = "django/forms/widgets/multiwidget.html"
    use_fieldset = True

    def __init__(self, widgets, attrs=None):
        if isinstance(widgets, dict):
            self.widgets_names = [("_%s" % name) if name else "" for name in widgets]
            widgets = widgets.values()
        else:
            self.widgets_names = ["_%s" % i for i in range(len(widgets))]
        self.widgets = [w() if isinstance(w, type) else w for w in widgets]
        super().__init__(attrs)

    @property
    def is_hidden(self):
        return all(w.is_hidden for w in self.widgets)

    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        if self.is_localized:
            for widget in self.widgets:
                widget.is_localized = self.is_localized
        # value is a list/tuple of values, each corresponding to a widget
        # in self.widgets.
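        # (e.g. SplitDateTimeWidget.decompress() below turns a single
        # datetime into [date, time], one entry per subwidget.)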
if not isinstance(value, (list, tuple)): value = self.decompress(value) final_attrs = context["widget"]["attrs"] input_type = final_attrs.pop("type", None) id_ = final_attrs.get("id") subwidgets = [] for i, (widget_name, widget) in enumerate( zip(self.widgets_names, self.widgets) ): if input_type is not None: widget.input_type = input_type widget_name = name + widget_name try: widget_value = value[i] except IndexError: widget_value = None if id_: widget_attrs = final_attrs.copy() widget_attrs["id"] = "%s_%s" % (id_, i) else: widget_attrs = final_attrs subwidgets.append( widget.get_context(widget_name, widget_value, widget_attrs)["widget"] ) context["widget"]["subwidgets"] = subwidgets return context def id_for_label(self, id_): return "" def value_from_datadict(self, data, files, name): return [ widget.value_from_datadict(data, files, name + widget_name) for widget_name, widget in zip(self.widgets_names, self.widgets) ] def value_omitted_from_data(self, data, files, name): return all( widget.value_omitted_from_data(data, files, name + widget_name) for widget_name, widget in zip(self.widgets_names, self.widgets) ) def decompress(self, value): """ Return a list of decompressed values for the given compressed value. The given value can be assumed to be valid, but not necessarily non-empty. """ raise NotImplementedError("Subclasses must implement this method.") def _get_media(self): """ Media for a multiwidget is the combination of all media of the subwidgets. """ media = Media() for w in self.widgets: media += w.media return media media = property(_get_media) def __deepcopy__(self, memo): obj = super().__deepcopy__(memo) obj.widgets = copy.deepcopy(self.widgets) return obj @property def needs_multipart_form(self): return any(w.needs_multipart_form for w in self.widgets) class SplitDateTimeWidget(MultiWidget): """ A widget that splits datetime input into two <input type="text"> boxes. """ supports_microseconds = False template_name = "django/forms/widgets/splitdatetime.html" def __init__( self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None, ): widgets = ( DateInput( attrs=attrs if date_attrs is None else date_attrs, format=date_format, ), TimeInput( attrs=attrs if time_attrs is None else time_attrs, format=time_format, ), ) super().__init__(widgets) def decompress(self, value): if value: value = to_current_timezone(value) return [value.date(), value.time()] return [None, None] class SplitHiddenDateTimeWidget(SplitDateTimeWidget): """ A widget that splits datetime input into two <input type="hidden"> inputs. """ template_name = "django/forms/widgets/splithiddendatetime.html" def __init__( self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None, ): super().__init__(attrs, date_format, time_format, date_attrs, time_attrs) for widget in self.widgets: widget.input_type = "hidden" class SelectDateWidget(Widget): """ A widget that splits date input into three <select> boxes. This also serves as an example of a Widget that has more than one HTML element and hence implements value_from_datadict. """ none_value = ("", "---") month_field = "%s_month" day_field = "%s_day" year_field = "%s_year" template_name = "django/forms/widgets/select_date.html" input_type = "select" select_widget = Select date_re = _lazy_re_compile(r"(\d{4}|0)-(\d\d?)-(\d\d?)$") use_fieldset = True def __init__(self, attrs=None, years=None, months=None, empty_label=None): self.attrs = attrs or {} # Optional list or tuple of years to use in the "year" select box. 
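        # (e.g. a hypothetical years=range(1940, 2006) for a date-of-birth
        # field; the default below runs from this year through this year + 9.)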
if years: self.years = years else: this_year = datetime.date.today().year self.years = range(this_year, this_year + 10) # Optional dict of months to use in the "month" select box. if months: self.months = months else: self.months = MONTHS # Optional string, list, or tuple to use as empty_label. if isinstance(empty_label, (list, tuple)): if not len(empty_label) == 3: raise ValueError("empty_label list/tuple must have 3 elements.") self.year_none_value = ("", empty_label[0]) self.month_none_value = ("", empty_label[1]) self.day_none_value = ("", empty_label[2]) else: if empty_label is not None: self.none_value = ("", empty_label) self.year_none_value = self.none_value self.month_none_value = self.none_value self.day_none_value = self.none_value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) date_context = {} year_choices = [(i, str(i)) for i in self.years] if not self.is_required: year_choices.insert(0, self.year_none_value) year_name = self.year_field % name date_context["year"] = self.select_widget( attrs, choices=year_choices ).get_context( name=year_name, value=context["widget"]["value"]["year"], attrs={**context["widget"]["attrs"], "id": "id_%s" % year_name}, ) month_choices = list(self.months.items()) if not self.is_required: month_choices.insert(0, self.month_none_value) month_name = self.month_field % name date_context["month"] = self.select_widget( attrs, choices=month_choices ).get_context( name=month_name, value=context["widget"]["value"]["month"], attrs={**context["widget"]["attrs"], "id": "id_%s" % month_name}, ) day_choices = [(i, i) for i in range(1, 32)] if not self.is_required: day_choices.insert(0, self.day_none_value) day_name = self.day_field % name date_context["day"] = self.select_widget( attrs, choices=day_choices, ).get_context( name=day_name, value=context["widget"]["value"]["day"], attrs={**context["widget"]["attrs"], "id": "id_%s" % day_name}, ) subwidgets = [] for field in self._parse_date_fmt(): subwidgets.append(date_context[field]["widget"]) context["widget"]["subwidgets"] = subwidgets return context def format_value(self, value): """ Return a dict containing the year, month, and day of the current value. Use dict instead of a datetime to allow invalid dates such as February 31 to display correctly. """ year, month, day = None, None, None if isinstance(value, (datetime.date, datetime.datetime)): year, month, day = value.year, value.month, value.day elif isinstance(value, str): match = self.date_re.match(value) if match: # Convert any zeros in the date to empty strings to match the # empty option value. 
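                # (e.g. the pseudo-ISO string "0-5-0" produced by
                # value_from_datadict() becomes year="", month=5, day="".)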
year, month, day = [int(val) or "" for val in match.groups()] else: input_format = get_format("DATE_INPUT_FORMATS")[0] try: d = datetime.datetime.strptime(value, input_format) except ValueError: pass else: year, month, day = d.year, d.month, d.day return {"year": year, "month": month, "day": day} @staticmethod def _parse_date_fmt(): fmt = get_format("DATE_FORMAT") escaped = False for char in fmt: if escaped: escaped = False elif char == "\\": escaped = True elif char in "Yy": yield "year" elif char in "bEFMmNn": yield "month" elif char in "dj": yield "day" def id_for_label(self, id_): for first_select in self._parse_date_fmt(): return "%s_%s" % (id_, first_select) return "%s_month" % id_ def value_from_datadict(self, data, files, name): y = data.get(self.year_field % name) m = data.get(self.month_field % name) d = data.get(self.day_field % name) if y == m == d == "": return None if y is not None and m is not None and d is not None: input_format = get_format("DATE_INPUT_FORMATS")[0] input_format = formats.sanitize_strftime_format(input_format) try: date_value = datetime.date(int(y), int(m), int(d)) except ValueError: # Return pseudo-ISO dates with zeros for any unselected values, # e.g. '2017-0-23'. return "%s-%s-%s" % (y or 0, m or 0, d or 0) return date_value.strftime(input_format) return data.get(name) def value_omitted_from_data(self, data, files, name): return not any( ("{}_{}".format(name, interval) in data) for interval in ("year", "month", "day") )
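
# ----------------------------------------------------------------------------
# Usage sketch (illustrative comments only, not executed on import; assumes a
# configured Django settings module). SelectDateWidget splits one logical date
# across three <select> boxes and reassembles it in value_from_datadict():
#
#     widget = SelectDateWidget(years=range(2020, 2026))
#     widget.value_from_datadict(
#         {"born_year": "2021", "born_month": "3", "born_day": "7"}, {}, "born"
#     )
#     # -> "2021-03-07" with the stock "%Y-%m-%d" DATE_INPUT_FORMATS entry,
#     #    or a pseudo-ISO string like "2021-0-0" when parts are unselected.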
""" Form classes """ import copy import datetime import warnings from django.core.exceptions import NON_FIELD_ERRORS, ValidationError from django.forms.fields import Field, FileField from django.forms.utils import ErrorDict, ErrorList, RenderableFormMixin from django.forms.widgets import Media, MediaDefiningClass from django.utils.datastructures import MultiValueDict from django.utils.deprecation import RemovedInDjango50Warning from django.utils.functional import cached_property from django.utils.html import conditional_escape from django.utils.safestring import SafeString, mark_safe from django.utils.translation import gettext as _ from .renderers import get_default_renderer __all__ = ("BaseForm", "Form") class DeclarativeFieldsMetaclass(MediaDefiningClass): """Collect Fields declared on the base classes.""" def __new__(mcs, name, bases, attrs): # Collect fields from current class and remove them from attrs. attrs["declared_fields"] = { key: attrs.pop(key) for key, value in list(attrs.items()) if isinstance(value, Field) } new_class = super().__new__(mcs, name, bases, attrs) # Walk through the MRO. declared_fields = {} for base in reversed(new_class.__mro__): # Collect fields from base class. if hasattr(base, "declared_fields"): declared_fields.update(base.declared_fields) # Field shadowing. for attr, value in base.__dict__.items(): if value is None and attr in declared_fields: declared_fields.pop(attr) new_class.base_fields = declared_fields new_class.declared_fields = declared_fields return new_class class BaseForm(RenderableFormMixin): """ The main implementation of all the Form logic. Note that this class is different than Form. See the comments by the Form class for more info. Any improvements to the form API should be made to this class, not to the Form class. """ default_renderer = None field_order = None prefix = None use_required_attribute = True template_name_div = "django/forms/div.html" template_name_p = "django/forms/p.html" template_name_table = "django/forms/table.html" template_name_ul = "django/forms/ul.html" template_name_label = "django/forms/label.html" def __init__( self, data=None, files=None, auto_id="id_%s", prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, field_order=None, use_required_attribute=None, renderer=None, ): self.is_bound = data is not None or files is not None self.data = MultiValueDict() if data is None else data self.files = MultiValueDict() if files is None else files self.auto_id = auto_id if prefix is not None: self.prefix = prefix self.initial = initial or {} self.error_class = error_class # Translators: This is the default suffix added to form field labels self.label_suffix = label_suffix if label_suffix is not None else _(":") self.empty_permitted = empty_permitted self._errors = None # Stores the errors after clean() has been called. # The base_fields class attribute is the *class-wide* definition of # fields. Because a particular *instance* of the class might want to # alter self.fields, we create self.fields here by copying base_fields. # Instances should always modify self.fields; they should not modify # self.base_fields. 
self.fields = copy.deepcopy(self.base_fields) self._bound_fields_cache = {} self.order_fields(self.field_order if field_order is None else field_order) if use_required_attribute is not None: self.use_required_attribute = use_required_attribute if self.empty_permitted and self.use_required_attribute: raise ValueError( "The empty_permitted and use_required_attribute arguments may " "not both be True." ) # Initialize form renderer. Use a global default if not specified # either as an argument or as self.default_renderer. if renderer is None: if self.default_renderer is None: renderer = get_default_renderer() else: renderer = self.default_renderer if isinstance(self.default_renderer, type): renderer = renderer() self.renderer = renderer def order_fields(self, field_order): """ Rearrange the fields according to field_order. field_order is a list of field names specifying the order. Append fields not included in the list in the default order for backward compatibility with subclasses not overriding field_order. If field_order is None, keep all fields in the order defined in the class. Ignore unknown fields in field_order to allow disabling fields in form subclasses without redefining ordering. """ if field_order is None: return fields = {} for key in field_order: try: fields[key] = self.fields.pop(key) except KeyError: # ignore unknown fields pass fields.update(self.fields) # add remaining fields in original order self.fields = fields def __repr__(self): if self._errors is None: is_valid = "Unknown" else: is_valid = self.is_bound and not self._errors return "<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>" % { "cls": self.__class__.__name__, "bound": self.is_bound, "valid": is_valid, "fields": ";".join(self.fields), } def _bound_items(self): """Yield (name, bf) pairs, where bf is a BoundField object.""" for name in self.fields: yield name, self[name] def __iter__(self): """Yield the form's fields as BoundField objects.""" for name in self.fields: yield self[name] def __getitem__(self, name): """Return a BoundField with the given name.""" try: return self._bound_fields_cache[name] except KeyError: pass try: field = self.fields[name] except KeyError: raise KeyError( "Key '%s' not found in '%s'. Choices are: %s." % ( name, self.__class__.__name__, ", ".join(sorted(self.fields)), ) ) bound_field = field.get_bound_field(self, name) self._bound_fields_cache[name] = bound_field return bound_field @property def errors(self): """Return an ErrorDict for the data provided for the form.""" if self._errors is None: self.full_clean() return self._errors def is_valid(self): """Return True if the form has no errors, or False otherwise.""" return self.is_bound and not self.errors def add_prefix(self, field_name): """ Return the field name with a prefix appended, if this Form has a prefix set. Subclasses may wish to override. """ return "%s-%s" % (self.prefix, field_name) if self.prefix else field_name def add_initial_prefix(self, field_name): """Add an 'initial' prefix for checking dynamic initial values.""" return "initial-%s" % self.add_prefix(field_name) def _widget_data_value(self, widget, html_name): # value_from_datadict() gets the data from the data dictionaries. # Each widget type knows how to retrieve its own data, because some # widgets split data over several HTML fields. return widget.value_from_datadict(self.data, self.files, html_name) def _html_output( self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row ): "Output HTML. 
Used by as_table(), as_ul(), as_p()." warnings.warn( "django.forms.BaseForm._html_output() is deprecated. " "Please use .render() and .get_context() instead.", RemovedInDjango50Warning, stacklevel=2, ) # Errors that should be displayed above all fields. top_errors = self.non_field_errors().copy() output, hidden_fields = [], [] for name, bf in self._bound_items(): field = bf.field html_class_attr = "" bf_errors = self.error_class(bf.errors) if bf.is_hidden: if bf_errors: top_errors.extend( [ _("(Hidden field %(name)s) %(error)s") % {"name": name, "error": str(e)} for e in bf_errors ] ) hidden_fields.append(str(bf)) else: # Create a 'class="..."' attribute if the row should have any # CSS classes applied. css_classes = bf.css_classes() if css_classes: html_class_attr = ' class="%s"' % css_classes if errors_on_separate_row and bf_errors: output.append(error_row % str(bf_errors)) if bf.label: label = conditional_escape(bf.label) label = bf.label_tag(label) or "" else: label = "" if field.help_text: help_text = help_text_html % field.help_text else: help_text = "" output.append( normal_row % { "errors": bf_errors, "label": label, "field": bf, "help_text": help_text, "html_class_attr": html_class_attr, "css_classes": css_classes, "field_name": bf.html_name, } ) if top_errors: output.insert(0, error_row % top_errors) if hidden_fields: # Insert any hidden fields in the last row. str_hidden = "".join(hidden_fields) if output: last_row = output[-1] # Chop off the trailing row_ender (e.g. '</td></tr>') and # insert the hidden fields. if not last_row.endswith(row_ender): # This can happen in the as_p() case (and possibly others # that users write): if there are only top errors, we may # not be able to conscript the last row for our purposes, # so insert a new, empty row. last_row = normal_row % { "errors": "", "label": "", "field": "", "help_text": "", "html_class_attr": html_class_attr, "css_classes": "", "field_name": "", } output.append(last_row) output[-1] = last_row[: -len(row_ender)] + str_hidden + row_ender else: # If there aren't any rows in the output, just append the # hidden fields. output.append(str_hidden) return mark_safe("\n".join(output)) @property def template_name(self): return self.renderer.form_template_name def get_context(self): fields = [] hidden_fields = [] top_errors = self.non_field_errors().copy() for name, bf in self._bound_items(): bf_errors = self.error_class(bf.errors, renderer=self.renderer) if bf.is_hidden: if bf_errors: top_errors += [ _("(Hidden field %(name)s) %(error)s") % {"name": name, "error": str(e)} for e in bf_errors ] hidden_fields.append(bf) else: errors_str = str(bf_errors) # RemovedInDjango50Warning. if not isinstance(errors_str, SafeString): warnings.warn( f"Returning a plain string from " f"{self.error_class.__name__} is deprecated. Please " f"customize via the template system instead.", RemovedInDjango50Warning, ) errors_str = mark_safe(errors_str) fields.append((bf, errors_str)) return { "form": self, "fields": fields, "hidden_fields": hidden_fields, "errors": top_errors, } def non_field_errors(self): """ Return an ErrorList of errors that aren't associated with a particular field -- i.e., from Form.clean(). Return an empty ErrorList if there are none. """ return self.errors.get( NON_FIELD_ERRORS, self.error_class(error_class="nonfield", renderer=self.renderer), ) def add_error(self, field, error): """ Update the content of `self._errors`. The `field` argument is the name of the field to which the errors should be added. 
If it's None, treat the errors as NON_FIELD_ERRORS. The `error` argument can be a single error, a list of errors, or a dictionary that maps field names to lists of errors. An "error" can be either a simple string or an instance of ValidationError with its message attribute set and a "list or dictionary" can be an actual `list` or `dict` or an instance of ValidationError with its `error_list` or `error_dict` attribute set. If `error` is a dictionary, the `field` argument *must* be None and errors will be added to the fields that correspond to the keys of the dictionary. """ if not isinstance(error, ValidationError): # Normalize to ValidationError and let its constructor # do the hard work of making sense of the input. error = ValidationError(error) if hasattr(error, "error_dict"): if field is not None: raise TypeError( "The argument `field` must be `None` when the `error` " "argument contains errors for multiple fields." ) else: error = error.error_dict else: error = {field or NON_FIELD_ERRORS: error.error_list} for field, error_list in error.items(): if field not in self.errors: if field != NON_FIELD_ERRORS and field not in self.fields: raise ValueError( "'%s' has no field named '%s'." % (self.__class__.__name__, field) ) if field == NON_FIELD_ERRORS: self._errors[field] = self.error_class( error_class="nonfield", renderer=self.renderer ) else: self._errors[field] = self.error_class(renderer=self.renderer) self._errors[field].extend(error_list) if field in self.cleaned_data: del self.cleaned_data[field] def has_error(self, field, code=None): return field in self.errors and ( code is None or any(error.code == code for error in self.errors.as_data()[field]) ) def full_clean(self): """ Clean all of self.data and populate self._errors and self.cleaned_data. """ self._errors = ErrorDict() if not self.is_bound: # Stop further processing. return self.cleaned_data = {} # If the form is permitted to be empty, and none of the form data has # changed from the initial data, short circuit any validation. if self.empty_permitted and not self.has_changed(): return self._clean_fields() self._clean_form() self._post_clean() def _clean_fields(self): for name, bf in self._bound_items(): field = bf.field value = bf.initial if field.disabled else bf.data try: if isinstance(field, FileField): value = field.clean(value, bf.initial) else: value = field.clean(value) self.cleaned_data[name] = value if hasattr(self, "clean_%s" % name): value = getattr(self, "clean_%s" % name)() self.cleaned_data[name] = value except ValidationError as e: self.add_error(name, e) def _clean_form(self): try: cleaned_data = self.clean() except ValidationError as e: self.add_error(None, e) else: if cleaned_data is not None: self.cleaned_data = cleaned_data def _post_clean(self): """ An internal hook for performing additional cleaning after form cleaning is complete. Used for model validation in model forms. """ pass def clean(self): """ Hook for doing any extra form-wide cleaning after Field.clean() has been called on every field. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field named '__all__'. 
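        For example, a subclass that declares two password fields might
        compare them here and raise ValidationError on a mismatch.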
""" return self.cleaned_data def has_changed(self): """Return True if data differs from initial.""" return bool(self.changed_data) @cached_property def changed_data(self): return [name for name, bf in self._bound_items() if bf._has_changed()] @property def media(self): """Return all media required to render the widgets on this form.""" media = Media() for field in self.fields.values(): media += field.widget.media return media def is_multipart(self): """ Return True if the form needs to be multipart-encoded, i.e. it has FileInput, or False otherwise. """ return any(field.widget.needs_multipart_form for field in self.fields.values()) def hidden_fields(self): """ Return a list of all the BoundField objects that are hidden fields. Useful for manual form layout in templates. """ return [field for field in self if field.is_hidden] def visible_fields(self): """ Return a list of BoundField objects that aren't hidden fields. The opposite of the hidden_fields() method. """ return [field for field in self if not field.is_hidden] def get_initial_for_field(self, field, field_name): """ Return initial data for field on form. Use initial data from the form or the field, in that order. Evaluate callable values. """ value = self.initial.get(field_name, field.initial) if callable(value): value = value() # If this is an auto-generated default date, nix the microseconds # for standardized handling. See #22502. if ( isinstance(value, (datetime.datetime, datetime.time)) and not field.widget.supports_microseconds ): value = value.replace(microsecond=0) return value class Form(BaseForm, metaclass=DeclarativeFieldsMetaclass): "A collection of Fields, plus their associated data." # This is a separate class from BaseForm in order to abstract the way # self.fields is specified. This class (Form) is the one that does the # fancy metaclass stuff purely for the semantic sugar -- it allows one # to define a form using declarative syntax. # BaseForm itself has no way of designating self.fields.
bc5632eea15098e8eebeb2f5be275ac14b31b174d5e5a5f77a9d9c12413bce39
import datetime import io import json import mimetypes import os import re import sys import time from email.header import Header from http.client import responses from urllib.parse import quote, urlparse from django.conf import settings from django.core import signals, signing from django.core.exceptions import DisallowedRedirect from django.core.serializers.json import DjangoJSONEncoder from django.http.cookie import SimpleCookie from django.utils import timezone from django.utils.datastructures import CaseInsensitiveMapping from django.utils.encoding import iri_to_uri from django.utils.http import http_date from django.utils.regex_helper import _lazy_re_compile _charset_from_content_type_re = _lazy_re_compile( r";\s*charset=(?P<charset>[^\s;]+)", re.I ) class ResponseHeaders(CaseInsensitiveMapping): def __init__(self, data): """ Populate the initial data using __setitem__ to ensure values are correctly encoded. """ self._store = {} if data: for header, value in self._unpack_items(data): self[header] = value def _convert_to_charset(self, value, charset, mime_encode=False): """ Convert headers key/value to ascii/latin-1 native strings. `charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and `value` can't be represented in the given charset, apply MIME-encoding. """ try: if isinstance(value, str): # Ensure string is valid in given charset value.encode(charset) elif isinstance(value, bytes): # Convert bytestring using given charset value = value.decode(charset) else: value = str(value) # Ensure string is valid in given charset. value.encode(charset) if "\n" in value or "\r" in value: raise BadHeaderError( f"Header values can't contain newlines (got {value!r})" ) except UnicodeError as e: # Encoding to a string of the specified charset failed, but we # don't know what type that value was, or if it contains newlines, # which we may need to check for before sending it to be # encoded for multiple character sets. if (isinstance(value, bytes) and (b"\n" in value or b"\r" in value)) or ( isinstance(value, str) and ("\n" in value or "\r" in value) ): raise BadHeaderError( f"Header values can't contain newlines (got {value!r})" ) from e if mime_encode: value = Header(value, "utf-8", maxlinelen=sys.maxsize).encode() else: e.reason += ", HTTP response headers must be in %s format" % charset raise return value def __delitem__(self, key): self.pop(key) def __setitem__(self, key, value): key = self._convert_to_charset(key, "ascii") value = self._convert_to_charset(value, "latin-1", mime_encode=True) self._store[key.lower()] = (key, value) def pop(self, key, default=None): return self._store.pop(key.lower(), default) def setdefault(self, key, value): if key not in self: self[key] = value class BadHeaderError(ValueError): pass class HttpResponseBase: """ An HTTP response base class with dictionary-accessed headers. This class doesn't handle content. It should not be used directly. Use the HttpResponse and StreamingHttpResponse subclasses instead. """ status_code = 200 def __init__( self, content_type=None, status=None, reason=None, charset=None, headers=None ): self.headers = ResponseHeaders(headers) self._charset = charset if "Content-Type" not in self.headers: if content_type is None: content_type = f"text/html; charset={self.charset}" self.headers["Content-Type"] = content_type elif content_type: raise ValueError( "'headers' must not contain 'Content-Type' when the " "'content_type' parameter is provided." ) self._resource_closers = [] # This parameter is set by the handler. 
It's necessary to preserve the # historical behavior of request_finished. self._handler_class = None self.cookies = SimpleCookie() self.closed = False if status is not None: try: self.status_code = int(status) except (ValueError, TypeError): raise TypeError("HTTP status code must be an integer.") if not 100 <= self.status_code <= 599: raise ValueError("HTTP status code must be an integer from 100 to 599.") self._reason_phrase = reason @property def reason_phrase(self): if self._reason_phrase is not None: return self._reason_phrase # Leave self._reason_phrase unset in order to use the default # reason phrase for status code. return responses.get(self.status_code, "Unknown Status Code") @reason_phrase.setter def reason_phrase(self, value): self._reason_phrase = value @property def charset(self): if self._charset is not None: return self._charset # The Content-Type header may not yet be set, because the charset is # being inserted *into* it. if content_type := self.headers.get("Content-Type"): if matched := _charset_from_content_type_re.search(content_type): # Extract the charset and strip its double quotes. # Note that having parsed it from the Content-Type, we don't # store it back into the _charset for later intentionally, to # allow for the Content-Type to be switched again later. return matched["charset"].replace('"', "") return settings.DEFAULT_CHARSET @charset.setter def charset(self, value): self._charset = value def serialize_headers(self): """HTTP headers as a bytestring.""" return b"\r\n".join( [ key.encode("ascii") + b": " + value.encode("latin-1") for key, value in self.headers.items() ] ) __bytes__ = serialize_headers @property def _content_type_for_repr(self): return ( ', "%s"' % self.headers["Content-Type"] if "Content-Type" in self.headers else "" ) def __setitem__(self, header, value): self.headers[header] = value def __delitem__(self, header): del self.headers[header] def __getitem__(self, header): return self.headers[header] def has_header(self, header): """Case-insensitive check for a header.""" return header in self.headers __contains__ = has_header def items(self): return self.headers.items() def get(self, header, alternate=None): return self.headers.get(header, alternate) def set_cookie( self, key, value="", max_age=None, expires=None, path="/", domain=None, secure=False, httponly=False, samesite=None, ): """ Set a cookie. ``expires`` can be: - a string in the correct format, - a naive ``datetime.datetime`` object in UTC, - an aware ``datetime.datetime`` object in any time zone. If it is a ``datetime.datetime`` object then calculate ``max_age``. ``max_age`` can be: - int/float specifying seconds, - ``datetime.timedelta`` object. """ self.cookies[key] = value if expires is not None: if isinstance(expires, datetime.datetime): if timezone.is_naive(expires): expires = timezone.make_aware(expires, datetime.timezone.utc) delta = expires - datetime.datetime.now(tz=datetime.timezone.utc) # Add one second so the date matches exactly (a fraction of # time gets lost between converting to a timedelta and # then the date string). delta += datetime.timedelta(seconds=1) # Just set max_age - the max_age logic will set expires. 
expires = None if max_age is not None: raise ValueError("'expires' and 'max_age' can't be used together.") max_age = max(0, delta.days * 86400 + delta.seconds) else: self.cookies[key]["expires"] = expires else: self.cookies[key]["expires"] = "" if max_age is not None: if isinstance(max_age, datetime.timedelta): max_age = max_age.total_seconds() self.cookies[key]["max-age"] = int(max_age) # IE requires expires, so set it if hasn't been already. if not expires: self.cookies[key]["expires"] = http_date(time.time() + max_age) if path is not None: self.cookies[key]["path"] = path if domain is not None: self.cookies[key]["domain"] = domain if secure: self.cookies[key]["secure"] = True if httponly: self.cookies[key]["httponly"] = True if samesite: if samesite.lower() not in ("lax", "none", "strict"): raise ValueError('samesite must be "lax", "none", or "strict".') self.cookies[key]["samesite"] = samesite def setdefault(self, key, value): """Set a header unless it has already been set.""" self.headers.setdefault(key, value) def set_signed_cookie(self, key, value, salt="", **kwargs): value = signing.get_cookie_signer(salt=key + salt).sign(value) return self.set_cookie(key, value, **kwargs) def delete_cookie(self, key, path="/", domain=None, samesite=None): # Browsers can ignore the Set-Cookie header if the cookie doesn't use # the secure flag and: # - the cookie name starts with "__Host-" or "__Secure-", or # - the samesite is "none". secure = key.startswith(("__Secure-", "__Host-")) or ( samesite and samesite.lower() == "none" ) self.set_cookie( key, max_age=0, path=path, domain=domain, secure=secure, expires="Thu, 01 Jan 1970 00:00:00 GMT", samesite=samesite, ) # Common methods used by subclasses def make_bytes(self, value): """Turn a value into a bytestring encoded in the output charset.""" # Per PEP 3333, this response body must be bytes. To avoid returning # an instance of a subclass, this function returns `bytes(value)`. # This doesn't make a copy when `value` already contains bytes. # Handle string types -- we can't rely on force_bytes here because: # - Python attempts str conversion first # - when self._charset != 'utf-8' it re-encodes the content if isinstance(value, (bytes, memoryview)): return bytes(value) if isinstance(value, str): return bytes(value.encode(self.charset)) # Handle non-string types. return str(value).encode(self.charset) # These methods partially implement the file-like object interface. # See https://docs.python.org/library/io.html#io.IOBase # The WSGI server must call this method upon completion of the request. # See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html def close(self): for closer in self._resource_closers: try: closer() except Exception: pass # Free resources that were still referenced. self._resource_closers.clear() self.closed = True signals.request_finished.send(sender=self._handler_class) def write(self, content): raise OSError("This %s instance is not writable" % self.__class__.__name__) def flush(self): pass def tell(self): raise OSError( "This %s instance cannot tell its position" % self.__class__.__name__ ) # These methods partially implement a stream-like object interface. # See https://docs.python.org/library/io.html#io.IOBase def readable(self): return False def seekable(self): return False def writable(self): return False def writelines(self, lines): raise OSError("This %s instance is not writable" % self.__class__.__name__) class HttpResponse(HttpResponseBase): """ An HTTP response class with a string as content. 
This content can be read, appended to, or replaced. """ streaming = False non_picklable_attrs = frozenset( [ "resolver_match", # Non-picklable attributes added by test clients. "client", "context", "json", "templates", ] ) def __init__(self, content=b"", *args, **kwargs): super().__init__(*args, **kwargs) # Content is a bytestring. See the `content` property methods. self.content = content def __getstate__(self): obj_dict = self.__dict__.copy() for attr in self.non_picklable_attrs: if attr in obj_dict: del obj_dict[attr] return obj_dict def __repr__(self): return "<%(cls)s status_code=%(status_code)d%(content_type)s>" % { "cls": self.__class__.__name__, "status_code": self.status_code, "content_type": self._content_type_for_repr, } def serialize(self): """Full HTTP message, including headers, as a bytestring.""" return self.serialize_headers() + b"\r\n\r\n" + self.content __bytes__ = serialize @property def content(self): return b"".join(self._container) @content.setter def content(self, value): # Consume iterators upon assignment to allow repeated iteration. if hasattr(value, "__iter__") and not isinstance( value, (bytes, memoryview, str) ): content = b"".join(self.make_bytes(chunk) for chunk in value) if hasattr(value, "close"): try: value.close() except Exception: pass else: content = self.make_bytes(value) # Create a list of properly encoded bytestrings to support write(). self._container = [content] def __iter__(self): return iter(self._container) def write(self, content): self._container.append(self.make_bytes(content)) def tell(self): return len(self.content) def getvalue(self): return self.content def writable(self): return True def writelines(self, lines): for line in lines: self.write(line) class StreamingHttpResponse(HttpResponseBase): """ A streaming HTTP response class with an iterator as content. This should only be iterated once, when the response is streamed to the client. However, it can be appended to or replaced with a new iterator that wraps the original content (or yields entirely new content). """ streaming = True def __init__(self, streaming_content=(), *args, **kwargs): super().__init__(*args, **kwargs) # `streaming_content` should be an iterable of bytestrings. # See the `streaming_content` property methods. self.streaming_content = streaming_content def __repr__(self): return "<%(cls)s status_code=%(status_code)d%(content_type)s>" % { "cls": self.__class__.__qualname__, "status_code": self.status_code, "content_type": self._content_type_for_repr, } @property def content(self): raise AttributeError( "This %s instance has no `content` attribute. Use " "`streaming_content` instead." % self.__class__.__name__ ) @property def streaming_content(self): return map(self.make_bytes, self._iterator) @streaming_content.setter def streaming_content(self, value): self._set_streaming_content(value) def _set_streaming_content(self, value): # Ensure we can never iterate on "value" more than once. self._iterator = iter(value) if hasattr(value, "close"): self._resource_closers.append(value.close) def __iter__(self): return self.streaming_content def getvalue(self): return b"".join(self.streaming_content) class FileResponse(StreamingHttpResponse): """ A streaming HTTP response class optimized for files. 
""" block_size = 4096 def __init__(self, *args, as_attachment=False, filename="", **kwargs): self.as_attachment = as_attachment self.filename = filename self._no_explicit_content_type = ( "content_type" not in kwargs or kwargs["content_type"] is None ) super().__init__(*args, **kwargs) def _set_streaming_content(self, value): if not hasattr(value, "read"): self.file_to_stream = None return super()._set_streaming_content(value) self.file_to_stream = filelike = value if hasattr(filelike, "close"): self._resource_closers.append(filelike.close) value = iter(lambda: filelike.read(self.block_size), b"") self.set_headers(filelike) super()._set_streaming_content(value) def set_headers(self, filelike): """ Set some common response headers (Content-Length, Content-Type, and Content-Disposition) based on the `filelike` response content. """ filename = getattr(filelike, "name", "") filename = filename if isinstance(filename, str) else "" seekable = hasattr(filelike, "seek") and ( not hasattr(filelike, "seekable") or filelike.seekable() ) if hasattr(filelike, "tell"): if seekable: initial_position = filelike.tell() filelike.seek(0, io.SEEK_END) self.headers["Content-Length"] = filelike.tell() - initial_position filelike.seek(initial_position) elif hasattr(filelike, "getbuffer"): self.headers["Content-Length"] = ( filelike.getbuffer().nbytes - filelike.tell() ) elif os.path.exists(filename): self.headers["Content-Length"] = ( os.path.getsize(filename) - filelike.tell() ) elif seekable: self.headers["Content-Length"] = sum( iter(lambda: len(filelike.read(self.block_size)), 0) ) filelike.seek(-int(self.headers["Content-Length"]), io.SEEK_END) filename = os.path.basename(self.filename or filename) if self._no_explicit_content_type: if filename: content_type, encoding = mimetypes.guess_type(filename) # Encoding isn't set to prevent browsers from automatically # uncompressing files. 
content_type = { "bzip2": "application/x-bzip", "gzip": "application/gzip", "xz": "application/x-xz", }.get(encoding, content_type) self.headers["Content-Type"] = ( content_type or "application/octet-stream" ) else: self.headers["Content-Type"] = "application/octet-stream" if filename: disposition = "attachment" if self.as_attachment else "inline" try: filename.encode("ascii") file_expr = 'filename="{}"'.format( filename.replace("\\", "\\\\").replace('"', r"\"") ) except UnicodeEncodeError: file_expr = "filename*=utf-8''{}".format(quote(filename)) self.headers["Content-Disposition"] = "{}; {}".format( disposition, file_expr ) elif self.as_attachment: self.headers["Content-Disposition"] = "attachment" class HttpResponseRedirectBase(HttpResponse): allowed_schemes = ["http", "https", "ftp"] def __init__(self, redirect_to, *args, **kwargs): super().__init__(*args, **kwargs) self["Location"] = iri_to_uri(redirect_to) parsed = urlparse(str(redirect_to)) if parsed.scheme and parsed.scheme not in self.allowed_schemes: raise DisallowedRedirect( "Unsafe redirect to URL with protocol '%s'" % parsed.scheme ) url = property(lambda self: self["Location"]) def __repr__(self): return ( '<%(cls)s status_code=%(status_code)d%(content_type)s, url="%(url)s">' % { "cls": self.__class__.__name__, "status_code": self.status_code, "content_type": self._content_type_for_repr, "url": self.url, } ) class HttpResponseRedirect(HttpResponseRedirectBase): status_code = 302 class HttpResponsePermanentRedirect(HttpResponseRedirectBase): status_code = 301 class HttpResponseNotModified(HttpResponse): status_code = 304 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) del self["content-type"] @HttpResponse.content.setter def content(self, value): if value: raise AttributeError( "You cannot set content to a 304 (Not Modified) response" ) self._container = [] class HttpResponseBadRequest(HttpResponse): status_code = 400 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods, *args, **kwargs): super().__init__(*args, **kwargs) self["Allow"] = ", ".join(permitted_methods) def __repr__(self): return "<%(cls)s [%(methods)s] status_code=%(status_code)d%(content_type)s>" % { "cls": self.__class__.__name__, "status_code": self.status_code, "content_type": self._content_type_for_repr, "methods": self["Allow"], } class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): status_code = 500 class Http404(Exception): pass class JsonResponse(HttpResponse): """ An HTTP response class that consumes data to be serialized to JSON. :param data: Data to be dumped into json. By default only ``dict`` objects are allowed to be passed due to a security flaw before ECMAScript 5. See the ``safe`` parameter for more information. :param encoder: Should be a json encoder class. Defaults to ``django.core.serializers.json.DjangoJSONEncoder``. :param safe: Controls if only ``dict`` objects may be serialized. Defaults to ``True``. :param json_dumps_params: A dictionary of kwargs passed to json.dumps(). """ def __init__( self, data, encoder=DjangoJSONEncoder, safe=True, json_dumps_params=None, **kwargs, ): if safe and not isinstance(data, dict): raise TypeError( "In order to allow non-dict objects to be serialized set the " "safe parameter to False." 
) if json_dumps_params is None: json_dumps_params = {} kwargs.setdefault("content_type", "application/json") data = json.dumps(data, cls=encoder, **json_dumps_params) super().__init__(content=data, **kwargs)
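

# ---------------------------------------------------------------------------
# Usage sketch (illustration, not part of Django): a quick tour of the
# response classes defined above. Calling settings.configure() with bare
# defaults is an assumption made so the demo can run outside a project.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    settings.configure()  # DEFAULT_CHARSET et al. fall back to Django defaults

    resp = HttpResponse("hello", content_type="text/plain")
    resp["X-Demo"] = "1"  # routed through ResponseHeaders.__setitem__()
    resp.set_cookie("sessionid", "abc123", max_age=3600, samesite="Lax")
    # serialize_headers() covers headers only; cookies live in resp.cookies.
    print(resp.serialize_headers().decode("latin-1"))
    print(resp.cookies.output())

    payload = JsonResponse({"ok": True})
    print(payload.content)  # b'{"ok": true}'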
52b4be684dd70842bfa17d7c9261fc812800d743061cc78c4cb3801120edb9f2
from decimal import Decimal from django.conf import settings from django.template import Library, Node, TemplateSyntaxError, Variable from django.template.base import TokenType, render_value_in_context from django.template.defaulttags import token_kwargs from django.utils import translation from django.utils.safestring import SafeData, SafeString, mark_safe register = Library() class GetAvailableLanguagesNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = [ (k, translation.gettext(v)) for k, v in settings.LANGUAGES ] return "" class GetLanguageInfoNode(Node): def __init__(self, lang_code, variable): self.lang_code = lang_code self.variable = variable def render(self, context): lang_code = self.lang_code.resolve(context) context[self.variable] = translation.get_language_info(lang_code) return "" class GetLanguageInfoListNode(Node): def __init__(self, languages, variable): self.languages = languages self.variable = variable def get_language_info(self, language): # ``language`` is either a language code string or a sequence # with the language code as its first item if len(language[0]) > 1: return translation.get_language_info(language[0]) else: return translation.get_language_info(str(language)) def render(self, context): langs = self.languages.resolve(context) context[self.variable] = [self.get_language_info(lang) for lang in langs] return "" class GetCurrentLanguageNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = translation.get_language() return "" class GetCurrentLanguageBidiNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = translation.get_language_bidi() return "" class TranslateNode(Node): child_nodelists = () def __init__(self, filter_expression, noop, asvar=None, message_context=None): self.noop = noop self.asvar = asvar self.message_context = message_context self.filter_expression = filter_expression if isinstance(self.filter_expression.var, str): self.filter_expression.is_var = True self.filter_expression.var = Variable("'%s'" % self.filter_expression.var) def render(self, context): self.filter_expression.var.translate = not self.noop if self.message_context: self.filter_expression.var.message_context = self.message_context.resolve( context ) output = self.filter_expression.resolve(context) value = render_value_in_context(output, context) # Restore percent signs. Percent signs in template text are doubled # so they are not interpreted as string format flags. 
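        # str.replace() returns a plain str, so capture the SafeData flag
        # first and re-apply mark_safe() afterwards.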
is_safe = isinstance(value, SafeData) value = value.replace("%%", "%") value = mark_safe(value) if is_safe else value if self.asvar: context[self.asvar] = value return "" else: return value class BlockTranslateNode(Node): def __init__( self, extra_context, singular, plural=None, countervar=None, counter=None, message_context=None, trimmed=False, asvar=None, tag_name="blocktranslate", ): self.extra_context = extra_context self.singular = singular self.plural = plural self.countervar = countervar self.counter = counter self.message_context = message_context self.trimmed = trimmed self.asvar = asvar self.tag_name = tag_name def __repr__(self): return ( f"<{self.__class__.__qualname__}: " f"extra_context={self.extra_context!r} " f"singular={self.singular!r} plural={self.plural!r}>" ) def render_token_list(self, tokens): result = [] vars = [] for token in tokens: if token.token_type == TokenType.TEXT: result.append(token.contents.replace("%", "%%")) elif token.token_type == TokenType.VAR: result.append("%%(%s)s" % token.contents) vars.append(token.contents) msg = "".join(result) if self.trimmed: msg = translation.trim_whitespace(msg) return msg, vars def render(self, context, nested=False): if self.message_context: message_context = self.message_context.resolve(context) else: message_context = None # Update() works like a push(), so corresponding context.pop() is at # the end of function context.update( {var: val.resolve(context) for var, val in self.extra_context.items()} ) singular, vars = self.render_token_list(self.singular) if self.plural and self.countervar and self.counter: count = self.counter.resolve(context) if not isinstance(count, (Decimal, float, int)): raise TemplateSyntaxError( "%r argument to %r tag must be a number." % (self.countervar, self.tag_name) ) context[self.countervar] = count plural, plural_vars = self.render_token_list(self.plural) if message_context: result = translation.npgettext(message_context, singular, plural, count) else: result = translation.ngettext(singular, plural, count) vars.extend(plural_vars) else: if message_context: result = translation.pgettext(message_context, singular) else: result = translation.gettext(singular) default_value = context.template.engine.string_if_invalid def render_value(key): if key in context: val = context[key] else: val = default_value % key if "%s" in default_value else default_value return render_value_in_context(val, context) data = {v: render_value(v) for v in vars} context.pop() try: result %= data except (KeyError, ValueError): if nested: # Either string is malformed, or it's a bug raise TemplateSyntaxError( "%r is unable to format string returned by gettext: %r " "using %r" % (self.tag_name, result, data) ) with translation.override(None): result = self.render(context, nested=True) if self.asvar: context[self.asvar] = SafeString(result) return "" else: return result class LanguageNode(Node): def __init__(self, nodelist, language): self.nodelist = nodelist self.language = language def render(self, context): with translation.override(self.language.resolve(context)): output = self.nodelist.render(context) return output @register.tag("get_available_languages") def do_get_available_languages(parser, token): """ Store a list of available languages in the context. Usage:: {% get_available_languages as languages %} {% for language in languages %} ... {% endfor %} This puts settings.LANGUAGES into the named variable. """ # token.split_contents() isn't useful here because this tag doesn't accept # variable as arguments. 
args = token.contents.split() if len(args) != 3 or args[1] != "as": raise TemplateSyntaxError( "'get_available_languages' requires 'as variable' (got %r)" % args ) return GetAvailableLanguagesNode(args[2]) @register.tag("get_language_info") def do_get_language_info(parser, token): """ Store the language information dictionary for the given language code in a context variable. Usage:: {% get_language_info for LANGUAGE_CODE as l %} {{ l.code }} {{ l.name }} {{ l.name_translated }} {{ l.name_local }} {{ l.bidi|yesno:"bi-directional,uni-directional" }} """ args = token.split_contents() if len(args) != 5 or args[1] != "for" or args[3] != "as": raise TemplateSyntaxError( "'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]) ) return GetLanguageInfoNode(parser.compile_filter(args[2]), args[4]) @register.tag("get_language_info_list") def do_get_language_info_list(parser, token): """ Store a list of language information dictionaries for the given language codes in a context variable. The language codes can be specified either as a list of strings or a settings.LANGUAGES style list (or any sequence of sequences whose first items are language codes). Usage:: {% get_language_info_list for LANGUAGES as langs %} {% for l in langs %} {{ l.code }} {{ l.name }} {{ l.name_translated }} {{ l.name_local }} {{ l.bidi|yesno:"bi-directional,uni-directional" }} {% endfor %} """ args = token.split_contents() if len(args) != 5 or args[1] != "for" or args[3] != "as": raise TemplateSyntaxError( "'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]) ) return GetLanguageInfoListNode(parser.compile_filter(args[2]), args[4]) @register.filter def language_name(lang_code): return translation.get_language_info(lang_code)["name"] @register.filter def language_name_translated(lang_code): english_name = translation.get_language_info(lang_code)["name"] return translation.gettext(english_name) @register.filter def language_name_local(lang_code): return translation.get_language_info(lang_code)["name_local"] @register.filter def language_bidi(lang_code): return translation.get_language_info(lang_code)["bidi"] @register.tag("get_current_language") def do_get_current_language(parser, token): """ Store the current language in the context. Usage:: {% get_current_language as language %} This fetches the currently active language and puts its value into the ``language`` context variable. """ # token.split_contents() isn't useful here because this tag doesn't accept # variable as arguments. args = token.contents.split() if len(args) != 3 or args[1] != "as": raise TemplateSyntaxError( "'get_current_language' requires 'as variable' (got %r)" % args ) return GetCurrentLanguageNode(args[2]) @register.tag("get_current_language_bidi") def do_get_current_language_bidi(parser, token): """ Store the current language layout in the context. Usage:: {% get_current_language_bidi as bidi %} This fetches the currently active language's layout and puts its value into the ``bidi`` context variable. True indicates right-to-left layout, otherwise left-to-right. """ # token.split_contents() isn't useful here because this tag doesn't accept # variable as arguments. 
args = token.contents.split() if len(args) != 3 or args[1] != "as": raise TemplateSyntaxError( "'get_current_language_bidi' requires 'as variable' (got %r)" % args ) return GetCurrentLanguageBidiNode(args[2]) @register.tag("translate") @register.tag("trans") def do_translate(parser, token): """ Mark a string for translation and translate the string for the current language. Usage:: {% translate "this is a test" %} This marks the string for translation so it will be pulled out by makemessages into the .po files and runs the string through the translation engine. There is a second form:: {% translate "this is a test" noop %} This marks the string for translation, but returns the string unchanged. Use it when you need to store values into forms that should be translated later on. You can use variables instead of constant strings to translate stuff you marked somewhere else:: {% translate variable %} This tries to translate the contents of the variable ``variable``. Make sure that the string in there is something that is in the .po file. It is possible to store the translated string into a variable:: {% translate "this is a test" as var %} {{ var }} Contextual translations are also supported:: {% translate "this is a test" context "greeting" %} This is equivalent to calling pgettext instead of (u)gettext. """ bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError("'%s' takes at least one argument" % bits[0]) message_string = parser.compile_filter(bits[1]) remaining = bits[2:] noop = False asvar = None message_context = None seen = set() invalid_context = {"as", "noop"} while remaining: option = remaining.pop(0) if option in seen: raise TemplateSyntaxError( "The '%s' option was specified more than once." % option, ) elif option == "noop": noop = True elif option == "context": try: value = remaining.pop(0) except IndexError: raise TemplateSyntaxError( "No argument provided to the '%s' tag for the context option." % bits[0] ) if value in invalid_context: raise TemplateSyntaxError( "Invalid argument '%s' provided to the '%s' tag for the context " "option" % (value, bits[0]), ) message_context = parser.compile_filter(value) elif option == "as": try: value = remaining.pop(0) except IndexError: raise TemplateSyntaxError( "No argument provided to the '%s' tag for the as option." % bits[0] ) asvar = value else: raise TemplateSyntaxError( "Unknown argument for '%s' tag: '%s'. The only options " "available are 'noop', 'context' \"xxx\", and 'as VAR'." % ( bits[0], option, ) ) seen.add(option) return TranslateNode(message_string, noop, asvar, message_context) @register.tag("blocktranslate") @register.tag("blocktrans") def do_block_translate(parser, token): """ Translate a block of text with parameters. Usage:: {% blocktranslate with bar=foo|filter boo=baz|filter %} This is {{ bar }} and {{ boo }}. {% endblocktranslate %} Additionally, this supports pluralization:: {% blocktranslate count count=var|length %} There is {{ count }} object. {% plural %} There are {{ count }} objects. {% endblocktranslate %} This is much like ngettext, only in template syntax. The "var as value" legacy format is still supported:: {% blocktranslate with foo|filter as bar and baz|filter as boo %} {% blocktranslate count var|length as count %} The translated string can be stored in a variable using `asvar`:: {% blocktranslate with bar=foo|filter boo=baz|filter asvar var %} This is {{ bar }} and {{ boo }}. 
{% endblocktranslate %} {{ var }} Contextual translations are supported:: {% blocktranslate with bar=foo|filter context "greeting" %} This is {{ bar }}. {% endblocktranslate %} This is equivalent to calling pgettext/npgettext instead of (u)gettext/(u)ngettext. """ bits = token.split_contents() options = {} remaining_bits = bits[1:] asvar = None while remaining_bits: option = remaining_bits.pop(0) if option in options: raise TemplateSyntaxError( "The %r option was specified more than once." % option ) if option == "with": value = token_kwargs(remaining_bits, parser, support_legacy=True) if not value: raise TemplateSyntaxError( '"with" in %r tag needs at least one keyword argument.' % bits[0] ) elif option == "count": value = token_kwargs(remaining_bits, parser, support_legacy=True) if len(value) != 1: raise TemplateSyntaxError( '"count" in %r tag expected exactly ' "one keyword argument." % bits[0] ) elif option == "context": try: value = remaining_bits.pop(0) value = parser.compile_filter(value) except Exception: raise TemplateSyntaxError( '"context" in %r tag expected exactly one argument.' % bits[0] ) elif option == "trimmed": value = True elif option == "asvar": try: value = remaining_bits.pop(0) except IndexError: raise TemplateSyntaxError( "No argument provided to the '%s' tag for the asvar option." % bits[0] ) asvar = value else: raise TemplateSyntaxError( "Unknown argument for %r tag: %r." % (bits[0], option) ) options[option] = value if "count" in options: countervar, counter = next(iter(options["count"].items())) else: countervar, counter = None, None if "context" in options: message_context = options["context"] else: message_context = None extra_context = options.get("with", {}) trimmed = options.get("trimmed", False) singular = [] plural = [] while parser.tokens: token = parser.next_token() if token.token_type in (TokenType.VAR, TokenType.TEXT): singular.append(token) else: break if countervar and counter: if token.contents.strip() != "plural": raise TemplateSyntaxError( "%r doesn't allow other block tags inside it" % bits[0] ) while parser.tokens: token = parser.next_token() if token.token_type in (TokenType.VAR, TokenType.TEXT): plural.append(token) else: break end_tag_name = "end%s" % bits[0] if token.contents.strip() != end_tag_name: raise TemplateSyntaxError( "%r doesn't allow other block tags (seen %r) inside it" % (bits[0], token.contents) ) return BlockTranslateNode( extra_context, singular, plural, countervar, counter, message_context, trimmed=trimmed, asvar=asvar, tag_name=bits[0], ) @register.tag def language(parser, token): """ Enable the given language just for this block. Usage:: {% language "de" %} This is {{ bar }} and {{ boo }}. {% endlanguage %} """ bits = token.split_contents() if len(bits) != 2: raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0]) language = parser.compile_filter(bits[1]) nodelist = parser.parse(("endlanguage",)) parser.delete_first_token() return LanguageNode(nodelist, language)
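

# ---------------------------------------------------------------------------
# Usage sketch (illustration, not part of Django): rendering the tags above
# through a standalone Engine. USE_I18N=False keeps translation as a
# pass-through so no message catalogs are needed; the settings here are
# assumptions made only so the snippet runs by itself.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import django
    from django.conf import settings as demo_settings

    demo_settings.configure(USE_I18N=False)
    django.setup()

    from django.template import Context, Engine

    engine = Engine(libraries={"i18n": "django.templatetags.i18n"})
    template = engine.from_string(
        "{% load i18n %}"
        "{% get_current_language as lang %}"
        "{% translate 'Hello' %} ({{ lang }})"
    )
    print(template.render(Context()))  # -> Hello (en-us)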
30a14e9b66f7b06f312eb47ec10155b7e46a15ac08f259bc7038b5e235f83807
""" Internationalization support. """ from contextlib import ContextDecorator from decimal import ROUND_UP, Decimal from django.utils.autoreload import autoreload_started, file_changed from django.utils.functional import lazy from django.utils.regex_helper import _lazy_re_compile __all__ = [ "activate", "deactivate", "override", "deactivate_all", "get_language", "get_language_from_request", "get_language_info", "get_language_bidi", "check_for_language", "to_language", "to_locale", "templatize", "gettext", "gettext_lazy", "gettext_noop", "ngettext", "ngettext_lazy", "pgettext", "pgettext_lazy", "npgettext", "npgettext_lazy", ] class TranslatorCommentWarning(SyntaxWarning): pass # Here be dragons, so a short explanation of the logic won't hurt: # We are trying to solve two problems: (1) access settings, in particular # settings.USE_I18N, as late as possible, so that modules can be imported # without having to first configure Django, and (2) if some other code creates # a reference to one of these functions, don't break that reference when we # replace the functions with their real counterparts (once we do access the # settings). class Trans: """ The purpose of this class is to store the actual translation function upon receiving the first call to that function. After this is done, changes to USE_I18N will have no effect to which function is served upon request. If your tests rely on changing USE_I18N, you can delete all the functions from _trans.__dict__. Note that storing the function with setattr will have a noticeable performance effect, as access to the function goes the normal path, instead of using __getattr__. """ def __getattr__(self, real_name): from django.conf import settings if settings.USE_I18N: from django.utils.translation import trans_real as trans from django.utils.translation.reloader import ( translation_file_changed, watch_for_translation_changes, ) autoreload_started.connect( watch_for_translation_changes, dispatch_uid="translation_file_changed" ) file_changed.connect( translation_file_changed, dispatch_uid="translation_file_changed" ) else: from django.utils.translation import trans_null as trans setattr(self, real_name, getattr(trans, real_name)) return getattr(trans, real_name) _trans = Trans() # The Trans class is no more needed, so remove it from the namespace. del Trans def gettext_noop(message): return _trans.gettext_noop(message) def gettext(message): return _trans.gettext(message) def ngettext(singular, plural, number): return _trans.ngettext(singular, plural, number) def pgettext(context, message): return _trans.pgettext(context, message) def npgettext(context, singular, plural, number): return _trans.npgettext(context, singular, plural, number) gettext_lazy = lazy(gettext, str) pgettext_lazy = lazy(pgettext, str) def lazy_number(func, resultclass, number=None, **kwargs): if isinstance(number, int): kwargs["number"] = number proxy = lazy(func, resultclass)(**kwargs) else: original_kwargs = kwargs.copy() class NumberAwareString(resultclass): def __bool__(self): return bool(kwargs["singular"]) def _get_number_value(self, values): try: return values[number] except KeyError: raise KeyError( "Your dictionary lacks key '%s'. Please provide " "it, because it is required to determine whether " "string is singular or plural." 
% number ) def _translate(self, number_value): kwargs["number"] = number_value return func(**kwargs) def format(self, *args, **kwargs): number_value = ( self._get_number_value(kwargs) if kwargs and number else args[0] ) return self._translate(number_value).format(*args, **kwargs) def __mod__(self, rhs): if isinstance(rhs, dict) and number: number_value = self._get_number_value(rhs) else: number_value = rhs translated = self._translate(number_value) try: translated %= rhs except TypeError: # String doesn't contain a placeholder for the number. pass return translated proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs) proxy.__reduce__ = lambda: ( _lazy_number_unpickle, (func, resultclass, number, original_kwargs), ) return proxy def _lazy_number_unpickle(func, resultclass, number, kwargs): return lazy_number(func, resultclass, number=number, **kwargs) def ngettext_lazy(singular, plural, number=None): return lazy_number(ngettext, str, singular=singular, plural=plural, number=number) def npgettext_lazy(context, singular, plural, number=None): return lazy_number( npgettext, str, context=context, singular=singular, plural=plural, number=number ) def activate(language): return _trans.activate(language) def deactivate(): return _trans.deactivate() class override(ContextDecorator): def __init__(self, language, deactivate=False): self.language = language self.deactivate = deactivate def __enter__(self): self.old_language = get_language() if self.language is not None: activate(self.language) else: deactivate_all() def __exit__(self, exc_type, exc_value, traceback): if self.old_language is None: deactivate_all() elif self.deactivate: deactivate() else: activate(self.old_language) def get_language(): return _trans.get_language() def get_language_bidi(): return _trans.get_language_bidi() def check_for_language(lang_code): return _trans.check_for_language(lang_code) def to_language(locale): """Turn a locale name (en_US) into a language name (en-us).""" p = locale.find("_") if p >= 0: return locale[:p].lower() + "-" + locale[p + 1 :].lower() else: return locale.lower() def to_locale(language): """Turn a language name (en-us) into a locale name (en_US).""" lang, _, country = language.lower().partition("-") if not country: return language[:3].lower() + language[3:] # A language with > 2 characters after the dash only has its first # character after the dash capitalized; e.g. sr-latn becomes sr_Latn. # A language with 2 characters after the dash has both characters # capitalized; e.g. en-us becomes en_US. country, _, tail = country.partition("-") country = country.title() if len(country) > 2 else country.upper() if tail: country += "-" + tail return lang + "_" + country def get_language_from_request(request, check_path=False): return _trans.get_language_from_request(request, check_path) def get_language_from_path(path): return _trans.get_language_from_path(path) def get_supported_language_variant(lang_code, *, strict=False): return _trans.get_supported_language_variant(lang_code, strict) def templatize(src, **kwargs): from .template import templatize return templatize(src, **kwargs) def deactivate_all(): return _trans.deactivate_all() def get_language_info(lang_code): from django.conf.locale import LANG_INFO try: lang_info = LANG_INFO[lang_code] if "fallback" in lang_info and "name" not in lang_info: info = get_language_info(lang_info["fallback"][0]) else: info = lang_info except KeyError: if "-" not in lang_code: raise KeyError("Unknown language code %s." 
% lang_code) generic_lang_code = lang_code.split("-")[0] try: info = LANG_INFO[generic_lang_code] except KeyError: raise KeyError( "Unknown language code %s and %s." % (lang_code, generic_lang_code) ) if info: info["name_translated"] = gettext_lazy(info["name"]) return info trim_whitespace_re = _lazy_re_compile(r"\s*\n\s*") def trim_whitespace(s): return trim_whitespace_re.sub(" ", s.strip()) def round_away_from_one(value): return int(Decimal(value - 1).quantize(Decimal("0"), rounding=ROUND_UP)) + 1
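

# ---------------------------------------------------------------------------
# Self-check sketch (illustration, not part of Django): to_language(),
# to_locale(), trim_whitespace(), and round_away_from_one() are pure
# functions, so they can be exercised without configuring settings.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    assert to_language("en_US") == "en-us"
    assert to_locale("en-us") == "en_US"
    # A country/script subtag longer than two characters is title-cased:
    assert to_locale("sr-latn") == "sr_Latn"
    assert trim_whitespace("  hello \n   world ") == "hello world"
    # Rounding moves away from 1 in both directions:
    assert round_away_from_one(1.5) == 2
    assert round_away_from_one(0.5) == 0
    print("translation helpers OK")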
01df683d1cb1407298081ed147bb9bd7f261e1db2b51ce1173ef8e23ca586116
""" The main QuerySet implementation. This provides the public API for the ORM. """ import copy import operator import warnings from itertools import chain, islice from asgiref.sync import sync_to_async import django from django.conf import settings from django.core import exceptions from django.db import ( DJANGO_VERSION_PICKLE_KEY, IntegrityError, NotSupportedError, connections, router, transaction, ) from django.db.models import AutoField, DateField, DateTimeField, Field, sql from django.db.models.constants import LOOKUP_SEP, OnConflict from django.db.models.deletion import Collector from django.db.models.expressions import Case, F, Ref, Value, When from django.db.models.functions import Cast, Trunc from django.db.models.query_utils import FilteredRelation, Q from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE from django.db.models.utils import ( AltersData, create_namedtuple_class, resolve_callables, ) from django.utils import timezone from django.utils.deprecation import RemovedInDjango50Warning from django.utils.functional import cached_property, partition # The maximum number of results to fetch in a get() query. MAX_GET_RESULTS = 21 # The maximum number of items to display in a QuerySet.__repr__ REPR_OUTPUT_SIZE = 20 class BaseIterable: def __init__( self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE ): self.queryset = queryset self.chunked_fetch = chunked_fetch self.chunk_size = chunk_size async def _async_generator(self): # Generators don't actually start running until the first time you call # next() on them, so make the generator object in the async thread and # then repeatedly dispatch to it in a sync thread. sync_generator = self.__iter__() def next_slice(gen): return list(islice(gen, self.chunk_size)) while True: chunk = await sync_to_async(next_slice)(sync_generator) for item in chunk: yield item if len(chunk) < self.chunk_size: break # __aiter__() is a *synchronous* method that has to then return an # *asynchronous* iterator/generator. Thus, nest an async generator inside # it. # This is a generic iterable converter for now, and is going to suffer a # performance penalty on large sets of items due to the cost of crossing # over the sync barrier for each chunk. Custom __aiter__() methods should # be added to each Iterable subclass, but that needs some work in the # Compiler first. def __aiter__(self): return self._async_generator() class ModelIterable(BaseIterable): """Iterable that yields a model instance for each row.""" def __iter__(self): queryset = self.queryset db = queryset.db compiler = queryset.query.get_compiler(using=db) # Execute the query. This will also fill compiler.select, klass_info, # and annotations. 
results = compiler.execute_sql( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ) select, klass_info, annotation_col_map = ( compiler.select, compiler.klass_info, compiler.annotation_col_map, ) model_cls = klass_info["model"] select_fields = klass_info["select_fields"] model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1 init_list = [ f[0].target.attname for f in select[model_fields_start:model_fields_end] ] related_populators = get_related_populators(klass_info, select, db) known_related_objects = [ ( field, related_objs, operator.attrgetter( *[ field.attname if from_field == "self" else queryset.model._meta.get_field(from_field).attname for from_field in field.from_fields ] ), ) for field, related_objs in queryset._known_related_objects.items() ] for row in compiler.results_iter(results): obj = model_cls.from_db( db, init_list, row[model_fields_start:model_fields_end] ) for rel_populator in related_populators: rel_populator.populate(row, obj) if annotation_col_map: for attr_name, col_pos in annotation_col_map.items(): setattr(obj, attr_name, row[col_pos]) # Add the known related objects to the model. for field, rel_objs, rel_getter in known_related_objects: # Avoid overwriting objects loaded by, e.g., select_related(). if field.is_cached(obj): continue rel_obj_id = rel_getter(obj) try: rel_obj = rel_objs[rel_obj_id] except KeyError: pass # May happen in qs1 | qs2 scenarios. else: setattr(obj, field.name, rel_obj) yield obj class RawModelIterable(BaseIterable): """ Iterable that yields a model instance for each row from a raw queryset. """ def __iter__(self): # Cache some things for performance reasons outside the loop. db = self.queryset.db query = self.queryset.query connection = connections[db] compiler = connection.ops.compiler("SQLCompiler")(query, connection, db) query_iterator = iter(query) try: ( model_init_names, model_init_pos, annotation_fields, ) = self.queryset.resolve_model_init_order() model_cls = self.queryset.model if model_cls._meta.pk.attname not in model_init_names: raise exceptions.FieldDoesNotExist( "Raw query must include the primary key" ) fields = [self.queryset.model_fields.get(c) for c in self.queryset.columns] converters = compiler.get_converters( [f.get_col(f.model._meta.db_table) if f else None for f in fields] ) if converters: query_iterator = compiler.apply_converters(query_iterator, converters) for values in query_iterator: # Associate fields to values model_init_values = [values[pos] for pos in model_init_pos] instance = model_cls.from_db(db, model_init_names, model_init_values) if annotation_fields: for column, pos in annotation_fields: setattr(instance, column, values[pos]) yield instance finally: # Done iterating the Query. If it has its own cursor, close it. if hasattr(query, "cursor") and query.cursor: query.cursor.close() class ValuesIterable(BaseIterable): """ Iterable returned by QuerySet.values() that yields a dict for each row. """ def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) # extra(select=...) cols are always at the start of the row. names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] indexes = range(len(names)) for row in compiler.results_iter( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ): yield {names[i]: row[i] for i in indexes} class ValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=False) that yields a tuple for each row. 
""" def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) if queryset._fields: # extra(select=...) cols are always at the start of the row. names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] fields = [ *queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields), ] if fields != names: # Reorder according to fields. index_map = {name: idx for idx, name in enumerate(names)} rowfactory = operator.itemgetter(*[index_map[f] for f in fields]) return map( rowfactory, compiler.results_iter( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ), ) return compiler.results_iter( tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size, ) class NamedValuesListIterable(ValuesListIterable): """ Iterable returned by QuerySet.values_list(named=True) that yields a namedtuple for each row. """ def __iter__(self): queryset = self.queryset if queryset._fields: names = queryset._fields else: query = queryset.query names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] tuple_class = create_namedtuple_class(*names) new = tuple.__new__ for row in super().__iter__(): yield new(tuple_class, row) class FlatValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=True) that yields single values. """ def __iter__(self): queryset = self.queryset compiler = queryset.query.get_compiler(queryset.db) for row in compiler.results_iter( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ): yield row[0] class QuerySet(AltersData): """Represent a lazy database lookup for a set of objects.""" def __init__(self, model=None, query=None, using=None, hints=None): self.model = model self._db = using self._hints = hints or {} self._query = query or sql.Query(self.model) self._result_cache = None self._sticky_filter = False self._for_write = False self._prefetch_related_lookups = () self._prefetch_done = False self._known_related_objects = {} # {rel_field: {pk: rel_obj}} self._iterable_class = ModelIterable self._fields = None self._defer_next_filter = False self._deferred_filter = None @property def query(self): if self._deferred_filter: negate, args, kwargs = self._deferred_filter self._filter_or_exclude_inplace(negate, args, kwargs) self._deferred_filter = None return self._query @query.setter def query(self, value): if value.values_select: self._iterable_class = ValuesIterable self._query = value def as_manager(cls): # Address the circular dependency between `Queryset` and `Manager`. from django.db.models.manager import Manager manager = Manager.from_queryset(cls)() manager._built_with_as_manager = True return manager as_manager.queryset_only = True as_manager = classmethod(as_manager) ######################## # PYTHON MAGIC METHODS # ######################## def __deepcopy__(self, memo): """Don't populate the QuerySet's cache.""" obj = self.__class__() for k, v in self.__dict__.items(): if k == "_result_cache": obj.__dict__[k] = None else: obj.__dict__[k] = copy.deepcopy(v, memo) return obj def __getstate__(self): # Force the cache to be fully populated. self._fetch_all() return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__} def __setstate__(self, state): pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: if pickled_version != django.__version__: warnings.warn( "Pickled queryset instance's Django version %s does not " "match the current version %s." 
% (pickled_version, django.__version__), RuntimeWarning, stacklevel=2, ) else: warnings.warn( "Pickled queryset instance's Django version is not specified.", RuntimeWarning, stacklevel=2, ) self.__dict__.update(state) def __repr__(self): data = list(self[: REPR_OUTPUT_SIZE + 1]) if len(data) > REPR_OUTPUT_SIZE: data[-1] = "...(remaining elements truncated)..." return "<%s %r>" % (self.__class__.__name__, data) def __len__(self): self._fetch_all() return len(self._result_cache) def __iter__(self): """ The queryset iterator protocol uses three nested iterators in the default case: 1. sql.compiler.execute_sql() - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE) using cursor.fetchmany(). This part is responsible for doing some column masking, and returning the rows in chunks. 2. sql.compiler.results_iter() - Returns one row at time. At this point the rows are still just tuples. In some cases the return values are converted to Python values at this location. 3. self.iterator() - Responsible for turning the rows into model objects. """ self._fetch_all() return iter(self._result_cache) def __aiter__(self): # Remember, __aiter__ itself is synchronous, it's the thing it returns # that is async! async def generator(): await sync_to_async(self._fetch_all)() for item in self._result_cache: yield item return generator() def __bool__(self): self._fetch_all() return bool(self._result_cache) def __getitem__(self, k): """Retrieve an item or slice from the set of results.""" if not isinstance(k, (int, slice)): raise TypeError( "QuerySet indices must be integers or slices, not %s." % type(k).__name__ ) if (isinstance(k, int) and k < 0) or ( isinstance(k, slice) and ( (k.start is not None and k.start < 0) or (k.stop is not None and k.stop < 0) ) ): raise ValueError("Negative indexing is not supported.") if self._result_cache is not None: return self._result_cache[k] if isinstance(k, slice): qs = self._chain() if k.start is not None: start = int(k.start) else: start = None if k.stop is not None: stop = int(k.stop) else: stop = None qs.query.set_limits(start, stop) return list(qs)[:: k.step] if k.step else qs qs = self._chain() qs.query.set_limits(k, k + 1) qs._fetch_all() return qs._result_cache[0] def __class_getitem__(cls, *args, **kwargs): return cls def __and__(self, other): self._check_operator_queryset(other, "&") self._merge_sanity_check(other) if isinstance(other, EmptyQuerySet): return other if isinstance(self, EmptyQuerySet): return self combined = self._chain() combined._merge_known_related_objects(other) combined.query.combine(other.query, sql.AND) return combined def __or__(self, other): self._check_operator_queryset(other, "|") self._merge_sanity_check(other) if isinstance(self, EmptyQuerySet): return other if isinstance(other, EmptyQuerySet): return self query = ( self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values("pk")) ) combined = query._chain() combined._merge_known_related_objects(other) if not other.query.can_filter(): other = other.model._base_manager.filter(pk__in=other.values("pk")) combined.query.combine(other.query, sql.OR) return combined def __xor__(self, other): self._check_operator_queryset(other, "^") self._merge_sanity_check(other) if isinstance(self, EmptyQuerySet): return other if isinstance(other, EmptyQuerySet): return self query = ( self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values("pk")) ) combined = query._chain() combined._merge_known_related_objects(other) if not 
other.query.can_filter(): other = other.model._base_manager.filter(pk__in=other.values("pk")) combined.query.combine(other.query, sql.XOR) return combined #################################### # METHODS THAT DO DATABASE QUERIES # #################################### def _iterator(self, use_chunked_fetch, chunk_size): iterable = self._iterable_class( self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size or 2000, ) if not self._prefetch_related_lookups or chunk_size is None: yield from iterable return iterator = iter(iterable) while results := list(islice(iterator, chunk_size)): prefetch_related_objects(results, *self._prefetch_related_lookups) yield from results def iterator(self, chunk_size=None): """ An iterator over the results from applying this QuerySet to the database. chunk_size must be provided for QuerySets that prefetch related objects. Otherwise, a default chunk_size of 2000 is supplied. """ if chunk_size is None: if self._prefetch_related_lookups: # When the deprecation ends, replace with: # raise ValueError( # 'chunk_size must be provided when using ' # 'QuerySet.iterator() after prefetch_related().' # ) warnings.warn( "Using QuerySet.iterator() after prefetch_related() " "without specifying chunk_size is deprecated.", category=RemovedInDjango50Warning, stacklevel=2, ) elif chunk_size <= 0: raise ValueError("Chunk size must be strictly positive.") use_chunked_fetch = not connections[self.db].settings_dict.get( "DISABLE_SERVER_SIDE_CURSORS" ) return self._iterator(use_chunked_fetch, chunk_size) async def aiterator(self, chunk_size=2000): """ An asynchronous iterator over the results from applying this QuerySet to the database. """ if self._prefetch_related_lookups: raise NotSupportedError( "Using QuerySet.aiterator() after prefetch_related() is not supported." ) if chunk_size <= 0: raise ValueError("Chunk size must be strictly positive.") use_chunked_fetch = not connections[self.db].settings_dict.get( "DISABLE_SERVER_SIDE_CURSORS" ) async for item in self._iterable_class( self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size ): yield item def aggregate(self, *args, **kwargs): """ Return a dictionary containing the calculations (aggregation) over the current queryset. If args is present the expression is passed as a kwarg using the Aggregate object's default alias. """ if self.query.distinct_fields: raise NotImplementedError("aggregate() + distinct(fields) not implemented.") self._validate_values_are_expressions( (*args, *kwargs.values()), method_name="aggregate" ) for arg in args: # The default_alias property raises TypeError if default_alias # can't be set automatically or AttributeError if it isn't an # attribute. 
try: arg.default_alias except (AttributeError, TypeError): raise TypeError("Complex aggregates require an alias") kwargs[arg.default_alias] = arg query = self.query.chain() for (alias, aggregate_expr) in kwargs.items(): query.add_annotation(aggregate_expr, alias, is_summary=True) annotation = query.annotations[alias] if not annotation.contains_aggregate: raise TypeError("%s is not an aggregate expression" % alias) for expr in annotation.get_source_expressions(): if ( expr.contains_aggregate and isinstance(expr, Ref) and expr.refs in kwargs ): name = expr.refs raise exceptions.FieldError( "Cannot compute %s('%s'): '%s' is an aggregate" % (annotation.name, name, name) ) return query.get_aggregation(self.db, kwargs) async def aaggregate(self, *args, **kwargs): return await sync_to_async(self.aggregate)(*args, **kwargs) def count(self): """ Perform a SELECT COUNT() and return the number of records as an integer. If the QuerySet is already fully cached, return the length of the cached results set to avoid multiple SELECT COUNT(*) calls. """ if self._result_cache is not None: return len(self._result_cache) return self.query.get_count(using=self.db) async def acount(self): return await sync_to_async(self.count)() def get(self, *args, **kwargs): """ Perform the query and return a single object matching the given keyword arguments. """ if self.query.combinator and (args or kwargs): raise NotSupportedError( "Calling QuerySet.get(...) with filters after %s() is not " "supported." % self.query.combinator ) clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs) if self.query.can_filter() and not self.query.distinct_fields: clone = clone.order_by() limit = None if ( not clone.query.select_for_update or connections[clone.db].features.supports_select_for_update_with_limit ): limit = MAX_GET_RESULTS clone.query.set_limits(high=limit) num = len(clone) if num == 1: return clone._result_cache[0] if not num: raise self.model.DoesNotExist( "%s matching query does not exist." % self.model._meta.object_name ) raise self.model.MultipleObjectsReturned( "get() returned more than one %s -- it returned %s!" % ( self.model._meta.object_name, num if not limit or num < limit else "more than %s" % (limit - 1), ) ) async def aget(self, *args, **kwargs): return await sync_to_async(self.get)(*args, **kwargs) def create(self, **kwargs): """ Create a new object with the given kwargs, saving it to the database and returning the created object. """ obj = self.model(**kwargs) self._for_write = True obj.save(force_insert=True, using=self.db) return obj async def acreate(self, **kwargs): return await sync_to_async(self.create)(**kwargs) def _prepare_for_bulk_create(self, objs): for obj in objs: if obj.pk is None: # Populate new PK values. obj.pk = obj._meta.pk.get_pk_value_on_save(obj) obj._prepare_related_fields_for_save(operation_name="bulk_create") def _check_bulk_create_options( self, ignore_conflicts, update_conflicts, update_fields, unique_fields ): if ignore_conflicts and update_conflicts: raise ValueError( "ignore_conflicts and update_conflicts are mutually exclusive." ) db_features = connections[self.db].features if ignore_conflicts: if not db_features.supports_ignore_conflicts: raise NotSupportedError( "This database backend does not support ignoring conflicts." ) return OnConflict.IGNORE elif update_conflicts: if not db_features.supports_update_conflicts: raise NotSupportedError( "This database backend does not support updating conflicts." 
                )
            if not update_fields:
                raise ValueError(
                    "Fields that will be updated when a row insertion fails "
                    "on conflicts must be provided."
                )
            if unique_fields and not db_features.supports_update_conflicts_with_target:
                raise NotSupportedError(
                    "This database backend does not support updating "
                    "conflicts with specifying unique fields that can trigger "
                    "the upsert."
                )
            if not unique_fields and db_features.supports_update_conflicts_with_target:
                raise ValueError(
                    "Unique fields that can trigger the upsert must be provided."
                )
            # Updating primary keys and non-concrete fields is forbidden.
            update_fields = [self.model._meta.get_field(name) for name in update_fields]
            if any(not f.concrete or f.many_to_many for f in update_fields):
                raise ValueError(
                    "bulk_create() can only be used with concrete fields in "
                    "update_fields."
                )
            if any(f.primary_key for f in update_fields):
                raise ValueError(
                    "bulk_create() cannot be used with primary keys in "
                    "update_fields."
                )
            if unique_fields:
                # Primary key is allowed in unique_fields.
                unique_fields = [
                    self.model._meta.get_field(name)
                    for name in unique_fields
                    if name != "pk"
                ]
                if any(not f.concrete or f.many_to_many for f in unique_fields):
                    raise ValueError(
                        "bulk_create() can only be used with concrete fields "
                        "in unique_fields."
                    )
            return OnConflict.UPDATE
        return None

    def bulk_create(
        self,
        objs,
        batch_size=None,
        ignore_conflicts=False,
        update_conflicts=False,
        update_fields=None,
        unique_fields=None,
    ):
        """
        Insert each of the instances into the database. Do *not* call
        save() on each of the instances, do not send any pre/post_save
        signals, and do not set the primary key attribute if it is an
        autoincrement field (except if features.can_return_rows_from_bulk_insert=True).
        Multi-table models are not supported.
        """
        # When you bulk insert you don't get the primary keys back (if it's an
        # autoincrement, except if can_return_rows_from_bulk_insert=True), so
        # you can't insert into the child tables that reference it. There
        # are two workarounds:
        # 1) This could be implemented if you didn't have an autoincrement pk
        # 2) You could do it by doing O(n) normal inserts into the parent
        #    tables to get the primary keys back and then doing a single bulk
        #    insert into the childmost table.
        # We currently set the primary keys on the objects when using
        # PostgreSQL via the RETURNING ID clause. It should be possible for
        # Oracle as well, but the semantics for extracting the primary keys
        # are trickier so it's not done yet.
        if batch_size is not None and batch_size <= 0:
            raise ValueError("Batch size must be a positive integer.")
        # Check that the parents share the same concrete model with our model
        # to detect the inheritance pattern ConcreteGrandParent ->
        # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
        # would not identify that case as involving multiple tables.
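        # For illustration (hypothetical models, not part of this module):
        #
        #     class ConcreteGrandParent(models.Model): ...
        #     class MultiTableParent(ConcreteGrandParent): ...
        #     class ProxyChild(MultiTableParent):
        #         class Meta:
        #             proxy = True
        #
        # ProxyChild._meta.proxy is True, yet each ProxyChild row spans the
        # ConcreteGrandParent and MultiTableParent tables, which the
        # parent-list walk below detects.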
for parent in self.model._meta.get_parent_list(): if parent._meta.concrete_model is not self.model._meta.concrete_model: raise ValueError("Can't bulk create a multi-table inherited model") if not objs: return objs on_conflict = self._check_bulk_create_options( ignore_conflicts, update_conflicts, update_fields, unique_fields, ) self._for_write = True opts = self.model._meta fields = opts.concrete_fields objs = list(objs) self._prepare_for_bulk_create(objs) with transaction.atomic(using=self.db, savepoint=False): objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs) if objs_with_pk: returned_columns = self._batched_insert( objs_with_pk, fields, batch_size, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) for obj_with_pk, results in zip(objs_with_pk, returned_columns): for result, field in zip(results, opts.db_returning_fields): if field != opts.pk: setattr(obj_with_pk, field.attname, result) for obj_with_pk in objs_with_pk: obj_with_pk._state.adding = False obj_with_pk._state.db = self.db if objs_without_pk: fields = [f for f in fields if not isinstance(f, AutoField)] returned_columns = self._batched_insert( objs_without_pk, fields, batch_size, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) connection = connections[self.db] if ( connection.features.can_return_rows_from_bulk_insert and on_conflict is None ): assert len(returned_columns) == len(objs_without_pk) for obj_without_pk, results in zip(objs_without_pk, returned_columns): for result, field in zip(results, opts.db_returning_fields): setattr(obj_without_pk, field.attname, result) obj_without_pk._state.adding = False obj_without_pk._state.db = self.db return objs async def abulk_create( self, objs, batch_size=None, ignore_conflicts=False, update_conflicts=False, update_fields=None, unique_fields=None, ): return await sync_to_async(self.bulk_create)( objs=objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts, update_conflicts=update_conflicts, update_fields=update_fields, unique_fields=unique_fields, ) def bulk_update(self, objs, fields, batch_size=None): """ Update the given fields in each of the given objects in the database. """ if batch_size is not None and batch_size <= 0: raise ValueError("Batch size must be a positive integer.") if not fields: raise ValueError("Field names must be given to bulk_update().") objs = tuple(objs) if any(obj.pk is None for obj in objs): raise ValueError("All bulk_update() objects must have a primary key set.") fields = [self.model._meta.get_field(name) for name in fields] if any(not f.concrete or f.many_to_many for f in fields): raise ValueError("bulk_update() can only be used with concrete fields.") if any(f.primary_key for f in fields): raise ValueError("bulk_update() cannot be used with primary key fields.") if not objs: return 0 for obj in objs: obj._prepare_related_fields_for_save( operation_name="bulk_update", fields=fields ) # PK is used twice in the resulting update query, once in the filter # and once in the WHEN. Each field will also have one CAST. 
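        # Roughly, each batch below becomes SQL of this shape (illustrative;
        # exact quoting and casting are backend-dependent):
        #
        #     UPDATE "table" SET "field" = CASE
        #         WHEN ("pk" = 1) THEN <value for object 1>
        #         WHEN ("pk" = 2) THEN <value for object 2>
        #     END
        #     WHERE "pk" IN (1, 2)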
self._for_write = True connection = connections[self.db] max_batch_size = connection.ops.bulk_batch_size(["pk", "pk"] + fields, objs) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size requires_casting = connection.features.requires_casted_case_in_updates batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size)) updates = [] for batch_objs in batches: update_kwargs = {} for field in fields: when_statements = [] for obj in batch_objs: attr = getattr(obj, field.attname) if not hasattr(attr, "resolve_expression"): attr = Value(attr, output_field=field) when_statements.append(When(pk=obj.pk, then=attr)) case_statement = Case(*when_statements, output_field=field) if requires_casting: case_statement = Cast(case_statement, output_field=field) update_kwargs[field.attname] = case_statement updates.append(([obj.pk for obj in batch_objs], update_kwargs)) rows_updated = 0 queryset = self.using(self.db) with transaction.atomic(using=self.db, savepoint=False): for pks, update_kwargs in updates: rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs) return rows_updated bulk_update.alters_data = True async def abulk_update(self, objs, fields, batch_size=None): return await sync_to_async(self.bulk_update)( objs=objs, fields=fields, batch_size=batch_size, ) abulk_update.alters_data = True def get_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, creating one if necessary. Return a tuple of (object, created), where created is a boolean specifying whether an object was created. """ # The get() needs to be targeted at the write database in order # to avoid potential transaction consistency problems. self._for_write = True try: return self.get(**kwargs), False except self.model.DoesNotExist: params = self._extract_model_params(defaults, **kwargs) # Try to create an object using passed params. try: with transaction.atomic(using=self.db): params = dict(resolve_callables(params)) return self.create(**params), True except IntegrityError: try: return self.get(**kwargs), False except self.model.DoesNotExist: pass raise async def aget_or_create(self, defaults=None, **kwargs): return await sync_to_async(self.get_or_create)( defaults=defaults, **kwargs, ) def update_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, updating one with defaults if it exists, otherwise create a new one. Return a tuple (object, created), where created is a boolean specifying whether an object was created. """ defaults = defaults or {} self._for_write = True with transaction.atomic(using=self.db): # Lock the row so that a concurrent update is blocked until # update_or_create() has performed its save. obj, created = self.select_for_update().get_or_create(defaults, **kwargs) if created: return obj, created for k, v in resolve_callables(defaults): setattr(obj, k, v) update_fields = set(defaults) concrete_field_names = self.model._meta._non_pk_concrete_field_names # update_fields does not support non-concrete fields. if concrete_field_names.issuperset(update_fields): # Add fields which are set on pre_save(), e.g. auto_now fields. # This is to maintain backward compatibility as these fields # are not updated unless explicitly specified in the # update_fields list. 
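                # For example, a hypothetical field declared as
                # ``modified = DateTimeField(auto_now=True)`` never appears in
                # ``defaults`` but must still be refreshed on save, so it is
                # added to update_fields below.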
for field in self.model._meta.local_concrete_fields: if not ( field.primary_key or field.__class__.pre_save is Field.pre_save ): update_fields.add(field.name) if field.name != field.attname: update_fields.add(field.attname) obj.save(using=self.db, update_fields=update_fields) else: obj.save(using=self.db) return obj, False async def aupdate_or_create(self, defaults=None, **kwargs): return await sync_to_async(self.update_or_create)( defaults=defaults, **kwargs, ) def _extract_model_params(self, defaults, **kwargs): """ Prepare `params` for creating a model instance based on the given kwargs; for use by get_or_create(). """ defaults = defaults or {} params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k} params.update(defaults) property_names = self.model._meta._property_names invalid_params = [] for param in params: try: self.model._meta.get_field(param) except exceptions.FieldDoesNotExist: # It's okay to use a model's property if it has a setter. if not (param in property_names and getattr(self.model, param).fset): invalid_params.append(param) if invalid_params: raise exceptions.FieldError( "Invalid field name(s) for model %s: '%s'." % ( self.model._meta.object_name, "', '".join(sorted(invalid_params)), ) ) return params def _earliest(self, *fields): """ Return the earliest object according to fields (if given) or by the model's Meta.get_latest_by. """ if fields: order_by = fields else: order_by = getattr(self.model._meta, "get_latest_by") if order_by and not isinstance(order_by, (tuple, list)): order_by = (order_by,) if order_by is None: raise ValueError( "earliest() and latest() require either fields as positional " "arguments or 'get_latest_by' in the model's Meta." ) obj = self._chain() obj.query.set_limits(high=1) obj.query.clear_ordering(force=True) obj.query.add_ordering(*order_by) return obj.get() def earliest(self, *fields): if self.query.is_sliced: raise TypeError("Cannot change a query once a slice has been taken.") return self._earliest(*fields) async def aearliest(self, *fields): return await sync_to_async(self.earliest)(*fields) def latest(self, *fields): """ Return the latest object according to fields (if given) or by the model's Meta.get_latest_by. """ if self.query.is_sliced: raise TypeError("Cannot change a query once a slice has been taken.") return self.reverse()._earliest(*fields) async def alatest(self, *fields): return await sync_to_async(self.latest)(*fields) def first(self): """Return the first object of a query or None if no match is found.""" if self.ordered: queryset = self else: self._check_ordering_first_last_queryset_aggregation(method="first") queryset = self.order_by("pk") for obj in queryset[:1]: return obj async def afirst(self): return await sync_to_async(self.first)() def last(self): """Return the last object of a query or None if no match is found.""" if self.ordered: queryset = self.reverse() else: self._check_ordering_first_last_queryset_aggregation(method="last") queryset = self.order_by("-pk") for obj in queryset[:1]: return obj async def alast(self): return await sync_to_async(self.last)() def in_bulk(self, id_list=None, *, field_name="pk"): """ Return a dictionary mapping each of the given IDs to the object with that ID. If `id_list` isn't provided, evaluate the entire QuerySet. 
""" if self.query.is_sliced: raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().") opts = self.model._meta unique_fields = [ constraint.fields[0] for constraint in opts.total_unique_constraints if len(constraint.fields) == 1 ] if ( field_name != "pk" and not opts.get_field(field_name).unique and field_name not in unique_fields and self.query.distinct_fields != (field_name,) ): raise ValueError( "in_bulk()'s field_name must be a unique field but %r isn't." % field_name ) if id_list is not None: if not id_list: return {} filter_key = "{}__in".format(field_name) batch_size = connections[self.db].features.max_query_params id_list = tuple(id_list) # If the database has a limit on the number of query parameters # (e.g. SQLite), retrieve objects in batches if necessary. if batch_size and batch_size < len(id_list): qs = () for offset in range(0, len(id_list), batch_size): batch = id_list[offset : offset + batch_size] qs += tuple(self.filter(**{filter_key: batch}).order_by()) else: qs = self.filter(**{filter_key: id_list}).order_by() else: qs = self._chain() return {getattr(obj, field_name): obj for obj in qs} async def ain_bulk(self, id_list=None, *, field_name="pk"): return await sync_to_async(self.in_bulk)( id_list=id_list, field_name=field_name, ) def delete(self): """Delete the records in the current QuerySet.""" self._not_support_combined_queries("delete") if self.query.is_sliced: raise TypeError("Cannot use 'limit' or 'offset' with delete().") if self.query.distinct or self.query.distinct_fields: raise TypeError("Cannot call delete() after .distinct().") if self._fields is not None: raise TypeError("Cannot call delete() after .values() or .values_list()") del_query = self._chain() # The delete is actually 2 queries - one to find related objects, # and one to delete. Make sure that the discovery of related # objects is performed on the same database as the deletion. del_query._for_write = True # Disable non-supported fields. del_query.query.select_for_update = False del_query.query.select_related = False del_query.query.clear_ordering(force=True) collector = Collector(using=del_query.db, origin=self) collector.collect(del_query) deleted, _rows_count = collector.delete() # Clear the result cache, in case this QuerySet gets reused. self._result_cache = None return deleted, _rows_count delete.alters_data = True delete.queryset_only = True async def adelete(self): return await sync_to_async(self.delete)() adelete.alters_data = True adelete.queryset_only = True def _raw_delete(self, using): """ Delete objects found from the given queryset in single direct SQL query. No signals are sent and there is no protection for cascades. """ query = self.query.clone() query.__class__ = sql.DeleteQuery cursor = query.get_compiler(using).execute_sql(CURSOR) if cursor: with cursor: return cursor.rowcount return 0 _raw_delete.alters_data = True def update(self, **kwargs): """ Update all elements in the current QuerySet, setting all the given fields to the appropriate values. """ self._not_support_combined_queries("update") if self.query.is_sliced: raise TypeError("Cannot update a query once a slice has been taken.") self._for_write = True query = self.query.chain(sql.UpdateQuery) query.add_update_values(kwargs) # Inline annotations in order_by(), if possible. 
new_order_by = [] for col in query.order_by: if annotation := query.annotations.get(col): if getattr(annotation, "contains_aggregate", False): raise exceptions.FieldError( f"Cannot update when ordering by an aggregate: {annotation}" ) new_order_by.append(annotation) else: new_order_by.append(col) query.order_by = tuple(new_order_by) # Clear any annotations so that they won't be present in subqueries. query.annotations = {} with transaction.mark_for_rollback_on_error(using=self.db): rows = query.get_compiler(self.db).execute_sql(CURSOR) self._result_cache = None return rows update.alters_data = True async def aupdate(self, **kwargs): return await sync_to_async(self.update)(**kwargs) aupdate.alters_data = True def _update(self, values): """ A version of update() that accepts field objects instead of field names. Used primarily for model saving and not intended for use by general code (it requires too much poking around at model internals to be useful at that level). """ if self.query.is_sliced: raise TypeError("Cannot update a query once a slice has been taken.") query = self.query.chain(sql.UpdateQuery) query.add_update_fields(values) # Clear any annotations so that they won't be present in subqueries. query.annotations = {} self._result_cache = None return query.get_compiler(self.db).execute_sql(CURSOR) _update.alters_data = True _update.queryset_only = False def exists(self): """ Return True if the QuerySet would have any results, False otherwise. """ if self._result_cache is None: return self.query.has_results(using=self.db) return bool(self._result_cache) async def aexists(self): return await sync_to_async(self.exists)() def contains(self, obj): """ Return True if the QuerySet contains the provided obj, False otherwise. """ self._not_support_combined_queries("contains") if self._fields is not None: raise TypeError( "Cannot call QuerySet.contains() after .values() or .values_list()." ) try: if obj._meta.concrete_model != self.model._meta.concrete_model: return False except AttributeError: raise TypeError("'obj' must be a model instance.") if obj.pk is None: raise ValueError("QuerySet.contains() cannot be used on unsaved objects.") if self._result_cache is not None: return obj in self._result_cache return self.filter(pk=obj.pk).exists() async def acontains(self, obj): return await sync_to_async(self.contains)(obj=obj) def _prefetch_related_objects(self): # This method can only be called once the result cache has been filled. prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def explain(self, *, format=None, **options): """ Runs an EXPLAIN on the SQL query this QuerySet would perform, and returns the results. 
""" return self.query.explain(using=self.db, format=format, **options) async def aexplain(self, *, format=None, **options): return await sync_to_async(self.explain)(format=format, **options) ################################################## # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS # ################################################## def raw(self, raw_query, params=(), translations=None, using=None): if using is None: using = self.db qs = RawQuerySet( raw_query, model=self.model, params=params, translations=translations, using=using, ) qs._prefetch_related_lookups = self._prefetch_related_lookups[:] return qs def _values(self, *fields, **expressions): clone = self._chain() if expressions: clone = clone.annotate(**expressions) clone._fields = fields clone.query.set_values(fields) return clone def values(self, *fields, **expressions): fields += tuple(expressions) clone = self._values(*fields, **expressions) clone._iterable_class = ValuesIterable return clone def values_list(self, *fields, flat=False, named=False): if flat and named: raise TypeError("'flat' and 'named' can't be used together.") if flat and len(fields) > 1: raise TypeError( "'flat' is not valid when values_list is called with more than one " "field." ) field_names = {f for f in fields if not hasattr(f, "resolve_expression")} _fields = [] expressions = {} counter = 1 for field in fields: if hasattr(field, "resolve_expression"): field_id_prefix = getattr( field, "default_alias", field.__class__.__name__.lower() ) while True: field_id = field_id_prefix + str(counter) counter += 1 if field_id not in field_names: break expressions[field_id] = field _fields.append(field_id) else: _fields.append(field) clone = self._values(*_fields, **expressions) clone._iterable_class = ( NamedValuesListIterable if named else FlatValuesListIterable if flat else ValuesListIterable ) return clone def dates(self, field_name, kind, order="ASC"): """ Return a list of date objects representing all available dates for the given field_name, scoped to 'kind'. """ if kind not in ("year", "month", "week", "day"): raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.") if order not in ("ASC", "DESC"): raise ValueError("'order' must be either 'ASC' or 'DESC'.") return ( self.annotate( datefield=Trunc(field_name, kind, output_field=DateField()), plain_field=F(field_name), ) .values_list("datefield", flat=True) .distinct() .filter(plain_field__isnull=False) .order_by(("-" if order == "DESC" else "") + "datefield") ) # RemovedInDjango50Warning: when the deprecation ends, remove is_dst # argument. def datetimes( self, field_name, kind, order="ASC", tzinfo=None, is_dst=timezone.NOT_PASSED ): """ Return a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'. """ if kind not in ("year", "month", "week", "day", "hour", "minute", "second"): raise ValueError( "'kind' must be one of 'year', 'month', 'week', 'day', " "'hour', 'minute', or 'second'." 
) if order not in ("ASC", "DESC"): raise ValueError("'order' must be either 'ASC' or 'DESC'.") if settings.USE_TZ: if tzinfo is None: tzinfo = timezone.get_current_timezone() else: tzinfo = None return ( self.annotate( datetimefield=Trunc( field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo, is_dst=is_dst, ), plain_field=F(field_name), ) .values_list("datetimefield", flat=True) .distinct() .filter(plain_field__isnull=False) .order_by(("-" if order == "DESC" else "") + "datetimefield") ) def none(self): """Return an empty QuerySet.""" clone = self._chain() clone.query.set_empty() return clone ################################################################## # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET # ################################################################## def all(self): """ Return a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases. """ return self._chain() def filter(self, *args, **kwargs): """ Return a new QuerySet instance with the args ANDed to the existing set. """ self._not_support_combined_queries("filter") return self._filter_or_exclude(False, args, kwargs) def exclude(self, *args, **kwargs): """ Return a new QuerySet instance with NOT (args) ANDed to the existing set. """ self._not_support_combined_queries("exclude") return self._filter_or_exclude(True, args, kwargs) def _filter_or_exclude(self, negate, args, kwargs): if (args or kwargs) and self.query.is_sliced: raise TypeError("Cannot filter a query once a slice has been taken.") clone = self._chain() if self._defer_next_filter: self._defer_next_filter = False clone._deferred_filter = negate, args, kwargs else: clone._filter_or_exclude_inplace(negate, args, kwargs) return clone def _filter_or_exclude_inplace(self, negate, args, kwargs): if negate: self._query.add_q(~Q(*args, **kwargs)) else: self._query.add_q(Q(*args, **kwargs)) def complex_filter(self, filter_obj): """ Return a new QuerySet instance with filter_obj added to the filters. filter_obj can be a Q object or a dictionary of keyword lookup arguments. This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods. """ if isinstance(filter_obj, Q): clone = self._chain() clone.query.add_q(filter_obj) return clone else: return self._filter_or_exclude(False, args=(), kwargs=filter_obj) def _combinator_query(self, combinator, *other_qs, all=False): # Clone the query to inherit the select list and everything clone = self._chain() # Clear limits and ordering so they can be reapplied clone.query.clear_ordering(force=True) clone.query.clear_limits() clone.query.combined_queries = (self.query,) + tuple( qs.query for qs in other_qs ) clone.query.combinator = combinator clone.query.combinator_all = all return clone def union(self, *other_qs, all=False): # If the query is an EmptyQuerySet, combine all nonempty querysets. if isinstance(self, EmptyQuerySet): qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)] if not qs: return self if len(qs) == 1: return qs[0] return qs[0]._combinator_query("union", *qs[1:], all=all) return self._combinator_query("union", *other_qs, all=all) def intersection(self, *other_qs): # If any query is an EmptyQuerySet, return it. 
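        # e.g. qs.intersection(Model.objects.none()) can never match anything,
        # so short-circuit to the empty queryset (illustrative).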
if isinstance(self, EmptyQuerySet): return self for other in other_qs: if isinstance(other, EmptyQuerySet): return other return self._combinator_query("intersection", *other_qs) def difference(self, *other_qs): # If the query is an EmptyQuerySet, return it. if isinstance(self, EmptyQuerySet): return self return self._combinator_query("difference", *other_qs) def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False): """ Return a new QuerySet instance that will select objects with a FOR UPDATE lock. """ if nowait and skip_locked: raise ValueError("The nowait option cannot be used with skip_locked.") obj = self._chain() obj._for_write = True obj.query.select_for_update = True obj.query.select_for_update_nowait = nowait obj.query.select_for_update_skip_locked = skip_locked obj.query.select_for_update_of = of obj.query.select_for_no_key_update = no_key return obj def select_related(self, *fields): """ Return a new QuerySet instance that will select related objects. If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection. If select_related(None) is called, clear the list. """ self._not_support_combined_queries("select_related") if self._fields is not None: raise TypeError( "Cannot call select_related() after .values() or .values_list()" ) obj = self._chain() if fields == (None,): obj.query.select_related = False elif fields: obj.query.add_select_related(fields) else: obj.query.select_related = True return obj def prefetch_related(self, *lookups): """ Return a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated. When prefetch_related() is called more than once, append to the list of prefetch lookups. If prefetch_related(None) is called, clear the list. """ self._not_support_combined_queries("prefetch_related") clone = self._chain() if lookups == (None,): clone._prefetch_related_lookups = () else: for lookup in lookups: if isinstance(lookup, Prefetch): lookup = lookup.prefetch_to lookup = lookup.split(LOOKUP_SEP, 1)[0] if lookup in self.query._filtered_relations: raise ValueError( "prefetch_related() is not supported with FilteredRelation." ) clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def annotate(self, *args, **kwargs): """ Return a query set in which the returned objects have been annotated with extra data or aggregations. """ self._not_support_combined_queries("annotate") return self._annotate(args, kwargs, select=True) def alias(self, *args, **kwargs): """ Return a query set with added aliases for extra data or aggregations. """ self._not_support_combined_queries("alias") return self._annotate(args, kwargs, select=False) def _annotate(self, args, kwargs, select=True): self._validate_values_are_expressions( args + tuple(kwargs.values()), method_name="annotate" ) annotations = {} for arg in args: # The default_alias property may raise a TypeError. try: if arg.default_alias in kwargs: raise ValueError( "The named annotation '%s' conflicts with the " "default name for another annotation." 
                        % arg.default_alias
                    )
            except TypeError:
                raise TypeError("Complex annotations require an alias")
            annotations[arg.default_alias] = arg
        annotations.update(kwargs)

        clone = self._chain()
        names = self._fields
        if names is None:
            names = set(
                chain.from_iterable(
                    (field.name, field.attname)
                    if hasattr(field, "attname")
                    else (field.name,)
                    for field in self.model._meta.get_fields()
                )
            )

        for alias, annotation in annotations.items():
            if alias in names:
                raise ValueError(
                    "The annotation '%s' conflicts with a field on "
                    "the model." % alias
                )
            if isinstance(annotation, FilteredRelation):
                clone.query.add_filtered_relation(annotation, alias)
            else:
                clone.query.add_annotation(
                    annotation,
                    alias,
                    is_summary=False,
                    select=select,
                )
        for alias, annotation in clone.query.annotations.items():
            if alias in annotations and annotation.contains_aggregate:
                if clone._fields is None:
                    clone.query.group_by = True
                else:
                    clone.query.set_group_by()
                break

        return clone

    def order_by(self, *field_names):
        """Return a new QuerySet instance with the ordering changed."""
        if self.query.is_sliced:
            raise TypeError("Cannot reorder a query once a slice has been taken.")
        obj = self._chain()
        obj.query.clear_ordering(force=True, clear_default=False)
        obj.query.add_ordering(*field_names)
        return obj

    def distinct(self, *field_names):
        """
        Return a new QuerySet instance that will select only distinct results.
        """
        self._not_support_combined_queries("distinct")
        if self.query.is_sliced:
            raise TypeError(
                "Cannot create distinct fields once a slice has been taken."
            )
        obj = self._chain()
        obj.query.add_distinct_fields(*field_names)
        return obj

    def extra(
        self,
        select=None,
        where=None,
        params=None,
        tables=None,
        order_by=None,
        select_params=None,
    ):
        """Add extra SQL fragments to the query."""
        self._not_support_combined_queries("extra")
        if self.query.is_sliced:
            raise TypeError("Cannot change a query once a slice has been taken.")
        clone = self._chain()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone

    def reverse(self):
        """Reverse the ordering of the QuerySet."""
        if self.query.is_sliced:
            raise TypeError("Cannot reverse a query once a slice has been taken.")
        clone = self._chain()
        clone.query.standard_ordering = not clone.query.standard_ordering
        return clone

    def defer(self, *fields):
        """
        Defer the loading of data for certain fields until they are accessed.
        Add the set of deferred fields to any existing set of deferred fields.
        The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed.
        """
        self._not_support_combined_queries("defer")
        if self._fields is not None:
            raise TypeError("Cannot call defer() after .values() or .values_list()")
        clone = self._chain()
        if fields == (None,):
            clone.query.clear_deferred_loading()
        else:
            clone.query.add_deferred_loading(fields)
        return clone

    def only(self, *fields):
        """
        Essentially, the opposite of defer(). Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.
        """
        self._not_support_combined_queries("only")
        if self._fields is not None:
            raise TypeError("Cannot call only() after .values() or .values_list()")
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the rest option.
            # That won't stop people trying to do this, so let's be explicit.
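            # i.e. qs.only(None) raises immediately, whereas qs.defer(None)
            # resets any deferrals.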
raise TypeError("Cannot pass None as an argument to only().") for field in fields: field = field.split(LOOKUP_SEP, 1)[0] if field in self.query._filtered_relations: raise ValueError("only() is not supported with FilteredRelation.") clone = self._chain() clone.query.add_immediate_loading(fields) return clone def using(self, alias): """Select which database this QuerySet should execute against.""" clone = self._chain() clone._db = alias return clone ################################### # PUBLIC INTROSPECTION ATTRIBUTES # ################################### @property def ordered(self): """ Return True if the QuerySet is ordered -- i.e. has an order_by() clause or a default ordering on the model (or is empty). """ if isinstance(self, EmptyQuerySet): return True if self.query.extra_order_by or self.query.order_by: return True elif ( self.query.default_ordering and self.query.get_meta().ordering and # A default ordering doesn't affect GROUP BY queries. not self.query.group_by ): return True else: return False @property def db(self): """Return the database used if this query is executed now.""" if self._for_write: return self._db or router.db_for_write(self.model, **self._hints) return self._db or router.db_for_read(self.model, **self._hints) ################### # PRIVATE METHODS # ################### def _insert( self, objs, fields, returning_fields=None, raw=False, using=None, on_conflict=None, update_fields=None, unique_fields=None, ): """ Insert a new record for the given model. This provides an interface to the InsertQuery class and is how Model.save() is implemented. """ self._for_write = True if using is None: using = self.db query = sql.InsertQuery( self.model, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) query.insert_values(fields, objs, raw=raw) return query.get_compiler(using=using).execute_sql(returning_fields) _insert.alters_data = True _insert.queryset_only = False def _batched_insert( self, objs, fields, batch_size, on_conflict=None, update_fields=None, unique_fields=None, ): """ Helper method for bulk_create() to insert objs one batch at a time. """ connection = connections[self.db] ops = connection.ops max_batch_size = max(ops.bulk_batch_size(fields, objs), 1) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size inserted_rows = [] bulk_return = connection.features.can_return_rows_from_bulk_insert for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]: if bulk_return and on_conflict is None: inserted_rows.extend( self._insert( item, fields=fields, using=self.db, returning_fields=self.model._meta.db_returning_fields, ) ) else: self._insert( item, fields=fields, using=self.db, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) return inserted_rows def _chain(self): """ Return a copy of the current QuerySet that's ready for another operation. """ obj = self._clone() if obj._sticky_filter: obj.query.filter_is_sticky = True obj._sticky_filter = False return obj def _clone(self): """ Return a copy of the current QuerySet. A lightweight alternative to deepcopy(). 
""" c = self.__class__( model=self.model, query=self.query.chain(), using=self._db, hints=self._hints, ) c._sticky_filter = self._sticky_filter c._for_write = self._for_write c._prefetch_related_lookups = self._prefetch_related_lookups[:] c._known_related_objects = self._known_related_objects c._iterable_class = self._iterable_class c._fields = self._fields return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self._iterable_class(self)) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def _next_is_sticky(self): """ Indicate that the next filter call and the one following that should be treated as a single filter. This is only important when it comes to determining when to reuse tables for many-to-many filters. Required so that we can filter naturally on the results of related managers. This doesn't return a clone of the current QuerySet (it returns "self"). The method is only used internally and should be immediately followed by a filter() that does create a clone. """ self._sticky_filter = True return self def _merge_sanity_check(self, other): """Check that two QuerySet classes may be merged.""" if self._fields is not None and ( set(self.query.values_select) != set(other.query.values_select) or set(self.query.extra_select) != set(other.query.extra_select) or set(self.query.annotation_select) != set(other.query.annotation_select) ): raise TypeError( "Merging '%s' classes must involve the same values in each case." % self.__class__.__name__ ) def _merge_known_related_objects(self, other): """ Keep track of all known related objects from either QuerySet instance. """ for field, objects in other._known_related_objects.items(): self._known_related_objects.setdefault(field, {}).update(objects) def resolve_expression(self, *args, **kwargs): if self._fields and len(self._fields) > 1: # values() queryset can only be used as nested queries # if they are set up to select only a single field. raise TypeError("Cannot use multi-field values as a filter value.") query = self.query.resolve_expression(*args, **kwargs) query._db = self._db return query resolve_expression.queryset_only = True def _add_hints(self, **hints): """ Update hinting information for use by routers. Add new key/values or overwrite existing key/values. """ self._hints.update(hints) def _has_filters(self): """ Check if this QuerySet has any filtering going on. This isn't equivalent with checking if all objects are present in results, for example, qs[1:]._has_filters() -> False. """ return self.query.has_filters() @staticmethod def _validate_values_are_expressions(values, method_name): invalid_args = sorted( str(arg) for arg in values if not hasattr(arg, "resolve_expression") ) if invalid_args: raise TypeError( "QuerySet.%s() received non-expression(s): %s." % ( method_name, ", ".join(invalid_args), ) ) def _not_support_combined_queries(self, operation_name): if self.query.combinator: raise NotSupportedError( "Calling QuerySet.%s() after %s() is not supported." 
                % (operation_name, self.query.combinator)
            )

    def _check_operator_queryset(self, other, operator_):
        if self.query.combinator or other.query.combinator:
            raise TypeError(f"Cannot use {operator_} operator with combined queryset.")

    def _check_ordering_first_last_queryset_aggregation(self, method):
        if isinstance(self.query.group_by, tuple) and not any(
            col.output_field is self.model._meta.pk for col in self.query.group_by
        ):
            raise TypeError(
                f"Cannot use QuerySet.{method}() on an unordered queryset performing "
                f"aggregation. Add an ordering with order_by()."
            )


class InstanceCheckMeta(type):
    def __instancecheck__(self, instance):
        return isinstance(instance, QuerySet) and instance.query.is_empty()


class EmptyQuerySet(metaclass=InstanceCheckMeta):
    """
    Marker class for checking whether a queryset is empty via .none():
    isinstance(qs.none(), EmptyQuerySet) -> True
    """

    def __init__(self, *args, **kwargs):
        raise TypeError("EmptyQuerySet can't be instantiated")


class RawQuerySet:
    """
    Provide an iterator which converts the results of raw SQL queries into
    annotated model instances.
    """

    def __init__(
        self,
        raw_query,
        model=None,
        query=None,
        params=(),
        translations=None,
        using=None,
        hints=None,
    ):
        self.raw_query = raw_query
        self.model = model
        self._db = using
        self._hints = hints or {}
        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
        self.params = params
        self.translations = translations or {}
        self._result_cache = None
        self._prefetch_related_lookups = ()
        self._prefetch_done = False

    def resolve_model_init_order(self):
        """Resolve the init field names and value positions."""
        converter = connections[self.db].introspection.identifier_converter
        model_init_fields = [
            f for f in self.model._meta.fields if converter(f.column) in self.columns
        ]
        annotation_fields = [
            (column, pos)
            for pos, column in enumerate(self.columns)
            if column not in self.model_fields
        ]
        model_init_order = [
            self.columns.index(converter(f.column)) for f in model_init_fields
        ]
        model_init_names = [f.attname for f in model_init_fields]
        return model_init_names, model_init_order, annotation_fields

    def prefetch_related(self, *lookups):
        """Same as QuerySet.prefetch_related()"""
        clone = self._clone()
        if lookups == (None,):
            clone._prefetch_related_lookups = ()
        else:
            clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
        return clone

    def _prefetch_related_objects(self):
        prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
        self._prefetch_done = True

    def _clone(self):
        """Same as QuerySet._clone()"""
        c = self.__class__(
            self.raw_query,
            model=self.model,
            query=self.query,
            params=self.params,
            translations=self.translations,
            using=self._db,
            hints=self._hints,
        )
        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
        return c

    def _fetch_all(self):
        if self._result_cache is None:
            self._result_cache = list(self.iterator())
        if self._prefetch_related_lookups and not self._prefetch_done:
            self._prefetch_related_objects()

    def __len__(self):
        self._fetch_all()
        return len(self._result_cache)

    def __bool__(self):
        self._fetch_all()
        return bool(self._result_cache)

    def __iter__(self):
        self._fetch_all()
        return iter(self._result_cache)

    def __aiter__(self):
        # Remember, __aiter__ itself is synchronous, it's the thing it returns
        # that is async!
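        # Usage sketch (hypothetical model, illustration only):
        #
        #     async for row in MyModel.objects.raw("SELECT * FROM app_mymodel"):
        #         ...
        #
        # _fetch_all() itself stays synchronous and is pushed to a thread via
        # sync_to_async inside the generator below.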
async def generator(): await sync_to_async(self._fetch_all)() for item in self._result_cache: yield item return generator() def iterator(self): yield from RawModelIterable(self) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.query) def __getitem__(self, k): return list(self)[k] @property def db(self): """Return the database used if this query is executed now.""" return self._db or router.db_for_read(self.model, **self._hints) def using(self, alias): """Select the database this RawQuerySet should execute against.""" return RawQuerySet( self.raw_query, model=self.model, query=self.query.chain(using=alias), params=self.params, translations=self.translations, using=alias, ) @cached_property def columns(self): """ A list of model field names in the order they'll appear in the query results. """ columns = self.query.get_columns() # Adjust any column names which don't match field names for (query_name, model_name) in self.translations.items(): # Ignore translations for nonexistent column names try: index = columns.index(query_name) except ValueError: pass else: columns[index] = model_name return columns @cached_property def model_fields(self): """A dict mapping column names to model field names.""" converter = connections[self.db].introspection.identifier_converter model_fields = {} for field in self.model._meta.fields: name, column = field.get_attname_column() model_fields[converter(column)] = field return model_fields class Prefetch: def __init__(self, lookup, queryset=None, to_attr=None): # `prefetch_through` is the path we traverse to perform the prefetch. self.prefetch_through = lookup # `prefetch_to` is the path to the attribute that stores the result. self.prefetch_to = lookup if queryset is not None and ( isinstance(queryset, RawQuerySet) or ( hasattr(queryset, "_iterable_class") and not issubclass(queryset._iterable_class, ModelIterable) ) ): raise ValueError( "Prefetch querysets cannot use raw(), values(), and values_list()." 
) if to_attr: self.prefetch_to = LOOKUP_SEP.join( lookup.split(LOOKUP_SEP)[:-1] + [to_attr] ) self.queryset = queryset self.to_attr = to_attr def __getstate__(self): obj_dict = self.__dict__.copy() if self.queryset is not None: queryset = self.queryset._chain() # Prevent the QuerySet from being evaluated queryset._result_cache = [] queryset._prefetch_done = True obj_dict["queryset"] = queryset return obj_dict def add_prefix(self, prefix): self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to def get_current_prefetch_to(self, level): return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[: level + 1]) def get_current_to_attr(self, level): parts = self.prefetch_to.split(LOOKUP_SEP) to_attr = parts[level] as_attr = self.to_attr and level == len(parts) - 1 return to_attr, as_attr def get_current_queryset(self, level): if self.get_current_prefetch_to(level) == self.prefetch_to: return self.queryset return None def __eq__(self, other): if not isinstance(other, Prefetch): return NotImplemented return self.prefetch_to == other.prefetch_to def __hash__(self): return hash((self.__class__, self.prefetch_to)) def normalize_prefetch_lookups(lookups, prefix=None): """Normalize lookups into Prefetch objects.""" ret = [] for lookup in lookups: if not isinstance(lookup, Prefetch): lookup = Prefetch(lookup) if prefix: lookup.add_prefix(prefix) ret.append(lookup) return ret def prefetch_related_objects(model_instances, *related_lookups): """ Populate prefetched object caches for a list of model instances based on the lookups/Prefetch instances given. """ if not model_instances: return # nothing to do # We need to be able to dynamically add to the list of prefetch_related # lookups that we look up (see below). So we need some book keeping to # ensure we don't do duplicate work. done_queries = {} # dictionary of things like 'foo__bar': [results] auto_lookups = set() # we add to this as we go through. followed_descriptors = set() # recursion protection all_lookups = normalize_prefetch_lookups(reversed(related_lookups)) while all_lookups: lookup = all_lookups.pop() if lookup.prefetch_to in done_queries: if lookup.queryset is not None: raise ValueError( "'%s' lookup was already seen with a different queryset. " "You may need to adjust the ordering of your lookups." % lookup.prefetch_to ) continue # Top level, the list of objects to decorate is the result cache # from the primary QuerySet. It won't be for deeper levels. obj_list = model_instances through_attrs = lookup.prefetch_through.split(LOOKUP_SEP) for level, through_attr in enumerate(through_attrs): # Prepare main instances if not obj_list: break prefetch_to = lookup.get_current_prefetch_to(level) if prefetch_to in done_queries: # Skip any prefetching, and any object preparation obj_list = done_queries[prefetch_to] continue # Prepare objects: good_objects = True for obj in obj_list: # Since prefetching can re-use instances, it is possible to have # the same instance multiple times in obj_list, so obj might # already be prepared. if not hasattr(obj, "_prefetched_objects_cache"): try: obj._prefetched_objects_cache = {} except (AttributeError, TypeError): # Must be an immutable object from # values_list(flat=True), for example (TypeError) or # a QuerySet subclass that isn't returning Model # instances (AttributeError), either in Django or a 3rd # party. prefetch_related() doesn't make sense, so quit. 
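                        # e.g. (illustrative) prefetching against
                        # qs.values_list("pk", flat=True) yields plain ints,
                        # which cannot carry a _prefetched_objects_cache.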
good_objects = False break if not good_objects: break # Descend down tree # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. first_obj = obj_list[0] to_attr = lookup.get_current_to_attr(level)[0] prefetcher, descriptor, attr_found, is_fetched = get_prefetcher( first_obj, through_attr, to_attr ) if not attr_found: raise AttributeError( "Cannot find '%s' on %s object, '%s' is an invalid " "parameter to prefetch_related()" % ( through_attr, first_obj.__class__.__name__, lookup.prefetch_through, ) ) if level == len(through_attrs) - 1 and prefetcher is None: # Last one, this *must* resolve to something that supports # prefetching, otherwise there is no point adding it and the # developer asking for it has made a mistake. raise ValueError( "'%s' does not resolve to an item that supports " "prefetching - this is an invalid parameter to " "prefetch_related()." % lookup.prefetch_through ) obj_to_fetch = None if prefetcher is not None: obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)] if obj_to_fetch: obj_list, additional_lookups = prefetch_one_level( obj_to_fetch, prefetcher, lookup, level, ) # We need to ensure we don't keep adding lookups from the # same relationships to stop infinite recursion. So, if we # are already on an automatically added lookup, don't add # the new lookups from relationships we've seen already. if not ( prefetch_to in done_queries and lookup in auto_lookups and descriptor in followed_descriptors ): done_queries[prefetch_to] = obj_list new_lookups = normalize_prefetch_lookups( reversed(additional_lookups), prefetch_to ) auto_lookups.update(new_lookups) all_lookups.extend(new_lookups) followed_descriptors.add(descriptor) else: # Either a singly related object that has already been fetched # (e.g. via select_related), or hopefully some other property # that doesn't support prefetching but needs to be traversed. # We replace the current list of parent objects with the list # of related objects, filtering out empty or missing values so # that we can continue with nullable or reverse relations. new_obj_list = [] for obj in obj_list: if through_attr in getattr(obj, "_prefetched_objects_cache", ()): # If related objects have been prefetched, use the # cache rather than the object's through_attr. new_obj = list(obj._prefetched_objects_cache.get(through_attr)) else: try: new_obj = getattr(obj, through_attr) except exceptions.ObjectDoesNotExist: continue if new_obj is None: continue # We special-case `list` rather than something more generic # like `Iterable` because we don't want to accidentally match # user models that define __iter__. if isinstance(new_obj, list): new_obj_list.extend(new_obj) else: new_obj_list.append(new_obj) obj_list = new_obj_list def get_prefetcher(instance, through_attr, to_attr): """ For the attribute 'through_attr' on the given instance, find an object that has a get_prefetch_queryset(). 
Return a 4 tuple containing: (the object with get_prefetch_queryset (or None), the descriptor object representing this relationship (or None), a boolean that is False if the attribute was not found at all, a function that takes an instance and returns a boolean that is True if the attribute has already been fetched for that instance) """ def has_to_attr_attribute(instance): return hasattr(instance, to_attr) prefetcher = None is_fetched = has_to_attr_attribute # For singly related objects, we have to avoid getting the attribute # from the object, as this will trigger the query. So we first try # on the class, in order to get the descriptor object. rel_obj_descriptor = getattr(instance.__class__, through_attr, None) if rel_obj_descriptor is None: attr_found = hasattr(instance, through_attr) else: attr_found = True if rel_obj_descriptor: # singly related object, descriptor object has the # get_prefetch_queryset() method. if hasattr(rel_obj_descriptor, "get_prefetch_queryset"): prefetcher = rel_obj_descriptor is_fetched = rel_obj_descriptor.is_cached else: # descriptor doesn't support prefetching, so we go ahead and get # the attribute on the instance rather than the class to # support many related managers rel_obj = getattr(instance, through_attr) if hasattr(rel_obj, "get_prefetch_queryset"): prefetcher = rel_obj if through_attr != to_attr: # Special case cached_property instances because hasattr # triggers attribute computation and assignment. if isinstance( getattr(instance.__class__, to_attr, None), cached_property ): def has_cached_property(instance): return to_attr in instance.__dict__ is_fetched = has_cached_property else: def in_prefetched_cache(instance): return through_attr in instance._prefetched_objects_cache is_fetched = in_prefetched_cache return prefetcher, rel_obj_descriptor, attr_found, is_fetched def prefetch_one_level(instances, prefetcher, lookup, level): """ Helper function for prefetch_related_objects(). Run prefetches on all instances using the prefetcher object, assigning results to relevant caches in instance. Return the prefetched objects along with any additional prefetches that must be done due to prefetch_related lookups found from default managers. """ # prefetcher must have a method get_prefetch_queryset() which takes a list # of instances, and returns a tuple: # (queryset of instances of self.model that are related to passed in instances, # callable that gets value to be matched for returned instances, # callable that gets value to be matched for passed in instances, # boolean that is True for singly related objects, # cache or field name to assign to, # boolean that is True when the previous argument is a cache name vs a field name). # The 'values to be matched' must be hashable as they will be used # in a dictionary. ( rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor, ) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)) # We have to handle the possibility that the QuerySet we just got back # contains some prefetch_related lookups. We don't want to trigger the # prefetch_related functionality by evaluating the query. Rather, we need # to merge in the prefetch_related lookups. # Copy the lookups in case it is a Prefetch object which could be reused # later (happens in nested prefetch_related). 
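    # e.g. (illustrative) the same Prefetch("books", queryset=...) object
    # reused at two levels of a nested prefetch must not share mutated state,
    # hence the copy.copy() below.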
additional_lookups = [ copy.copy(additional_lookup) for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ()) ] if additional_lookups: # Don't need to clone because the manager should have given us a fresh # instance, so we access an internal instead of using public interface # for performance reasons. rel_qs._prefetch_related_lookups = () all_related_objects = list(rel_qs) rel_obj_cache = {} for rel_obj in all_related_objects: rel_attr_val = rel_obj_attr(rel_obj) rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) to_attr, as_attr = lookup.get_current_to_attr(level) # Make sure `to_attr` does not conflict with a field. if as_attr and instances: # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. model = instances[0].__class__ try: model._meta.get_field(to_attr) except exceptions.FieldDoesNotExist: pass else: msg = "to_attr={} conflicts with a field on the {} model." raise ValueError(msg.format(to_attr, model.__name__)) # Whether or not we're prefetching the last part of the lookup. leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level for obj in instances: instance_attr_val = instance_attr(obj) vals = rel_obj_cache.get(instance_attr_val, []) if single: val = vals[0] if vals else None if as_attr: # A to_attr has been given for the prefetch. setattr(obj, to_attr, val) elif is_descriptor: # cache_name points to a field name in obj. # This field is a descriptor for a related object. setattr(obj, cache_name, val) else: # No to_attr has been given for this prefetch operation and the # cache_name does not point to a descriptor. Store the value of # the field in the object's field cache. obj._state.fields_cache[cache_name] = val else: if as_attr: setattr(obj, to_attr, vals) else: manager = getattr(obj, to_attr) if leaf and lookup.queryset is not None: qs = manager._apply_rel_filters(lookup.queryset) else: qs = manager.get_queryset() qs._result_cache = vals # We don't want the individual qs doing prefetch_related now, # since we have merged this into the current work. qs._prefetch_done = True obj._prefetched_objects_cache[cache_name] = qs return all_related_objects, additional_lookups class RelatedPopulator: """ RelatedPopulator is used for select_related() object instantiation. The idea is that each select_related() model will be populated by a different RelatedPopulator instance. The RelatedPopulator instances get klass_info and select (computed in SQLCompiler) plus the used db as input for initialization. That data is used to compute which columns to use, how to instantiate the model, and how to populate the links between the objects. The actual creation of the objects is done in populate() method. This method gets row and from_obj as input and populates the select_related() model instance. """ def __init__(self, klass_info, select, db): self.db = db # Pre-compute needed attributes. The attributes are: # - model_cls: the possibly deferred model class to instantiate # - either: # - cols_start, cols_end: usually the columns in the row are # in the same order model_cls.__init__ expects them, so we # can instantiate by model_cls(*row[cols_start:cols_end]) # - reorder_for_init: When select_related descends to a child # class, then we want to reuse the already selected parent # data. However, in this case the parent data isn't necessarily # in the same order that Model.__init__ expects it to be, so # we have to reorder the parent data. 
        #      The reorder_for_init attribute contains a function used to
        #      reorder the field data in the order __init__ expects it.
        #  - pk_idx: the index of the primary key field in the reordered
        #    model data. Used to check if a related object exists at all.
        #  - init_list: the field attnames fetched from the database. For
        #    deferred models this isn't the same as all attnames of the
        #    model's fields.
        #  - related_populators: a list of RelatedPopulator instances if
        #    select_related() descends to related models from this model.
        #  - local_setter, remote_setter: Methods to set cached values on
        #    the object being populated and on the remote object. Usually
        #    these are Field.set_cached_value() methods.
        select_fields = klass_info["select_fields"]
        from_parent = klass_info["from_parent"]
        if not from_parent:
            self.cols_start = select_fields[0]
            self.cols_end = select_fields[-1] + 1
            self.init_list = [
                f[0].target.attname for f in select[self.cols_start : self.cols_end]
            ]
            self.reorder_for_init = None
        else:
            attname_indexes = {
                select[idx][0].target.attname: idx for idx in select_fields
            }
            model_init_attnames = (
                f.attname for f in klass_info["model"]._meta.concrete_fields
            )
            self.init_list = [
                attname
                for attname in model_init_attnames
                if attname in attname_indexes
            ]
            self.reorder_for_init = operator.itemgetter(
                *[attname_indexes[attname] for attname in self.init_list]
            )
        self.model_cls = klass_info["model"]
        self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
        self.related_populators = get_related_populators(klass_info, select, self.db)
        self.local_setter = klass_info["local_setter"]
        self.remote_setter = klass_info["remote_setter"]

    def populate(self, row, from_obj):
        if self.reorder_for_init:
            obj_data = self.reorder_for_init(row)
        else:
            obj_data = row[self.cols_start : self.cols_end]
        if obj_data[self.pk_idx] is None:
            obj = None
        else:
            obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
            for rel_iter in self.related_populators:
                rel_iter.populate(row, obj)
        self.local_setter(from_obj, obj)
        if obj is not None:
            self.remote_setter(obj, from_obj)


def get_related_populators(klass_info, select, db):
    iterators = []
    related_klass_infos = klass_info.get("related_klass_infos", [])
    for rel_klass_info in related_klass_infos:
        rel_cls = RelatedPopulator(rel_klass_info, select, db)
        iterators.append(rel_cls)
    return iterators
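

# An illustrative sketch (hypothetical Author/Book models, not part of this
# module) of how the machinery above surfaces through the public API:
#
#     >>> from django.db.models import Prefetch
#     >>> authors = Author.objects.prefetch_related(
#     ...     Prefetch(
#     ...         "books",
#     ...         queryset=Book.objects.order_by("title"),
#     ...         to_attr="sorted_books",
#     ...     )
#     ... )
#     >>> authors[0].sorted_books  # plain list assigned by prefetch_one_level()
#
#     >>> books = Book.objects.select_related("author")
#     >>> books[0].author  # populated from the same row by RelatedPopulator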
484e4be42568530af685861718647ffb1ae58c5c076a247836296a89617512e6
import copy import inspect import warnings from functools import partialmethod from itertools import chain from asgiref.sync import sync_to_async import django from django.apps import apps from django.conf import settings from django.core import checks from django.core.exceptions import ( NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned, ObjectDoesNotExist, ValidationError, ) from django.db import ( DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection, connections, router, transaction, ) from django.db.models import NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value from django.db.models.constants import LOOKUP_SEP from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, Collector from django.db.models.expressions import RawSQL from django.db.models.fields.related import ( ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation, ) from django.db.models.functions import Coalesce from django.db.models.manager import Manager from django.db.models.options import Options from django.db.models.query import F, Q from django.db.models.signals import ( class_prepared, post_init, post_save, pre_init, pre_save, ) from django.db.models.utils import AltersData, make_model_tuple from django.utils.encoding import force_str from django.utils.hashable import make_hashable from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext_lazy as _ class Deferred: def __repr__(self): return "<Deferred field>" def __str__(self): return "<Deferred field>" DEFERRED = Deferred() def subclass_exception(name, bases, module, attached_to): """ Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class. """ return type( name, bases, { "__module__": module, "__qualname__": "%s.%s" % (attached_to.__qualname__, name), }, ) def _has_contribute_to_class(value): # Only call contribute_to_class() if it's bound. return not inspect.isclass(value) and hasattr(value, "contribute_to_class") class ModelBase(type): """Metaclass for all models.""" def __new__(cls, name, bases, attrs, **kwargs): super_new = super().__new__ # Also ensure initialization is only performed for subclasses of Model # (excluding Model class itself). parents = [b for b in bases if isinstance(b, ModelBase)] if not parents: return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop("__module__") new_attrs = {"__module__": module} classcell = attrs.pop("__classcell__", None) if classcell is not None: new_attrs["__classcell__"] = classcell attr_meta = attrs.pop("Meta", None) # Pass all attrs without a (Django-specific) contribute_to_class() # method to type.__new__() so that they're properly initialized # (i.e. __set_name__()). contributable_attrs = {} for obj_name, obj in attrs.items(): if _has_contribute_to_class(obj): contributable_attrs[obj_name] = obj else: new_attrs[obj_name] = obj new_class = super_new(cls, name, bases, new_attrs, **kwargs) abstract = getattr(attr_meta, "abstract", False) meta = attr_meta or getattr(new_class, "Meta", None) base_meta = getattr(new_class, "_meta", None) app_label = None # Look for an application configuration to attach the model to. 
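        # If the model's module isn't part of an application listed in
        # INSTALLED_APPS and Meta declares no explicit app_label, the
        # RuntimeError below is raised (abstract models are exempt).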
app_config = apps.get_containing_app_config(module) if getattr(meta, "app_label", None) is None: if app_config is None: if not abstract: raise RuntimeError( "Model class %s.%s doesn't declare an explicit " "app_label and isn't in an application in " "INSTALLED_APPS." % (module, name) ) else: app_label = app_config.label new_class.add_to_class("_meta", Options(meta, app_label)) if not abstract: new_class.add_to_class( "DoesNotExist", subclass_exception( "DoesNotExist", tuple( x.DoesNotExist for x in parents if hasattr(x, "_meta") and not x._meta.abstract ) or (ObjectDoesNotExist,), module, attached_to=new_class, ), ) new_class.add_to_class( "MultipleObjectsReturned", subclass_exception( "MultipleObjectsReturned", tuple( x.MultipleObjectsReturned for x in parents if hasattr(x, "_meta") and not x._meta.abstract ) or (MultipleObjectsReturned,), module, attached_to=new_class, ), ) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). if not hasattr(meta, "ordering"): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, "get_latest_by"): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError( "%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped) ) # Add remaining attributes (those with a contribute_to_class() method) # to the class. for obj_name, obj in contributable_attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = chain( new_class._meta.local_fields, new_class._meta.local_many_to_many, new_class._meta.private_fields, ) field_names = {f.name for f in new_fields} # Basic setup for proxy models. if is_proxy: base = None for parent in [kls for kls in parents if hasattr(kls, "_meta")]: if parent._meta.abstract: if parent._meta.fields: raise TypeError( "Abstract base class containing model fields not " "permitted for proxy model '%s'." % name ) else: continue if base is None: base = parent elif parent._meta.concrete_model is not base._meta.concrete_model: raise TypeError( "Proxy model '%s' has more than one non-abstract model base " "class." % name ) if base is None: raise TypeError( "Proxy model '%s' has no non-abstract model base class." % name ) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Collect the parent links for multi-table inheritance. parent_links = {} for base in reversed([new_class] + parents): # Conceptually equivalent to `if base is Model`. if not hasattr(base, "_meta"): continue # Skip concrete parent classes. if base != new_class and not base._meta.abstract: continue # Locate OneToOneField instances. for field in base._meta.local_fields: if isinstance(field, OneToOneField) and field.remote_field.parent_link: related = resolve_relation(new_class, field.remote_field.model) parent_links[make_model_tuple(related)] = field # Track fields inherited from base models. inherited_attributes = set() # Do the appropriate setup for any model parents. for base in new_class.mro(): if base not in parents or not hasattr(base, "_meta"): # Things without _meta aren't functional models, so they're # uninteresting parents. 
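                # Their attribute names are still recorded, so that a field
                # inherited from an abstract base further down isn't copied
                # over an attribute the class already gets from a non-model
                # ancestor.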
inherited_attributes.update(base.__dict__) continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many if not base._meta.abstract: # Check for clashes between locally declared fields and those # on the base classes. for field in parent_fields: if field.name in field_names: raise FieldError( "Local field %r in class %r clashes with field of " "the same name from base class %r." % ( field.name, name, base.__name__, ) ) else: inherited_attributes.add(field.name) # Concrete classes... base = base._meta.concrete_model base_key = make_model_tuple(base) if base_key in parent_links: field = parent_links[base_key] elif not is_proxy: attr_name = "%s_ptr" % base._meta.model_name field = OneToOneField( base, on_delete=CASCADE, name=attr_name, auto_created=True, parent_link=True, ) if attr_name in field_names: raise FieldError( "Auto-generated field '%s' in class %r for " "parent_link to base class %r clashes with " "declared field of the same name." % ( attr_name, name, base.__name__, ) ) # Only add the ptr field if it's not already present; # e.g. migrations will already have it specified if not hasattr(new_class, attr_name): new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: base_parents = base._meta.parents.copy() # Add fields from abstract base class if it wasn't overridden. for field in parent_fields: if ( field.name not in field_names and field.name not in new_class.__dict__ and field.name not in inherited_attributes ): new_field = copy.deepcopy(field) new_class.add_to_class(field.name, new_field) # Replace parent links defined on this base by the new # field. It will be appropriately resolved if required. if field.one_to_one: for parent, parent_link in base_parents.items(): if field == parent_link: base_parents[parent] = new_field # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base_parents) # Inherit private fields (like GenericForeignKey) from the parent # class for field in base._meta.private_fields: if field.name in field_names: if not base._meta.abstract: raise FieldError( "Local field %r in class %r clashes with field of " "the same name from base class %r." % ( field.name, name, base.__name__, ) ) else: field = copy.deepcopy(field) if not base._meta.abstract: field.mti_inherited = True new_class.add_to_class(field.name, field) # Copy indexes so that index names are unique when models extend an # abstract model. new_class._meta.indexes = [ copy.deepcopy(idx) for idx in new_class._meta.indexes ] if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() new_class._meta.apps.register_model(new_class._meta.app_label, new_class) return new_class def add_to_class(cls, name, value): if _has_contribute_to_class(value): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """Create some methods once self._meta has been populated.""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = partialmethod( cls._get_next_or_previous_in_order, is_next=True ) cls.get_previous_in_order = partialmethod( cls._get_next_or_previous_in_order, is_next=False ) # Defer creating accessors on the foreign class until it has been # created and registered. 
If remote_field is None, we're ordering # with respect to a GenericForeignKey and don't know what the # foreign class is - we'll add those accessors later in # contribute_to_class(). if opts.order_with_respect_to.remote_field: wrt = opts.order_with_respect_to remote = wrt.remote_field.model lazy_related_operation(make_foreign_order_accessors, cls, remote) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % ( cls.__name__, ", ".join(f.name for f in opts.fields), ) get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get( opts.label_lower ) if get_absolute_url_override: setattr(cls, "get_absolute_url", get_absolute_url_override) if not opts.managers: if any(f.name == "objects" for f in opts.fields): raise ValueError( "Model %s must specify a custom Manager, because it has a " "field named 'objects'." % cls.__name__ ) manager = Manager() manager.auto_created = True cls.add_to_class("objects", manager) # Set the name of _meta.indexes. This can't be done in # Options.contribute_to_class() because fields haven't been added to # the model at that point. for index in cls._meta.indexes: if not index.name: index.set_name_with_model(cls) class_prepared.send(sender=cls) @property def _base_manager(cls): return cls._meta.base_manager @property def _default_manager(cls): return cls._meta.default_manager class ModelStateFieldsCacheDescriptor: def __get__(self, instance, cls=None): if instance is None: return self res = instance.fields_cache = {} return res class ModelState: """Store model instance state.""" db = None # If true, uniqueness validation checks will consider this a new, unsaved # object. Necessary for correct validation of new instances of objects with # explicit (non-auto) PKs. This impacts validation only; it has no effect # on the actual save. adding = True fields_cache = ModelStateFieldsCacheDescriptor() class Model(AltersData, metaclass=ModelBase): def __init__(self, *args, **kwargs): # Alias some things as locals to avoid repeat global lookups cls = self.__class__ opts = self._meta _setattr = setattr _DEFERRED = DEFERRED if opts.abstract: raise TypeError("Abstract models cannot be instantiated.") pre_init.send(sender=cls, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration # when an iter throws it. So if the first iter throws it, the second # is *not* consumed. We rely on this, so don't change the order # without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) else: # Slower, kwargs-ready version. fields_iter = iter(opts.fields) for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) if kwargs.pop(field.name, NOT_PROVIDED) is not NOT_PROVIDED: raise TypeError( f"{cls.__qualname__}() got both positional and " f"keyword arguments for field '{field.name}'." 
) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. for field in fields_iter: is_related_object = False # Virtual field if field.attname not in kwargs and field.column is None: continue if kwargs: if isinstance(field.remote_field, ForeignObjectRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. if rel_obj is not _DEFERRED: _setattr(self, field.name, rel_obj) else: if val is not _DEFERRED: _setattr(self, field.attname, val) if kwargs: property_names = opts._property_names unexpected = () for prop, value in kwargs.items(): # Any remaining kwargs must correspond to properties or virtual # fields. if prop in property_names: if value is not _DEFERRED: _setattr(self, prop, value) else: try: opts.get_field(prop) except FieldDoesNotExist: unexpected += (prop,) else: if value is not _DEFERRED: _setattr(self, prop, value) if unexpected: unexpected_names = ", ".join(repr(n) for n in unexpected) raise TypeError( f"{cls.__name__}() got unexpected keyword arguments: " f"{unexpected_names}" ) super().__init__() post_init.send(sender=cls, instance=self) @classmethod def from_db(cls, db, field_names, values): if len(values) != len(cls._meta.concrete_fields): values_iter = iter(values) values = [ next(values_iter) if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields ] new = cls(*values) new._state.adding = False new._state.db = db return new def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def __str__(self): return "%s object (%s)" % (self.__class__.__name__, self.pk) def __eq__(self, other): if not isinstance(other, Model): return NotImplemented if self._meta.concrete_model != other._meta.concrete_model: return False my_pk = self.pk if my_pk is None: return self is other return my_pk == other.pk def __hash__(self): if self.pk is None: raise TypeError("Model instances without primary key value are unhashable") return hash(self.pk) def __reduce__(self): data = self.__getstate__() data[DJANGO_VERSION_PICKLE_KEY] = django.__version__ class_id = self._meta.app_label, self._meta.object_name return model_unpickle, (class_id,), data def __getstate__(self): """Hook to allow choosing the attributes to pickle.""" state = self.__dict__.copy() state["_state"] = copy.copy(state["_state"]) state["_state"].fields_cache = state["_state"].fields_cache.copy() # memoryview cannot be pickled, so cast it to bytes and store # separately. 
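        # (__setstate__() below restores them as memoryview objects when the
        # instance is unpickled.)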
_memoryview_attrs = [] for attr, value in state.items(): if isinstance(value, memoryview): _memoryview_attrs.append((attr, bytes(value))) if _memoryview_attrs: state["_memoryview_attrs"] = _memoryview_attrs for attr, value in _memoryview_attrs: state.pop(attr) return state def __setstate__(self, state): pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: if pickled_version != django.__version__: warnings.warn( "Pickled model instance's Django version %s does not " "match the current version %s." % (pickled_version, django.__version__), RuntimeWarning, stacklevel=2, ) else: warnings.warn( "Pickled model instance's Django version is not specified.", RuntimeWarning, stacklevel=2, ) if "_memoryview_attrs" in state: for attr, value in state.pop("_memoryview_attrs"): state[attr] = memoryview(value) self.__dict__.update(state) def _get_pk_val(self, meta=None): meta = meta or self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): for parent_link in self._meta.parents.values(): if parent_link and parent_link != self._meta.pk: setattr(self, parent_link.target_field.attname, value) return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def get_deferred_fields(self): """ Return a set containing names of deferred fields for this instance. """ return { f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__ } def refresh_from_db(self, using=None, fields=None): """ Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. """ if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, "_prefetched_objects_cache", ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' "are not allowed in fields." % LOOKUP_SEP ) hints = {"instance": self} db_instance_qs = self.__class__._base_manager.db_manager( using, hints=hints ).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [ f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields ] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. 
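        # Reverse relations (e.g. reverse one-to-one accessors) cached on this
        # instance are dropped as well, so the next access re-fetches them
        # from the database.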
for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db async def arefresh_from_db(self, using=None, fields=None): return await sync_to_async(self.refresh_from_db)(using=using, fields=fields) def serializable_value(self, field_name): """ Return the value of the field name for this instance. If the field is a foreign key, return the id value instead of the object. If there's no Field object with this name on the model, return the model attribute's value. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field(field_name) except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save( self, force_insert=False, force_update=False, using=None, update_fields=None ): """ Save the current instance. Override this in a subclass if you want to control the saving process. The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ self._prepare_related_fields_for_save(operation_name="save") using = using or router.db_for_write(self.__class__, instance=self) if force_insert and (force_update or update_fields): raise ValueError("Cannot force both insert and updating in model saving.") deferred_fields = self.get_deferred_fields() if update_fields is not None: # If update_fields is empty, skip the save. We do also check for # no-op saves later on for inheritance cases. This bailout is # still needed for skipping signal sending. if not update_fields: return update_fields = frozenset(update_fields) field_names = self._meta._non_pk_concrete_field_names non_model_fields = update_fields.difference(field_names) if non_model_fields: raise ValueError( "The following fields do not exist in this model, are m2m " "fields, or are non-concrete fields: %s" % ", ".join(non_model_fields) ) # If saving to the same database, and this model is deferred, then # automatically do an "update_fields" save on the loaded fields. elif not force_insert and deferred_fields and using == self._state.db: field_names = set() for field in self._meta.concrete_fields: if not field.primary_key and not hasattr(field, "through"): field_names.add(field.attname) loaded_fields = field_names.difference(deferred_fields) if loaded_fields: update_fields = frozenset(loaded_fields) self.save_base( using=using, force_insert=force_insert, force_update=force_update, update_fields=update_fields, ) save.alters_data = True async def asave( self, force_insert=False, force_update=False, using=None, update_fields=None ): return await sync_to_async(self.save)( force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields, ) asave.alters_data = True def save_base( self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None, ): """ Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading. 
""" using = using or router.db_for_write(self.__class__, instance=self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or update_fields cls = origin = self.__class__ # Skip proxies, but keep the origin as the proxy model. if cls._meta.proxy: cls = cls._meta.concrete_model meta = cls._meta if not meta.auto_created: pre_save.send( sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields, ) # A transaction isn't needed if one query is issued. if meta.parents: context_manager = transaction.atomic(using=using, savepoint=False) else: context_manager = transaction.mark_for_rollback_on_error(using=using) with context_manager: parent_inserted = False if not raw: parent_inserted = self._save_parents(cls, using, update_fields) updated = self._save_table( raw, cls, force_insert or parent_inserted, force_update, using, update_fields, ) # Store the database on which the object was saved self._state.db = using # Once saved, this is no longer a to-be-added instance. self._state.adding = False # Signal that the save is complete if not meta.auto_created: post_save.send( sender=origin, instance=self, created=(not updated), update_fields=update_fields, raw=raw, using=using, ) save_base.alters_data = True def _save_parents(self, cls, using, update_fields): """Save all the parents of cls using values from self.""" meta = cls._meta inserted = False for parent, field in meta.parents.items(): # Make sure the link fields are synced between parent and self. if ( field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None ): setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) parent_inserted = self._save_parents( cls=parent, using=using, update_fields=update_fields ) updated = self._save_table( cls=parent, using=using, update_fields=update_fields, force_insert=parent_inserted, ) if not updated: inserted = True # Set the parent's PK value to self. if field: setattr(self, field.attname, self._get_pk_val(parent._meta)) # Since we didn't have an instance of the parent handy set # attname directly, bypassing the descriptor. Invalidate # the related object cache, in case it's been accidentally # populated. A fresh instance will be re-built from the # database if necessary. if field.is_cached(self): field.delete_cached_value(self) return inserted def _save_table( self, raw=False, cls=None, force_insert=False, force_update=False, using=None, update_fields=None, ): """ Do the heavy-lifting involved in saving. Update or insert the data for a single table. """ meta = cls._meta non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] if update_fields: non_pks = [ f for f in non_pks if f.name in update_fields or f.attname in update_fields ] pk_val = self._get_pk_val(meta) if pk_val is None: pk_val = meta.pk.get_pk_value_on_save(self) setattr(self, meta.pk.attname, pk_val) pk_set = pk_val is not None if not pk_set and (force_update or update_fields): raise ValueError("Cannot force an update in save() with no primary key.") updated = False # Skip an UPDATE when adding an instance and primary key has a default. if ( not raw and not force_insert and self._state.adding and meta.pk.default and meta.pk.default is not NOT_PROVIDED ): force_insert = True # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. 
if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [ ( f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)), ) for f in non_pks ] forced_update = update_fields or force_update updated = self._do_update( base_qs, using, pk_val, values, update_fields, forced_update ) if force_update and not updated: raise DatabaseError("Forced update did not affect any rows.") if update_fields and not updated: raise DatabaseError("Save with update_fields did not affect any rows.") if not updated: if meta.order_with_respect_to: # If this is a model with an order_with_respect_to # autopopulate the _order field field = meta.order_with_respect_to filter_args = field.get_filter_kwargs_for_object(self) self._order = ( cls._base_manager.using(using) .filter(**filter_args) .aggregate( _order__max=Coalesce( ExpressionWrapper( Max("_order") + Value(1), output_field=IntegerField() ), Value(0), ), )["_order__max"] ) fields = meta.local_concrete_fields if not pk_set: fields = [f for f in fields if f is not meta.auto_field] returning_fields = meta.db_returning_fields results = self._do_insert( cls._base_manager, using, fields, returning_fields, raw ) if results: for value, field in zip(results[0], returning_fields): setattr(self, field.attname, value) return updated def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): """ Try to update the model. Return True if the model was updated (if an update query was done and a matching row was found in the DB). """ filtered = base_qs.filter(pk=pk_val) if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that # case we just say the update succeeded. Another case ending up here # is a model with just PK - in that case check that the PK still # exists. return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and # It may happen that the object is deleted from the DB right after # this check, causing the subsequent UPDATE to return zero matching # rows. The same result can occur in some rare cases when the # database returns zero despite the UPDATE being executed # successfully (a row is matched and updated). In order to # distinguish these two cases, the object's existence in the # database is again checked for if the UPDATE query returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 def _do_insert(self, manager, using, fields, returning_fields, raw): """ Do an INSERT. If returning_fields is defined then this method should return the newly created data for the model. """ return manager._insert( [self], fields=fields, returning_fields=returning_fields, using=using, raw=raw, ) def _prepare_related_fields_for_save(self, operation_name, fields=None): # Ensure that a model instance without a PK hasn't been assigned to # a ForeignKey, GenericForeignKey or OneToOneField on this model. If # the field is nullable, allowing the save would result in silent data # loss. for field in self._meta.concrete_fields: if fields and field not in fields: continue # If the related field isn't cached, then an instance hasn't been # assigned and there's no need to worry about this check. 
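            # An assigned but unsaved related object (obj.pk is None) aborts
            # the save below, while a saved one only has its primary key value
            # synced back into this instance's foreign key attribute.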
if field.is_relation and field.is_cached(self): obj = getattr(self, field.name, None) if not obj: continue # A pk may have been assigned manually to a model instance not # saved to the database (or auto-generated in a case like # UUIDField), but we allow the save to proceed and rely on the # database to raise an IntegrityError if applicable. If # constraints aren't supported by the database, there's the # unavoidable risk of data corruption. if obj.pk is None: # Remove the object from a related instance cache. if not field.remote_field.multiple: field.remote_field.delete_cached_value(obj) raise ValueError( "%s() prohibited to prevent data loss due to unsaved " "related object '%s'." % (operation_name, field.name) ) elif getattr(self, field.attname) in field.empty_values: # Set related object if it has been saved after an # assignment. setattr(self, field.name, obj) # If the relationship's pk/to_field was changed, clear the # cached relationship. if getattr(obj, field.target_field.attname) != getattr( self, field.attname ): field.delete_cached_value(self) # GenericForeignKeys are private. for field in self._meta.private_fields: if fields and field not in fields: continue if ( field.is_relation and field.is_cached(self) and hasattr(field, "fk_field") ): obj = field.get_cached_value(self, default=None) if obj and obj.pk is None: raise ValueError( f"{operation_name}() prohibited to prevent data loss due to " f"unsaved related object '{field.name}'." ) def delete(self, using=None, keep_parents=False): if self.pk is None: raise ValueError( "%s object can't be deleted because its %s attribute is set " "to None." % (self._meta.object_name, self._meta.pk.attname) ) using = using or router.db_for_write(self.__class__, instance=self) collector = Collector(using=using, origin=self) collector.collect([self], keep_parents=keep_parents) return collector.delete() delete.alters_data = True async def adelete(self, using=None, keep_parents=False): return await sync_to_async(self.delete)( using=using, keep_parents=keep_parents, ) adelete.alters_data = True def _get_FIELD_display(self, field): value = getattr(self, field.attname) choices_dict = dict(make_hashable(field.flatchoices)) # force_str() to coerce lazy strings. return force_str( choices_dict.get(make_hashable(value), value), strings_only=True ) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = "gt" if is_next else "lt" order = "" if is_next else "-" param = getattr(self, field.attname) q = Q.create([(field.name, param), (f"pk__{op}", self.pk)], connector=Q.AND) q = Q.create([q, (f"{field.name}__{op}", param)], connector=Q.OR) qs = ( self.__class__._default_manager.using(self._state.db) .filter(**kwargs) .filter(q) .order_by("%s%s" % (order, field.name), "%spk" % order) ) try: return qs[0] except IndexError: raise self.DoesNotExist( "%s matching query does not exist." 
% self.__class__._meta.object_name ) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = "gt" if is_next else "lt" order = "_order" if is_next else "-_order" order_field = self._meta.order_with_respect_to filter_args = order_field.get_filter_kwargs_for_object(self) obj = ( self.__class__._default_manager.filter(**filter_args) .filter( **{ "_order__%s" % op: self.__class__._default_manager.values("_order").filter( **{self._meta.pk.name: self.pk} ) } ) .order_by(order)[:1] .get() ) setattr(self, cachename, obj) return getattr(self, cachename) def _get_field_value_map(self, meta, exclude=None): if exclude is None: exclude = set() meta = meta or self._meta return { field.name: Value(getattr(self, field.attname), field) for field in meta.local_concrete_fields if field.name not in exclude } def prepare_database_save(self, field): if self.pk is None: raise ValueError( "Unsaved model instance %r cannot be used in an ORM query." % self ) return getattr(self, field.remote_field.get_related_field().attname) def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def validate_unique(self, exclude=None): """ Check unique constraints on the model and raise ValidationError if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None, include_meta_constraints=False): """ Return a list of checks to perform. Since validate_unique() could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. """ if exclude is None: exclude = set() unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] constraints = [] if include_meta_constraints: constraints = [(self.__class__, self._meta.total_unique_constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.unique_together: unique_togethers.append( (parent_class, parent_class._meta.unique_together) ) if include_meta_constraints and parent_class._meta.total_unique_constraints: constraints.append( (parent_class, parent_class._meta.total_unique_constraints) ) for model_class, unique_together in unique_togethers: for check in unique_together: if not any(name in exclude for name in check): # Add the check if the field isn't excluded. unique_checks.append((model_class, tuple(check))) if include_meta_constraints: for model_class, model_constraints in constraints: for constraint in model_constraints: if not any(name in exclude for name in constraint.fields): unique_checks.append((model_class, constraint.fields)) # These are checks for the unique_for_<date/year/month>. date_checks = [] # Gather a list of checks for fields declared as unique and add them to # the list of checks. 
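        # Each unique check is a (model_class, (field_name, ...)) pair; each
        # date check is (model_class, lookup_type, field_name, unique_for),
        # where lookup_type is "date", "year", or "month".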
fields_with_class = [(self.__class__, self._meta.local_fields)] for parent_class in self._meta.get_parent_list(): fields_with_class.append((parent_class, parent_class._meta.local_fields)) for model_class, fields in fields_with_class: for f in fields: name = f.name if name in exclude: continue if f.unique: unique_checks.append((model_class, (name,))) if f.unique_for_date and f.unique_for_date not in exclude: date_checks.append((model_class, "date", name, f.unique_for_date)) if f.unique_for_year and f.unique_for_year not in exclude: date_checks.append((model_class, "year", name, f.unique_for_year)) if f.unique_for_month and f.unique_for_month not in exclude: date_checks.append((model_class, "month", name, f.unique_for_month)) return unique_checks, date_checks def _perform_unique_checks(self, unique_checks): errors = {} for model_class, unique_check in unique_checks: # Try to look up an existing object with the same values as this # object's values for all the unique field. lookup_kwargs = {} for field_name in unique_check: f = self._meta.get_field(field_name) lookup_value = getattr(self, f.attname) # TODO: Handle multiple backends with different feature flags. if lookup_value is None or ( lookup_value == "" and connection.features.interprets_empty_strings_as_nulls ): # no value, skip the lookup continue if f.primary_key and not self._state.adding: # no need to check for unique primary key when editing continue lookup_kwargs[str(field_name)] = lookup_value # some fields were skipped, no reason to do the check if len(unique_check) != len(lookup_kwargs): continue qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) # Note that we need to use the pk as defined by model_class, not # self.pk. These can be different fields because model inheritance # allows single model to have effectively multiple primary keys. # Refs #17615. 
model_class_pk = self._get_pk_val(model_class._meta) if not self._state.adding and model_class_pk is not None: qs = qs.exclude(pk=model_class_pk) if qs.exists(): if len(unique_check) == 1: key = unique_check[0] else: key = NON_FIELD_ERRORS errors.setdefault(key, []).append( self.unique_error_message(model_class, unique_check) ) return errors def _perform_date_checks(self, date_checks): errors = {} for model_class, lookup_type, field, unique_for in date_checks: lookup_kwargs = {} # there's a ticket to add a date lookup, we can remove this special # case if that makes it's way in date = getattr(self, unique_for) if date is None: continue if lookup_type == "date": lookup_kwargs["%s__day" % unique_for] = date.day lookup_kwargs["%s__month" % unique_for] = date.month lookup_kwargs["%s__year" % unique_for] = date.year else: lookup_kwargs["%s__%s" % (unique_for, lookup_type)] = getattr( date, lookup_type ) lookup_kwargs[field] = getattr(self, field) qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) if not self._state.adding and self.pk is not None: qs = qs.exclude(pk=self.pk) if qs.exists(): errors.setdefault(field, []).append( self.date_error_message(lookup_type, field, unique_for) ) return errors def date_error_message(self, lookup_type, field_name, unique_for): opts = self._meta field = opts.get_field(field_name) return ValidationError( message=field.error_messages["unique_for_date"], code="unique_for_date", params={ "model": self, "model_name": capfirst(opts.verbose_name), "lookup_type": lookup_type, "field": field_name, "field_label": capfirst(field.verbose_name), "date_field": unique_for, "date_field_label": capfirst(opts.get_field(unique_for).verbose_name), }, ) def unique_error_message(self, model_class, unique_check): opts = model_class._meta params = { "model": self, "model_class": model_class, "model_name": capfirst(opts.verbose_name), "unique_check": unique_check, } # A unique field if len(unique_check) == 1: field = opts.get_field(unique_check[0]) params["field_label"] = capfirst(field.verbose_name) return ValidationError( message=field.error_messages["unique"], code="unique", params=params, ) # unique_together else: field_labels = [ capfirst(opts.get_field(f).verbose_name) for f in unique_check ] params["field_labels"] = get_text_list(field_labels, _("and")) return ValidationError( message=_("%(model_name)s with this %(field_labels)s already exists."), code="unique_together", params=params, ) def get_constraints(self): constraints = [(self.__class__, self._meta.constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.constraints: constraints.append((parent_class, parent_class._meta.constraints)) return constraints def validate_constraints(self, exclude=None): constraints = self.get_constraints() using = router.db_for_write(self.__class__, instance=self) errors = {} for model_class, model_constraints in constraints: for constraint in model_constraints: try: constraint.validate(model_class, self, exclude=exclude, using=using) except ValidationError as e: if e.code == "unique" and len(constraint.fields) == 1: errors.setdefault(constraint.fields[0], []).append(e) else: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def full_clean(self, exclude=None, validate_unique=True, validate_constraints=True): """ Call clean_fields(), clean(), validate_unique(), and validate_constraints() on the model. 
Raise a ValidationError for any errors that occur. """ errors = {} if exclude is None: exclude = set() else: exclude = set(exclude) try: self.clean_fields(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Form.clean() is run even if other validation fails, so do the # same with Model.clean() for consistency. try: self.clean() except ValidationError as e: errors = e.update_error_dict(errors) # Run unique checks, but only for fields that passed validation. if validate_unique: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.add(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Run constraints checks, but only for fields that passed validation. if validate_constraints: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.add(name) try: self.validate_constraints(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Clean all fields and raise a ValidationError containing a dict of all validation errors if any occur. """ if exclude is None: exclude = set() errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in f.empty_values: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.error_list if errors: raise ValidationError(errors) @classmethod def check(cls, **kwargs): errors = [ *cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs), ] if not cls._meta.swapped: databases = kwargs.get("databases") or [] errors += [ *cls._check_fields(**kwargs), *cls._check_m2m_through_same_relationship(), *cls._check_long_column_names(databases), ] clash_errors = ( *cls._check_id_field(), *cls._check_field_name_clashes(), *cls._check_model_name_db_lookup_clashes(), *cls._check_property_name_related_field_accessor_clashes(), *cls._check_single_primary_key(), ) errors.extend(clash_errors) # If there are field name clashes, hide consequent column name # clashes. if not clash_errors: errors.extend(cls._check_column_name_clashes()) errors += [ *cls._check_index_together(), *cls._check_unique_together(), *cls._check_indexes(databases), *cls._check_ordering(), *cls._check_constraints(databases), *cls._check_default_pk(), ] return errors @classmethod def _check_default_pk(cls): if ( not cls._meta.abstract and cls._meta.pk.auto_created and # Inherited PKs are checked in parents models. not ( isinstance(cls._meta.pk, OneToOneField) and cls._meta.pk.remote_field.parent_link ) and not settings.is_overridden("DEFAULT_AUTO_FIELD") and cls._meta.app_config and not cls._meta.app_config._is_default_auto_field_overridden ): return [ checks.Warning( f"Auto-created primary key used when not defining a " f"primary key type, by default " f"'{settings.DEFAULT_AUTO_FIELD}'.", hint=( f"Configure the DEFAULT_AUTO_FIELD setting or the " f"{cls._meta.app_config.__class__.__qualname__}." f"default_auto_field attribute to point to a subclass " f"of AutoField, e.g. 'django.db.models.BigAutoField'." 
), obj=cls, id="models.W042", ), ] return [] @classmethod def _check_swappable(cls): """Check if the swapped model exists.""" errors = [] if cls._meta.swapped: try: apps.get_model(cls._meta.swapped) except ValueError: errors.append( checks.Error( "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, id="models.E001", ) ) except LookupError: app_label, model_name = cls._meta.swapped.split(".") errors.append( checks.Error( "'%s' references '%s.%s', which has not been " "installed, or is abstract." % (cls._meta.swappable, app_label, model_name), id="models.E002", ) ) return errors @classmethod def _check_model(cls): errors = [] if cls._meta.proxy: if cls._meta.local_fields or cls._meta.local_many_to_many: errors.append( checks.Error( "Proxy model '%s' contains model fields." % cls.__name__, id="models.E017", ) ) return errors @classmethod def _check_managers(cls, **kwargs): """Perform all manager checks.""" errors = [] for manager in cls._meta.managers: errors.extend(manager.check(**kwargs)) return errors @classmethod def _check_fields(cls, **kwargs): """Perform all field checks.""" errors = [] for field in cls._meta.local_fields: errors.extend(field.check(**kwargs)) for field in cls._meta.local_many_to_many: errors.extend(field.check(from_model=cls, **kwargs)) return errors @classmethod def _check_m2m_through_same_relationship(cls): """Check if no relationship model is used by more than one m2m field.""" errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = ( f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields, ) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id="models.E003", ) ) else: seen_intermediary_signatures.append(signature) return errors @classmethod def _check_id_field(cls): """Check if `id` field is a primary key.""" fields = [ f for f in cls._meta.local_fields if f.name == "id" and f != cls._meta.pk ] # fields is empty or consists of the invalid "id" field if fields and not fields[0].primary_key and cls._meta.pk.name == "id": return [ checks.Error( "'id' can only be used as a field name if the field also " "sets 'primary_key=True'.", obj=cls, id="models.E004", ) ] else: return [] @classmethod def _check_field_name_clashes(cls): """Forbid field shadowing in multi-table inheritance.""" errors = [] used_fields = {} # name or attname -> field # Check that multi-inheritance doesn't cause field name shadowing. for parent in cls._meta.get_parent_list(): for f in parent._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None if clash: errors.append( checks.Error( "The field '%s' from parent model " "'%s' clashes with the field '%s' " "from parent model '%s'." % (clash.name, clash.model._meta, f.name, f.model._meta), obj=cls, id="models.E005", ) ) used_fields[f.name] = f used_fields[f.attname] = f # Check that fields defined in the model don't clash with fields from # parents, including auto-generated fields like multi-table inheritance # child accessors. 
for parent in cls._meta.get_parent_list(): for f in parent._meta.get_fields(): if f not in used_fields: used_fields[f.name] = f for f in cls._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None # Note that we may detect clash between user-defined non-unique # field "id" and automatically added unique field "id", both # defined at the same model. This special case is considered in # _check_id_field and here we ignore it. id_conflict = ( f.name == "id" and clash and clash.name == "id" and clash.model == cls ) if clash and not id_conflict: errors.append( checks.Error( "The field '%s' clashes with the field '%s' " "from model '%s'." % (f.name, clash.name, clash.model._meta), obj=f, id="models.E006", ) ) used_fields[f.name] = f used_fields[f.attname] = f return errors @classmethod def _check_column_name_clashes(cls): # Store a list of column names which have already been used by other fields. used_column_names = [] errors = [] for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Ensure the column name is not already in use. if column_name and column_name in used_column_names: errors.append( checks.Error( "Field '%s' has column name '%s' that is used by " "another field." % (f.name, column_name), hint="Specify a 'db_column' for the field.", obj=cls, id="models.E007", ) ) else: used_column_names.append(column_name) return errors @classmethod def _check_model_name_db_lookup_clashes(cls): errors = [] model_name = cls.__name__ if model_name.startswith("_") or model_name.endswith("_"): errors.append( checks.Error( "The model name '%s' cannot start or end with an underscore " "as it collides with the query lookup syntax." % model_name, obj=cls, id="models.E023", ) ) elif LOOKUP_SEP in model_name: errors.append( checks.Error( "The model name '%s' cannot contain double underscores as " "it collides with the query lookup syntax." % model_name, obj=cls, id="models.E024", ) ) return errors @classmethod def _check_property_name_related_field_accessor_clashes(cls): errors = [] property_names = cls._meta._property_names related_field_accessors = ( f.get_attname() for f in cls._meta._get_fields(reverse=False) if f.is_relation and f.related_model is not None ) for accessor in related_field_accessors: if accessor in property_names: errors.append( checks.Error( "The property '%s' clashes with a related field " "accessor." % accessor, obj=cls, id="models.E025", ) ) return errors @classmethod def _check_single_primary_key(cls): errors = [] if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: errors.append( checks.Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=cls, id="models.E026", ) ) return errors # RemovedInDjango51Warning. 
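    # Meta.index_together is deprecated in favor of Meta.indexes; the checks
    # below remain only until the option itself is removed.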
@classmethod def _check_index_together(cls): """Check the value of "index_together" option.""" if not isinstance(cls._meta.index_together, (tuple, list)): return [ checks.Error( "'index_together' must be a list or tuple.", obj=cls, id="models.E008", ) ] elif any( not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together ): return [ checks.Error( "All 'index_together' elements must be lists or tuples.", obj=cls, id="models.E009", ) ] else: errors = [] for fields in cls._meta.index_together: errors.extend(cls._check_local_fields(fields, "index_together")) return errors @classmethod def _check_unique_together(cls): """Check the value of "unique_together" option.""" if not isinstance(cls._meta.unique_together, (tuple, list)): return [ checks.Error( "'unique_together' must be a list or tuple.", obj=cls, id="models.E010", ) ] elif any( not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together ): return [ checks.Error( "All 'unique_together' elements must be lists or tuples.", obj=cls, id="models.E011", ) ] else: errors = [] for fields in cls._meta.unique_together: errors.extend(cls._check_local_fields(fields, "unique_together")) return errors @classmethod def _check_indexes(cls, databases): """Check fields, names, and conditions of indexes.""" errors = [] references = set() for index in cls._meta.indexes: # Index name can't start with an underscore or a number, restricted # for cross-database compatibility with Oracle. if index.name[0] == "_" or index.name[0].isdigit(): errors.append( checks.Error( "The index name '%s' cannot start with an underscore " "or a number." % index.name, obj=cls, id="models.E033", ), ) if len(index.name) > index.max_name_length: errors.append( checks.Error( "The index name '%s' cannot be longer than %d " "characters." % (index.name, index.max_name_length), obj=cls, id="models.E034", ), ) if index.contains_expressions: for expression in index.expressions: references.update( ref[0] for ref in cls._get_expr_references(expression) ) for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_partial_indexes or "supports_partial_indexes" in cls._meta.required_db_features ) and any(index.condition is not None for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes with conditions." % connection.display_name, hint=( "Conditions will be ignored. Silence this warning " "if you don't care about it." ), obj=cls, id="models.W037", ) ) if not ( connection.features.supports_covering_indexes or "supports_covering_indexes" in cls._meta.required_db_features ) and any(index.include for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes with non-key columns." % connection.display_name, hint=( "Non-key columns will be ignored. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W040", ) ) if not ( connection.features.supports_expression_indexes or "supports_expression_indexes" in cls._meta.required_db_features ) and any(index.contains_expressions for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes on expressions." % connection.display_name, hint=( "An index won't be created. Silence this warning " "if you don't care about it." 
), obj=cls, id="models.W043", ) ) fields = [ field for index in cls._meta.indexes for field, _ in index.fields_orders ] fields += [include for index in cls._meta.indexes for include in index.include] fields += references errors.extend(cls._check_local_fields(fields, "indexes")) return errors @classmethod def _check_local_fields(cls, fields, option): from django.db import models # In order to avoid hitting the relation tree prematurely, we use our # own fields_map instead of using get_field() forward_fields_map = {} for field in cls._meta._get_fields(reverse=False): forward_fields_map[field.name] = field if hasattr(field, "attname"): forward_fields_map[field.attname] = field errors = [] for field_name in fields: try: field = forward_fields_map[field_name] except KeyError: errors.append( checks.Error( "'%s' refers to the nonexistent field '%s'." % ( option, field_name, ), obj=cls, id="models.E012", ) ) else: if isinstance(field.remote_field, models.ManyToManyRel): errors.append( checks.Error( "'%s' refers to a ManyToManyField '%s', but " "ManyToManyFields are not permitted in '%s'." % ( option, field_name, option, ), obj=cls, id="models.E013", ) ) elif field not in cls._meta.local_fields: errors.append( checks.Error( "'%s' refers to field '%s' which is not local to model " "'%s'." % (option, field_name, cls._meta.object_name), hint="This issue may be caused by multi-table inheritance.", obj=cls, id="models.E016", ) ) return errors @classmethod def _check_ordering(cls): """ Check "ordering" option -- is it a list of strings and do all fields exist? """ if cls._meta._ordering_clash: return [ checks.Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=cls, id="models.E021", ), ] if cls._meta.order_with_respect_to or not cls._meta.ordering: return [] if not isinstance(cls._meta.ordering, (list, tuple)): return [ checks.Error( "'ordering' must be a tuple or list (even if you want to order by " "only one field).", obj=cls, id="models.E014", ) ] errors = [] fields = cls._meta.ordering # Skip expressions and '?' fields. fields = (f for f in fields if isinstance(f, str) and f != "?") # Convert "-field" to "field". fields = ((f[1:] if f.startswith("-") else f) for f in fields) # Separate related fields and non-related fields. _fields = [] related_fields = [] for f in fields: if LOOKUP_SEP in f: related_fields.append(f) else: _fields.append(f) fields = _fields # Check related fields. for field in related_fields: _cls = cls fld = None for part in field.split(LOOKUP_SEP): try: # pk is an alias that won't be found by opts.get_field. if part == "pk": fld = _cls._meta.pk else: fld = _cls._meta.get_field(part) if fld.is_relation: _cls = fld.path_infos[-1].to_opts.model else: _cls = None except (FieldDoesNotExist, AttributeError): if fld is None or ( fld.get_transform(part) is None and fld.get_lookup(part) is None ): errors.append( checks.Error( "'ordering' refers to the nonexistent field, " "related field, or lookup '%s'." % field, obj=cls, id="models.E015", ) ) # Skip ordering on pk. This is always a valid order_by field # but is an alias and therefore won't be found by opts.get_field. fields = {f for f in fields if f != "pk"} # Check for invalid or nonexistent fields in ordering. invalid_fields = [] # Any field name that is not present in field_names does not exist. # Also, ordering by m2m fields is not allowed. 
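        # Reverse relations are addressable only through their related query
        # name, which is why auto-created, non-concrete fields contribute
        # related_query_name() to the set of valid names built below.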
opts = cls._meta valid_fields = set( chain.from_iterable( (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) for f in chain(opts.fields, opts.related_objects) ) ) invalid_fields.extend(fields - valid_fields) for invalid_field in invalid_fields: errors.append( checks.Error( "'ordering' refers to the nonexistent field, related " "field, or lookup '%s'." % invalid_field, obj=cls, id="models.E015", ) ) return errors @classmethod def _check_long_column_names(cls, databases): """ Check that any auto-generated column names are shorter than the limits for each database in which the model will be created. """ if not databases: return [] errors = [] allowed_len = None db_alias = None # Find the minimum max allowed length among all specified db_aliases. for db in databases: # skip databases where the model won't be created if not router.allow_migrate_model(db, cls): continue connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is None or connection.features.truncates_names: continue else: if allowed_len is None: allowed_len = max_name_length db_alias = db elif max_name_length < allowed_len: allowed_len = max_name_length db_alias = db if allowed_len is None: return errors for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Check if auto-generated name for the field is too long # for the database. if ( f.db_column is None and column_name is not None and len(column_name) > allowed_len ): errors.append( checks.Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (column_name, allowed_len, db_alias), hint="Set the column name manually using 'db_column'.", obj=cls, id="models.E018", ) ) for f in cls._meta.local_many_to_many: # Skip nonexistent models. if isinstance(f.remote_field.through, str): continue # Check if auto-generated name for the M2M field is too long # for the database. for m2m in f.remote_field.through._meta.local_fields: _, rel_name = m2m.get_attname_column() if ( m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len ): errors.append( checks.Error( "Autogenerated column name too long for M2M field " '"%s". Maximum length is "%s" for database "%s".' % (rel_name, allowed_len, db_alias), hint=( "Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'." ), obj=cls, id="models.E019", ) ) return errors @classmethod def _get_expr_references(cls, expr): if isinstance(expr, Q): for child in expr.children: if isinstance(child, tuple): lookup, value = child yield tuple(lookup.split(LOOKUP_SEP)) yield from cls._get_expr_references(value) else: yield from cls._get_expr_references(child) elif isinstance(expr, F): yield tuple(expr.name.split(LOOKUP_SEP)) elif hasattr(expr, "get_source_expressions"): for src_expr in expr.get_source_expressions(): yield from cls._get_expr_references(src_expr) @classmethod def _check_constraints(cls, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_table_check_constraints or "supports_table_check_constraints" in cls._meta.required_db_features ) and any( isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support check constraints." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." 
), obj=cls, id="models.W027", ) ) if not ( connection.features.supports_partial_indexes or "supports_partial_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.condition is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints with " "conditions." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W036", ) ) if not ( connection.features.supports_deferrable_unique_constraints or "supports_deferrable_unique_constraints" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.deferrable is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support deferrable unique constraints." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W038", ) ) if not ( connection.features.supports_covering_indexes or "supports_covering_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.include for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints with non-key " "columns." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W039", ) ) if not ( connection.features.supports_expression_indexes or "supports_expression_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.contains_expressions for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints on " "expressions." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W044", ) ) fields = set( chain.from_iterable( (*constraint.fields, *constraint.include) for constraint in cls._meta.constraints if isinstance(constraint, UniqueConstraint) ) ) references = set() for constraint in cls._meta.constraints: if isinstance(constraint, UniqueConstraint): if ( connection.features.supports_partial_indexes or "supports_partial_indexes" not in cls._meta.required_db_features ) and isinstance(constraint.condition, Q): references.update( cls._get_expr_references(constraint.condition) ) if ( connection.features.supports_expression_indexes or "supports_expression_indexes" not in cls._meta.required_db_features ) and constraint.contains_expressions: for expression in constraint.expressions: references.update(cls._get_expr_references(expression)) elif isinstance(constraint, CheckConstraint): if ( connection.features.supports_table_check_constraints or "supports_table_check_constraints" not in cls._meta.required_db_features ): if isinstance(constraint.check, Q): references.update( cls._get_expr_references(constraint.check) ) if any( isinstance(expr, RawSQL) for expr in constraint.check.flatten() ): errors.append( checks.Warning( f"Check constraint {constraint.name!r} contains " f"RawSQL() expression and won't be validated " f"during the model full_clean().", hint=( "Silence this warning if you don't care about " "it." ), obj=cls, id="models.W045", ), ) for field_name, *lookups in references: # pk is an alias that won't be found by opts.get_field. 
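            # Each reference is a tuple like ("field", "lookup1", ...); any
            # element past the field name may imply a JOIN, which constraints
            # cannot express, so such cases are reported as models.E041 below.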
if field_name != "pk": fields.add(field_name) if not lookups: # If it has no lookups it cannot result in a JOIN. continue try: if field_name == "pk": field = cls._meta.pk else: field = cls._meta.get_field(field_name) if not field.is_relation or field.many_to_many or field.one_to_many: continue except FieldDoesNotExist: continue # JOIN must happen at the first lookup. first_lookup = lookups[0] if ( hasattr(field, "get_transform") and hasattr(field, "get_lookup") and field.get_transform(first_lookup) is None and field.get_lookup(first_lookup) is None ): errors.append( checks.Error( "'constraints' refers to the joined field '%s'." % LOOKUP_SEP.join([field_name] + lookups), obj=cls, id="models.E041", ) ) errors.extend(cls._check_local_fields(fields, "constraints")) return errors ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(self, ordered_obj, id_list, using=None): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update( [ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list)], ["_order"], ) def method_get_order(self, ordered_obj): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) pk_name = ordered_obj._meta.pk.name return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) def make_foreign_order_accessors(model, related_model): setattr( related_model, "get_%s_order" % model.__name__.lower(), partialmethod(method_get_order, model), ) setattr( related_model, "set_%s_order" % model.__name__.lower(), partialmethod(method_set_order, model), ) ######## # MISC # ######## def model_unpickle(model_id): """Used to unpickle Model subclasses with deferred fields.""" if isinstance(model_id, tuple): model = apps.get_model(*model_id) else: # Backwards compat - the model was cached directly in earlier versions. model = model_id return model.__new__(model) model_unpickle.__safe_for_unpickle__ = True
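# A usage sketch for the ordering accessors wired up above, assuming
# hypothetical Question/Answer models where Answer declares
# Meta.order_with_respect_to = "question":
#
#     question.get_answer_order()           # -> queryset of Answer pks in order
#     question.set_answer_order([3, 1, 2])  # persist a new ordering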
import functools from collections import namedtuple def make_model_tuple(model): """ Take a model or a string of the form "app_label.ModelName" and return a corresponding ("app_label", "modelname") tuple. If a tuple is passed in, assume it's a valid model tuple already and return it unchanged. """ try: if isinstance(model, tuple): model_tuple = model elif isinstance(model, str): app_label, model_name = model.split(".") model_tuple = app_label, model_name.lower() else: model_tuple = model._meta.app_label, model._meta.model_name assert len(model_tuple) == 2 return model_tuple except (ValueError, AssertionError): raise ValueError( "Invalid model reference '%s'. String model references " "must be of the form 'app_label.ModelName'." % model ) def resolve_callables(mapping): """ Generate key/value pairs for the given mapping where the values are evaluated if they're callable. """ for k, v in mapping.items(): yield k, v() if callable(v) else v def unpickle_named_row(names, values): return create_namedtuple_class(*names)(*values) @functools.lru_cache def create_namedtuple_class(*names): # Cache type() with @lru_cache since it's too slow to be called for every # QuerySet evaluation. def __reduce__(self): return unpickle_named_row, (names, tuple(self)) return type( "Row", (namedtuple("Row", names),), {"__reduce__": __reduce__, "__slots__": ()}, ) class AltersData: """ Make subclasses preserve the alters_data attribute on overridden methods. """ def __init_subclass__(cls, **kwargs): for fn_name, fn in vars(cls).items(): if callable(fn) and not hasattr(fn, "alters_data"): for base in cls.__bases__: if base_fn := getattr(base, fn_name, None): if hasattr(base_fn, "alters_data"): fn.alters_data = base_fn.alters_data break super().__init_subclass__(**kwargs)
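# A quick sketch of the helpers above:
#
#     make_model_tuple("auth.User")                      # -> ("auth", "user")
#     dict(resolve_callables({"a": 1, "b": lambda: 2}))  # -> {"a": 1, "b": 2}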
import copy import datetime import functools import inspect import warnings from collections import defaultdict from decimal import Decimal from uuid import UUID from django.core.exceptions import EmptyResultSet, FieldError from django.db import DatabaseError, NotSupportedError, connection from django.db.models import fields from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import Q from django.utils.deconstruct import deconstructible from django.utils.deprecation import RemovedInDjango50Warning from django.utils.functional import cached_property from django.utils.hashable import make_hashable class SQLiteNumericMixin: """ Some expressions with output_field=DecimalField() must be cast to numeric to be properly filtered. """ def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) try: if self.output_field.get_internal_type() == "DecimalField": sql = "CAST(%s AS NUMERIC)" % sql except FieldError: pass return sql, params class Combinable: """ Provide the ability to combine one or two objects with some connector. For example F('foo') + F('bar'). """ # Arithmetic connectors ADD = "+" SUB = "-" MUL = "*" DIV = "/" POW = "^" # The following is a quoted % operator - it is quoted because it can be # used in strings that also have parameter substitution. MOD = "%%" # Bitwise operators - note that these are generated by .bitand() # and .bitor(), the '&' and '|' are reserved for boolean operator # usage. BITAND = "&" BITOR = "|" BITLEFTSHIFT = "<<" BITRIGHTSHIFT = ">>" BITXOR = "#" def _combine(self, other, connector, reversed): if not hasattr(other, "resolve_expression"): # everything must be resolvable to an expression other = Value(other) if reversed: return CombinedExpression(other, connector, self) return CombinedExpression(self, connector, other) ############# # OPERATORS # ############# def __neg__(self): return self._combine(-1, self.MUL, False) def __add__(self, other): return self._combine(other, self.ADD, False) def __sub__(self, other): return self._combine(other, self.SUB, False) def __mul__(self, other): return self._combine(other, self.MUL, False) def __truediv__(self, other): return self._combine(other, self.DIV, False) def __mod__(self, other): return self._combine(other, self.MOD, False) def __pow__(self, other): return self._combine(other, self.POW, False) def __and__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) & Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def bitand(self, other): return self._combine(other, self.BITAND, False) def bitleftshift(self, other): return self._combine(other, self.BITLEFTSHIFT, False) def bitrightshift(self, other): return self._combine(other, self.BITRIGHTSHIFT, False) def __xor__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) ^ Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def bitxor(self, other): return self._combine(other, self.BITXOR, False) def __or__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) | Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." 
) def bitor(self, other): return self._combine(other, self.BITOR, False) def __radd__(self, other): return self._combine(other, self.ADD, True) def __rsub__(self, other): return self._combine(other, self.SUB, True) def __rmul__(self, other): return self._combine(other, self.MUL, True) def __rtruediv__(self, other): return self._combine(other, self.DIV, True) def __rmod__(self, other): return self._combine(other, self.MOD, True) def __rpow__(self, other): return self._combine(other, self.POW, True) def __rand__(self, other): raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def __ror__(self, other): raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def __rxor__(self, other): raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def __invert__(self): return NegatedExpression(self) class BaseExpression: """Base class for all query expressions.""" empty_result_set_value = NotImplemented # aggregate specific fields is_summary = False _output_field_resolved_to_none = False # Can the expression be used in a WHERE clause? filterable = True # Can the expression can be used as a source expression in Window? window_compatible = False def __init__(self, output_field=None): if output_field is not None: self.output_field = output_field def __getstate__(self): state = self.__dict__.copy() state.pop("convert_value", None) return state def get_db_converters(self, connection): return ( [] if self.convert_value is self._convert_value_noop else [self.convert_value] ) + self.output_field.get_db_converters(connection) def get_source_expressions(self): return [] def set_source_expressions(self, exprs): assert not exprs def _parse_expressions(self, *expressions): return [ arg if hasattr(arg, "resolve_expression") else (F(arg) if isinstance(arg, str) else Value(arg)) for arg in expressions ] def as_sql(self, compiler, connection): """ Responsible for returning a (sql, [params]) tuple to be included in the current query. Different backends can provide their own implementation, by providing an `as_{vendor}` method and patching the Expression: ``` def override_as_sql(self, compiler, connection): # custom logic return super().as_sql(compiler, connection) setattr(Expression, 'as_' + connection.vendor, override_as_sql) ``` Arguments: * compiler: the query compiler responsible for generating the query. Must have a compile method, returning a (sql, [params]) tuple. Calling compiler(value) will return a quoted `value`. * connection: the database connection used for the current query. Return: (sql, params) Where `sql` is a string containing ordered sql parameters to be replaced with the elements of the list `params`. """ raise NotImplementedError("Subclasses must implement as_sql()") @cached_property def contains_aggregate(self): return any( expr and expr.contains_aggregate for expr in self.get_source_expressions() ) @cached_property def contains_over_clause(self): return any( expr and expr.contains_over_clause for expr in self.get_source_expressions() ) @cached_property def contains_column_references(self): return any( expr and expr.contains_column_references for expr in self.get_source_expressions() ) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): """ Provide the chance to do any preprocessing or validation before being added to the query. 
Arguments: * query: the backend query implementation * allow_joins: boolean allowing or denying use of joins in this query * reuse: a set of reusable joins for multijoins * summarize: a terminal aggregate clause * for_save: whether this expression about to be used in a save or update Return: an Expression to be added to the query. """ c = self.copy() c.is_summary = summarize c.set_source_expressions( [ expr.resolve_expression(query, allow_joins, reuse, summarize) if expr else None for expr in c.get_source_expressions() ] ) return c @property def conditional(self): return isinstance(self.output_field, fields.BooleanField) @property def field(self): return self.output_field @cached_property def output_field(self): """Return the output type of this expressions.""" output_field = self._resolve_output_field() if output_field is None: self._output_field_resolved_to_none = True raise FieldError("Cannot resolve expression type, unknown output_field") return output_field @cached_property def _output_field_or_none(self): """ Return the output field of this expression, or None if _resolve_output_field() didn't return an output type. """ try: return self.output_field except FieldError: if not self._output_field_resolved_to_none: raise def _resolve_output_field(self): """ Attempt to infer the output type of the expression. As a guess, if the output fields of all source fields match then simply infer the same type here. If a source's output field resolves to None, exclude it from this check. If all sources are None, then an error is raised higher up the stack in the output_field property. """ # This guess is mostly a bad idea, but there is quite a lot of code # (especially 3rd party Func subclasses) that depend on it, we'd need a # deprecation path to fix it. sources_iter = ( source for source in self.get_source_fields() if source is not None ) for output_field in sources_iter: for source in sources_iter: if not isinstance(output_field, source.__class__): raise FieldError( "Expression contains mixed types: %s, %s. You must " "set output_field." % ( output_field.__class__.__name__, source.__class__.__name__, ) ) return output_field @staticmethod def _convert_value_noop(value, expression, connection): return value @cached_property def convert_value(self): """ Expressions provide their own converters because users have the option of manually specifying the output_field which may be a different type from the one the database returns. 
""" field = self.output_field internal_type = field.get_internal_type() if internal_type == "FloatField": return ( lambda value, expression, connection: None if value is None else float(value) ) elif internal_type.endswith("IntegerField"): return ( lambda value, expression, connection: None if value is None else int(value) ) elif internal_type == "DecimalField": return ( lambda value, expression, connection: None if value is None else Decimal(value) ) return self._convert_value_noop def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def get_transform(self, name): return self.output_field.get_transform(name) def relabeled_clone(self, change_map): clone = self.copy() clone.set_source_expressions( [ e.relabeled_clone(change_map) if e is not None else None for e in self.get_source_expressions() ] ) return clone def replace_expressions(self, replacements): if replacement := replacements.get(self): return replacement clone = self.copy() source_expressions = clone.get_source_expressions() clone.set_source_expressions( [ expr.replace_expressions(replacements) if expr else None for expr in source_expressions ] ) return clone def copy(self): return copy.copy(self) def prefix_references(self, prefix): clone = self.copy() clone.set_source_expressions( [ F(f"{prefix}{expr.name}") if isinstance(expr, F) else expr.prefix_references(prefix) for expr in self.get_source_expressions() ] ) return clone def get_group_by_cols(self): if not self.contains_aggregate: return [self] cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def get_source_fields(self): """Return the underlying field types used by this aggregate.""" return [e._output_field_or_none for e in self.get_source_expressions()] def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def reverse_ordering(self): return self def flatten(self): """ Recursively yield this expression and all subexpressions, in depth-first order. """ yield self for expr in self.get_source_expressions(): if expr: if hasattr(expr, "flatten"): yield from expr.flatten() else: yield expr def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle. """ if hasattr(self.output_field, "select_format"): return self.output_field.select_format(compiler, sql, params) return sql, params @deconstructible class Expression(BaseExpression, Combinable): """An expression that can be combined with other expressions.""" @cached_property def identity(self): constructor_signature = inspect.signature(self.__init__) args, kwargs = self._constructor_args signature = constructor_signature.bind_partial(*args, **kwargs) signature.apply_defaults() arguments = signature.arguments.items() identity = [self.__class__] for arg, value in arguments: if isinstance(value, fields.Field): if value.name and value.model: value = (value.model._meta.label, value.name) else: value = type(value) else: value = make_hashable(value) identity.append((arg, value)) return tuple(identity) def __eq__(self, other): if not isinstance(other, Expression): return NotImplemented return other.identity == self.identity def __hash__(self): return hash(self.identity) # Type inference for CombinedExpression.output_field. # Missing items will result in FieldError, by design. # # The current approach for NULL is based on lowest common denominator behavior # i.e. 
if one of the supported databases is raising an error (rather than # return NULL) for `val <op> NULL`, then Django raises FieldError. NoneType = type(None) _connector_combinations = [ # Numeric operations - operands of same type. { connector: [ (fields.IntegerField, fields.IntegerField, fields.IntegerField), (fields.FloatField, fields.FloatField, fields.FloatField), (fields.DecimalField, fields.DecimalField, fields.DecimalField), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, # Behavior for DIV with integer arguments follows Postgres/SQLite, # not MySQL/Oracle. Combinable.DIV, Combinable.MOD, Combinable.POW, ) }, # Numeric operations - operands of different type. { connector: [ (fields.IntegerField, fields.DecimalField, fields.DecimalField), (fields.DecimalField, fields.IntegerField, fields.DecimalField), (fields.IntegerField, fields.FloatField, fields.FloatField), (fields.FloatField, fields.IntegerField, fields.FloatField), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV, Combinable.MOD, ) }, # Bitwise operators. { connector: [ (fields.IntegerField, fields.IntegerField, fields.IntegerField), ] for connector in ( Combinable.BITAND, Combinable.BITOR, Combinable.BITLEFTSHIFT, Combinable.BITRIGHTSHIFT, Combinable.BITXOR, ) }, # Numeric with NULL. { connector: [ (field_type, NoneType, field_type), (NoneType, field_type, field_type), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV, Combinable.MOD, Combinable.POW, ) for field_type in (fields.IntegerField, fields.DecimalField, fields.FloatField) }, # Date/DateTimeField/DurationField/TimeField. { Combinable.ADD: [ # Date/DateTimeField. (fields.DateField, fields.DurationField, fields.DateTimeField), (fields.DateTimeField, fields.DurationField, fields.DateTimeField), (fields.DurationField, fields.DateField, fields.DateTimeField), (fields.DurationField, fields.DateTimeField, fields.DateTimeField), # DurationField. (fields.DurationField, fields.DurationField, fields.DurationField), # TimeField. (fields.TimeField, fields.DurationField, fields.TimeField), (fields.DurationField, fields.TimeField, fields.TimeField), ], }, { Combinable.SUB: [ # Date/DateTimeField. (fields.DateField, fields.DurationField, fields.DateTimeField), (fields.DateTimeField, fields.DurationField, fields.DateTimeField), (fields.DateField, fields.DateField, fields.DurationField), (fields.DateField, fields.DateTimeField, fields.DurationField), (fields.DateTimeField, fields.DateField, fields.DurationField), (fields.DateTimeField, fields.DateTimeField, fields.DurationField), # DurationField. (fields.DurationField, fields.DurationField, fields.DurationField), # TimeField. (fields.TimeField, fields.DurationField, fields.TimeField), (fields.TimeField, fields.TimeField, fields.DurationField), ], }, ] _connector_combinators = defaultdict(list) def register_combinable_fields(lhs, connector, rhs, result): """ Register combinable types: lhs <connector> rhs -> result e.g. 
register_combinable_fields( IntegerField, Combinable.ADD, FloatField, FloatField ) """ _connector_combinators[connector].append((lhs, rhs, result)) for d in _connector_combinations: for connector, field_types in d.items(): for lhs, rhs, result in field_types: register_combinable_fields(lhs, connector, rhs, result) @functools.lru_cache(maxsize=128) def _resolve_combined_type(connector, lhs_type, rhs_type): combinators = _connector_combinators.get(connector, ()) for combinator_lhs_type, combinator_rhs_type, combined_type in combinators: if issubclass(lhs_type, combinator_lhs_type) and issubclass( rhs_type, combinator_rhs_type ): return combined_type class CombinedExpression(SQLiteNumericMixin, Expression): def __init__(self, lhs, connector, rhs, output_field=None): super().__init__(output_field=output_field) self.connector = connector self.lhs = lhs self.rhs = rhs def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, self) def __str__(self): return "{} {} {}".format(self.lhs, self.connector, self.rhs) def get_source_expressions(self): return [self.lhs, self.rhs] def set_source_expressions(self, exprs): self.lhs, self.rhs = exprs def _resolve_output_field(self): # We avoid using super() here for reasons given in # Expression._resolve_output_field() combined_type = _resolve_combined_type( self.connector, type(self.lhs._output_field_or_none), type(self.rhs._output_field_or_none), ) if combined_type is None: raise FieldError( f"Cannot infer type of {self.connector!r} expression involving these " f"types: {self.lhs.output_field.__class__.__name__}, " f"{self.rhs.output_field.__class__.__name__}. You must set " f"output_field." ) return combined_type() def as_sql(self, compiler, connection): expressions = [] expression_params = [] sql, params = compiler.compile(self.lhs) expressions.append(sql) expression_params.extend(params) sql, params = compiler.compile(self.rhs) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = "(%s)" sql = connection.ops.combine_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): lhs = self.lhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) rhs = self.rhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) if not isinstance(self, (DurationExpression, TemporalSubtraction)): try: lhs_type = lhs.output_field.get_internal_type() except (AttributeError, FieldError): lhs_type = None try: rhs_type = rhs.output_field.get_internal_type() except (AttributeError, FieldError): rhs_type = None if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type: return DurationExpression( self.lhs, self.connector, self.rhs ).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) datetime_fields = {"DateField", "DateTimeField", "TimeField"} if ( self.connector == self.SUB and lhs_type in datetime_fields and lhs_type == rhs_type ): return TemporalSubtraction(self.lhs, self.rhs).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) c = self.copy() c.is_summary = summarize c.lhs = lhs c.rhs = rhs return c class DurationExpression(CombinedExpression): def compile(self, side, compiler, connection): try: output = side.output_field except FieldError: pass else: if output.get_internal_type() == "DurationField": sql, params = compiler.compile(side) return connection.ops.format_for_duration_arithmetic(sql), params return 
compiler.compile(side) def as_sql(self, compiler, connection): if connection.features.has_native_duration_field: return super().as_sql(compiler, connection) connection.ops.check_expression_support(self) expressions = [] expression_params = [] sql, params = self.compile(self.lhs, compiler, connection) expressions.append(sql) expression_params.extend(params) sql, params = self.compile(self.rhs, compiler, connection) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = "(%s)" sql = connection.ops.combine_duration_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) if self.connector in {Combinable.MUL, Combinable.DIV}: try: lhs_type = self.lhs.output_field.get_internal_type() rhs_type = self.rhs.output_field.get_internal_type() except (AttributeError, FieldError): pass else: allowed_fields = { "DecimalField", "DurationField", "FloatField", "IntegerField", } if lhs_type not in allowed_fields or rhs_type not in allowed_fields: raise DatabaseError( f"Invalid arguments for operator {self.connector}." ) return sql, params class TemporalSubtraction(CombinedExpression): output_field = fields.DurationField() def __init__(self, lhs, rhs): super().__init__(lhs, self.SUB, rhs) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) lhs = compiler.compile(self.lhs) rhs = compiler.compile(self.rhs) return connection.ops.subtract_temporals( self.lhs.output_field.get_internal_type(), lhs, rhs ) @deconstructible(path="django.db.models.F") class F(Combinable): """An object capable of resolving references to existing query objects.""" def __init__(self, name): """ Arguments: * name: the name of the field this expression references """ self.name = name def __repr__(self): return "{}({})".format(self.__class__.__name__, self.name) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): return query.resolve_ref(self.name, allow_joins, reuse, summarize) def replace_expressions(self, replacements): return replacements.get(self, self) def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def __eq__(self, other): return self.__class__ == other.__class__ and self.name == other.name def __hash__(self): return hash(self.name) def copy(self): return copy.copy(self) class ResolvedOuterRef(F): """ An object that contains a reference to an outer query. In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery. """ contains_aggregate = False contains_over_clause = False def as_sql(self, *args, **kwargs): raise ValueError( "This queryset contains a reference to an outer query and may " "only be used in a subquery." ) def resolve_expression(self, *args, **kwargs): col = super().resolve_expression(*args, **kwargs) # FIXME: Rename possibly_multivalued to multivalued and fix detection # for non-multivalued JOINs (e.g. foreign key fields). This should take # into account only many-to-many and one-to-many relationships. 
col.possibly_multivalued = LOOKUP_SEP in self.name return col def relabeled_clone(self, relabels): return self def get_group_by_cols(self): return [] class OuterRef(F): contains_aggregate = False def resolve_expression(self, *args, **kwargs): if isinstance(self.name, self.__class__): return self.name return ResolvedOuterRef(self.name) def relabeled_clone(self, relabels): return self @deconstructible(path="django.db.models.Func") class Func(SQLiteNumericMixin, Expression): """An SQL function call.""" function = None template = "%(function)s(%(expressions)s)" arg_joiner = ", " arity = None # The number of arguments the function accepts. def __init__(self, *expressions, output_field=None, **extra): if self.arity is not None and len(expressions) != self.arity: raise TypeError( "'%s' takes exactly %s %s (%s given)" % ( self.__class__.__name__, self.arity, "argument" if self.arity == 1 else "arguments", len(expressions), ) ) super().__init__(output_field=output_field) self.source_expressions = self._parse_expressions(*expressions) self.extra = extra def __repr__(self): args = self.arg_joiner.join(str(arg) for arg in self.source_expressions) extra = {**self.extra, **self._get_repr_options()} if extra: extra = ", ".join( str(key) + "=" + str(val) for key, val in sorted(extra.items()) ) return "{}({}, {})".format(self.__class__.__name__, args, extra) return "{}({})".format(self.__class__.__name__, args) def _get_repr_options(self): """Return a dict of extra __init__() options to include in the repr.""" return {} def get_source_expressions(self): return self.source_expressions def set_source_expressions(self, exprs): self.source_expressions = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize for pos, arg in enumerate(c.source_expressions): c.source_expressions[pos] = arg.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def as_sql( self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context, ): connection.ops.check_expression_support(self) sql_parts = [] params = [] for arg in self.source_expressions: try: arg_sql, arg_params = compiler.compile(arg) except EmptyResultSet: empty_result_set_value = getattr( arg, "empty_result_set_value", NotImplemented ) if empty_result_set_value is NotImplemented: raise arg_sql, arg_params = compiler.compile(Value(empty_result_set_value)) sql_parts.append(arg_sql) params.extend(arg_params) data = {**self.extra, **extra_context} # Use the first supplied value in this order: the parameter to this # method, a value supplied in __init__()'s **extra (the value in # `data`), or the value defined on the class. if function is not None: data["function"] = function else: data.setdefault("function", self.function) template = template or data.get("template", self.template) arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner) data["expressions"] = data["field"] = arg_joiner.join(sql_parts) return template % data, params def copy(self): copy = super().copy() copy.source_expressions = self.source_expressions[:] copy.extra = self.extra.copy() return copy @deconstructible(path="django.db.models.Value") class Value(SQLiteNumericMixin, Expression): """Represent a wrapped value as a node within an expression.""" # Provide a default value for `for_save` in order to allow unresolved # instances to be compiled until a decision is taken in #25425. 
for_save = False def __init__(self, value, output_field=None): """ Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. * output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField(). """ super().__init__(output_field=output_field) self.value = value def __repr__(self): return f"{self.__class__.__name__}({self.value!r})" def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) val = self.value output_field = self._output_field_or_none if output_field is not None: if self.for_save: val = output_field.get_db_prep_save(val, connection=connection) else: val = output_field.get_db_prep_value(val, connection=connection) if hasattr(output_field, "get_placeholder"): return output_field.get_placeholder(val, compiler, connection), [val] if val is None: # cx_Oracle does not always convert None to the appropriate # NULL type (like in case expressions using numbers), so we # use a literal SQL NULL return "NULL", [] return "%s", [val] def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) c.for_save = for_save return c def get_group_by_cols(self): return [] def _resolve_output_field(self): if isinstance(self.value, str): return fields.CharField() if isinstance(self.value, bool): return fields.BooleanField() if isinstance(self.value, int): return fields.IntegerField() if isinstance(self.value, float): return fields.FloatField() if isinstance(self.value, datetime.datetime): return fields.DateTimeField() if isinstance(self.value, datetime.date): return fields.DateField() if isinstance(self.value, datetime.time): return fields.TimeField() if isinstance(self.value, datetime.timedelta): return fields.DurationField() if isinstance(self.value, Decimal): return fields.DecimalField() if isinstance(self.value, bytes): return fields.BinaryField() if isinstance(self.value, UUID): return fields.UUIDField() @property def empty_result_set_value(self): return self.value class RawSQL(Expression): def __init__(self, sql, params, output_field=None): if output_field is None: output_field = fields.Field() self.sql, self.params = sql, params super().__init__(output_field=output_field) def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params) def as_sql(self, compiler, connection): return "(%s)" % self.sql, self.params def get_group_by_cols(self): return [self] def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # Resolve parents fields used in raw SQL. 
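        # Under multi-table inheritance a parent's column is only selectable
        # once the JOIN to the parent table exists; resolving the ref here
        # forces that JOIN to be set up for any parent column the raw SQL
        # appears to mention.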
if query.model: for parent in query.model._meta.get_parent_list(): for parent_field in parent._meta.local_fields: _, column_name = parent_field.get_attname_column() if column_name.lower() in self.sql.lower(): query.resolve_ref( parent_field.name, allow_joins, reuse, summarize ) break return super().resolve_expression( query, allow_joins, reuse, summarize, for_save ) class Star(Expression): def __repr__(self): return "'*'" def as_sql(self, compiler, connection): return "*", [] class Col(Expression): contains_column_references = True possibly_multivalued = False def __init__(self, alias, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.alias, self.target = alias, target def __repr__(self): alias, target = self.alias, self.target identifiers = (alias, str(target)) if alias else (str(target),) return "{}({})".format(self.__class__.__name__, ", ".join(identifiers)) def as_sql(self, compiler, connection): alias, column = self.alias, self.target.column identifiers = (alias, column) if alias else (column,) sql = ".".join(map(compiler.quote_name_unless_alias, identifiers)) return sql, [] def relabeled_clone(self, relabels): if self.alias is None: return self return self.__class__( relabels.get(self.alias, self.alias), self.target, self.output_field ) def get_group_by_cols(self): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return self.output_field.get_db_converters( connection ) + self.target.get_db_converters(connection) class Ref(Expression): """ Reference to column alias of the query. For example, Ref('sum_cost') in qs.annotate(sum_cost=Sum('cost')) query. """ def __init__(self, refs, source): super().__init__() self.refs, self.source = refs, source def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) def get_source_expressions(self): return [self.source] def set_source_expressions(self, exprs): (self.source,) = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # The sub-expression `source` has already been resolved, as this is # just a reference to the name of `source`. return self def relabeled_clone(self, relabels): return self def as_sql(self, compiler, connection): return connection.ops.quote_name(self.refs), [] def get_group_by_cols(self): return [self] class ExpressionList(Func): """ An expression containing multiple expressions. Can be used to provide a list of expressions as an argument to another expression, like a partition clause. """ template = "%(expressions)s" def __init__(self, *expressions, **extra): if not expressions: raise ValueError( "%s requires at least one expression." % self.__class__.__name__ ) super().__init__(*expressions, **extra) def __str__(self): return self.arg_joiner.join(str(arg) for arg in self.source_expressions) def as_sqlite(self, compiler, connection, **extra_context): # Casting to numeric is unnecessary. 
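        # ExpressionList only renders clause fragments (e.g. a PARTITION BY
        # list), never a selected value, so the mixin's CAST would add noise.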
return self.as_sql(compiler, connection, **extra_context) class OrderByList(Func): template = "ORDER BY %(expressions)s" def __init__(self, *expressions, **extra): expressions = ( ( OrderBy(F(expr[1:]), descending=True) if isinstance(expr, str) and expr[0] == "-" else expr ) for expr in expressions ) super().__init__(*expressions, **extra) def as_sql(self, *args, **kwargs): if not self.source_expressions: return "", () return super().as_sql(*args, **kwargs) def get_group_by_cols(self): group_by_cols = [] for order_by in self.get_source_expressions(): group_by_cols.extend(order_by.get_group_by_cols()) return group_by_cols @deconstructible(path="django.db.models.ExpressionWrapper") class ExpressionWrapper(SQLiteNumericMixin, Expression): """ An expression that can wrap another expression so that it can provide extra context to the inner expression, such as the output_field. """ def __init__(self, expression, output_field): super().__init__(output_field=output_field) self.expression = expression def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def get_group_by_cols(self): if isinstance(self.expression, Expression): expression = self.expression.copy() expression.output_field = self.output_field return expression.get_group_by_cols() # For non-expressions e.g. an SQL WHERE clause, the entire # `expression` must be included in the GROUP BY clause. return super().get_group_by_cols() def as_sql(self, compiler, connection): return compiler.compile(self.expression) def __repr__(self): return "{}({})".format(self.__class__.__name__, self.expression) class NegatedExpression(ExpressionWrapper): """The logical negation of a conditional expression.""" def __init__(self, expression): super().__init__(expression, output_field=fields.BooleanField()) def __invert__(self): return self.expression.copy() def as_sql(self, compiler, connection): try: sql, params = super().as_sql(compiler, connection) except EmptyResultSet: features = compiler.connection.features if not features.supports_boolean_expr_in_select_clause: return "1=1", () return compiler.compile(Value(True)) ops = compiler.connection.ops # Some database backends (e.g. Oracle) don't allow EXISTS() and filters # to be compared to another expression unless they're wrapped in a CASE # WHEN. if not ops.conditional_expression_supported_in_where_clause(self.expression): return f"CASE WHEN {sql} = 0 THEN 1 ELSE 0 END", params return f"NOT {sql}", params def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): resolved = super().resolve_expression( query, allow_joins, reuse, summarize, for_save ) if not getattr(resolved.expression, "conditional", False): raise TypeError("Cannot negate non-conditional expressions.") return resolved def select_format(self, compiler, sql, params): # Wrap boolean expressions with a CASE WHEN expression if a database # backend (e.g. Oracle) doesn't support boolean expression in SELECT or # GROUP BY list. expression_supported_in_where_clause = ( compiler.connection.ops.conditional_expression_supported_in_where_clause ) if ( not compiler.connection.features.supports_boolean_expr_in_select_clause # Avoid double wrapping. 
and expression_supported_in_where_clause(self.expression) ): sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql) return sql, params @deconstructible(path="django.db.models.When") class When(Expression): template = "WHEN %(condition)s THEN %(result)s" # This isn't a complete conditional expression, must be used in Case(). conditional = False def __init__(self, condition=None, then=None, **lookups): if lookups: if condition is None: condition, lookups = Q(**lookups), None elif getattr(condition, "conditional", False): condition, lookups = Q(condition, **lookups), None if condition is None or not getattr(condition, "conditional", False) or lookups: raise TypeError( "When() supports a Q object, a boolean expression, or lookups " "as a condition." ) if isinstance(condition, Q) and not condition: raise ValueError("An empty Q() can't be used as a When() condition.") super().__init__(output_field=None) self.condition = condition self.result = self._parse_expressions(then)[0] def __str__(self): return "WHEN %r THEN %r" % (self.condition, self.result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return [self.condition, self.result] def set_source_expressions(self, exprs): self.condition, self.result = exprs def get_source_fields(self): # We're only interested in the fields of the result expressions. return [self.result._output_field_or_none] def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize if hasattr(c.condition, "resolve_expression"): c.condition = c.condition.resolve_expression( query, allow_joins, reuse, summarize, False ) c.result = c.result.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = extra_context sql_params = [] condition_sql, condition_params = compiler.compile(self.condition) # Filters that match everything are handled as empty strings in the # WHERE clause, but in a CASE WHEN expression they must use a predicate # that's always True. if condition_sql == "": if connection.features.supports_boolean_expr_in_select_clause: condition_sql, condition_params = compiler.compile(Value(True)) else: condition_sql, condition_params = "1=1", () template_params["condition"] = condition_sql result_sql, result_params = compiler.compile(self.result) template_params["result"] = result_sql template = template or self.template return template % template_params, ( *sql_params, *condition_params, *result_params, ) def get_group_by_cols(self): # This is not a complete expression and cannot be used in GROUP BY. 
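        # Group by the condition and result sub-expressions instead.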
cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols @deconstructible(path="django.db.models.Case") class Case(SQLiteNumericMixin, Expression): """ An SQL searched CASE expression: CASE WHEN n > 0 THEN 'positive' WHEN n < 0 THEN 'negative' ELSE 'zero' END """ template = "CASE %(cases)s ELSE %(default)s END" case_joiner = " " def __init__(self, *cases, default=None, output_field=None, **extra): if not all(isinstance(case, When) for case in cases): raise TypeError("Positional arguments must all be When objects.") super().__init__(output_field) self.cases = list(cases) self.default = self._parse_expressions(default)[0] self.extra = extra def __str__(self): return "CASE %s, ELSE %r" % ( ", ".join(str(c) for c in self.cases), self.default, ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return self.cases + [self.default] def set_source_expressions(self, exprs): *self.cases, self.default = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize for pos, case in enumerate(c.cases): c.cases[pos] = case.resolve_expression( query, allow_joins, reuse, summarize, for_save ) c.default = c.default.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def copy(self): c = super().copy() c.cases = c.cases[:] return c def as_sql( self, compiler, connection, template=None, case_joiner=None, **extra_context ): connection.ops.check_expression_support(self) if not self.cases: return compiler.compile(self.default) template_params = {**self.extra, **extra_context} case_parts = [] sql_params = [] for case in self.cases: try: case_sql, case_params = compiler.compile(case) except EmptyResultSet: continue case_parts.append(case_sql) sql_params.extend(case_params) default_sql, default_params = compiler.compile(self.default) if not case_parts: return default_sql, default_params case_joiner = case_joiner or self.case_joiner template_params["cases"] = case_joiner.join(case_parts) template_params["default"] = default_sql sql_params.extend(default_params) template = template or template_params.get("template", self.template) sql = template % template_params if self._output_field_or_none is not None: sql = connection.ops.unification_cast_sql(self.output_field) % sql return sql, sql_params def get_group_by_cols(self): if not self.cases: return self.default.get_group_by_cols() return super().get_group_by_cols() class Subquery(BaseExpression, Combinable): """ An explicit subquery. It may contain OuterRef() references to the outer query which will be resolved when it is applied to that query. """ template = "(%(subquery)s)" contains_aggregate = False empty_result_set_value = None def __init__(self, queryset, output_field=None, **extra): # Allow the usage of both QuerySet and sql.Query objects. 
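        # A typical construction, mirroring the documented correlated-subquery
        # pattern (hypothetical Post/Comment models):
        #
        #     newest = Comment.objects.filter(post=OuterRef("pk"))
        #     Post.objects.annotate(
        #         newest_email=Subquery(newest.values("email")[:1])
        #     )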
self.query = getattr(queryset, "query", queryset).clone() self.query.subquery = True self.extra = extra super().__init__(output_field) def get_source_expressions(self): return [self.query] def set_source_expressions(self, exprs): self.query = exprs[0] def _resolve_output_field(self): return self.query.output_field def copy(self): clone = super().copy() clone.query = clone.query.clone() return clone @property def external_aliases(self): return self.query.external_aliases def get_external_cols(self): return self.query.get_external_cols() def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = {**self.extra, **extra_context} subquery_sql, sql_params = self.query.as_sql(compiler, connection) template_params["subquery"] = subquery_sql[1:-1] template = template or template_params.get("template", self.template) sql = template % template_params return sql, sql_params def get_group_by_cols(self): return self.query.get_group_by_cols(wrapper=self) class Exists(Subquery): template = "EXISTS(%(subquery)s)" output_field = fields.BooleanField() def __init__(self, queryset, **kwargs): super().__init__(queryset, **kwargs) self.query = self.query.exists() def select_format(self, compiler, sql, params): # Wrap EXISTS() with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql) return sql, params @deconstructible(path="django.db.models.OrderBy") class OrderBy(Expression): template = "%(expression)s %(ordering)s" conditional = False def __init__(self, expression, descending=False, nulls_first=None, nulls_last=None): if nulls_first and nulls_last: raise ValueError("nulls_first and nulls_last are mutually exclusive") if nulls_first is False or nulls_last is False: # When the deprecation ends, replace with: # raise ValueError( # "nulls_first and nulls_last values must be True or None." 
# ) warnings.warn( "Passing nulls_first=False or nulls_last=False is deprecated, use None " "instead.", RemovedInDjango50Warning, stacklevel=2, ) self.nulls_first = nulls_first self.nulls_last = nulls_last self.descending = descending if not hasattr(expression, "resolve_expression"): raise ValueError("expression must be an expression type") self.expression = expression def __repr__(self): return "{}({}, descending={})".format( self.__class__.__name__, self.expression, self.descending ) def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection, template=None, **extra_context): template = template or self.template if connection.features.supports_order_by_nulls_modifier: if self.nulls_last: template = "%s NULLS LAST" % template elif self.nulls_first: template = "%s NULLS FIRST" % template else: if self.nulls_last and not ( self.descending and connection.features.order_by_nulls_first ): template = "%%(expression)s IS NULL, %s" % template elif self.nulls_first and not ( not self.descending and connection.features.order_by_nulls_first ): template = "%%(expression)s IS NOT NULL, %s" % template connection.ops.check_expression_support(self) expression_sql, params = compiler.compile(self.expression) placeholders = { "expression": expression_sql, "ordering": "DESC" if self.descending else "ASC", **extra_context, } params *= template.count("%(expression)s") return (template % placeholders).rstrip(), params def as_oracle(self, compiler, connection): # Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped # in a CASE WHEN. if connection.ops.conditional_expression_supported_in_where_clause( self.expression ): copy = self.copy() copy.expression = Case( When(self.expression, then=True), default=False, ) return copy.as_sql(compiler, connection) return self.as_sql(compiler, connection) def get_group_by_cols(self): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def reverse_ordering(self): self.descending = not self.descending if self.nulls_first: self.nulls_last = True self.nulls_first = None elif self.nulls_last: self.nulls_first = True self.nulls_last = None return self def asc(self): self.descending = False def desc(self): self.descending = True class Window(SQLiteNumericMixin, Expression): template = "%(expression)s OVER (%(window)s)" # Although the main expression may either be an aggregate or an # expression with an aggregate function, the GROUP BY that will # be introduced in the query as a result is not desired. contains_aggregate = False contains_over_clause = True def __init__( self, expression, partition_by=None, order_by=None, frame=None, output_field=None, ): self.partition_by = partition_by self.order_by = order_by self.frame = frame if not getattr(expression, "window_compatible", False): raise ValueError( "Expression '%s' isn't compatible with OVER clauses." % expression.__class__.__name__ ) if self.partition_by is not None: if not isinstance(self.partition_by, (tuple, list)): self.partition_by = (self.partition_by,) self.partition_by = ExpressionList(*self.partition_by) if self.order_by is not None: if isinstance(self.order_by, (list, tuple)): self.order_by = OrderByList(*self.order_by) elif isinstance(self.order_by, (BaseExpression, str)): self.order_by = OrderByList(self.order_by) else: raise ValueError( "Window.order_by must be either a string reference to a " "field, an expression, or a list or tuple of them." 
) super().__init__(output_field=output_field) self.source_expression = self._parse_expressions(expression)[0] def _resolve_output_field(self): return self.source_expression.output_field def get_source_expressions(self): return [self.source_expression, self.partition_by, self.order_by, self.frame] def set_source_expressions(self, exprs): self.source_expression, self.partition_by, self.order_by, self.frame = exprs def as_sql(self, compiler, connection, template=None): connection.ops.check_expression_support(self) if not connection.features.supports_over_clause: raise NotSupportedError("This backend does not support window expressions.") expr_sql, params = compiler.compile(self.source_expression) window_sql, window_params = [], () if self.partition_by is not None: sql_expr, sql_params = self.partition_by.as_sql( compiler=compiler, connection=connection, template="PARTITION BY %(expressions)s", ) window_sql.append(sql_expr) window_params += tuple(sql_params) if self.order_by is not None: order_sql, order_params = compiler.compile(self.order_by) window_sql.append(order_sql) window_params += tuple(order_params) if self.frame: frame_sql, frame_params = compiler.compile(self.frame) window_sql.append(frame_sql) window_params += tuple(frame_params) template = template or self.template return ( template % {"expression": expr_sql, "window": " ".join(window_sql).strip()}, (*params, *window_params), ) def as_sqlite(self, compiler, connection): if isinstance(self.output_field, fields.DecimalField): # Casting to numeric must be outside of the window expression. copy = self.copy() source_expressions = copy.get_source_expressions() source_expressions[0].output_field = fields.FloatField() copy.set_source_expressions(source_expressions) return super(Window, copy).as_sqlite(compiler, connection) return self.as_sql(compiler, connection) def __str__(self): return "{} OVER ({}{}{})".format( str(self.source_expression), "PARTITION BY " + str(self.partition_by) if self.partition_by else "", str(self.order_by or ""), str(self.frame or ""), ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_group_by_cols(self): group_by_cols = [] if self.partition_by: group_by_cols.extend(self.partition_by.get_group_by_cols()) if self.order_by is not None: group_by_cols.extend(self.order_by.get_group_by_cols()) return group_by_cols class WindowFrame(Expression): """ Model the frame clause in window expressions. There are two types of frame clauses which are subclasses, however, all processing and validation (by no means intended to be complete) is done here. Thus, providing an end for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the last row in the frame). 
""" template = "%(frame_type)s BETWEEN %(start)s AND %(end)s" def __init__(self, start=None, end=None): self.start = Value(start) self.end = Value(end) def set_source_expressions(self, exprs): self.start, self.end = exprs def get_source_expressions(self): return [self.start, self.end] def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) start, end = self.window_frame_start_end( connection, self.start.value, self.end.value ) return ( self.template % { "frame_type": self.frame_type, "start": start, "end": end, }, [], ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_group_by_cols(self): return [] def __str__(self): if self.start.value is not None and self.start.value < 0: start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING) elif self.start.value is not None and self.start.value == 0: start = connection.ops.CURRENT_ROW else: start = connection.ops.UNBOUNDED_PRECEDING if self.end.value is not None and self.end.value > 0: end = "%d %s" % (self.end.value, connection.ops.FOLLOWING) elif self.end.value is not None and self.end.value == 0: end = connection.ops.CURRENT_ROW else: end = connection.ops.UNBOUNDED_FOLLOWING return self.template % { "frame_type": self.frame_type, "start": start, "end": end, } def window_frame_start_end(self, connection, start, end): raise NotImplementedError("Subclasses must implement window_frame_start_end().") class RowRange(WindowFrame): frame_type = "ROWS" def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_rows_start_end(start, end) class ValueRange(WindowFrame): frame_type = "RANGE" def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_range_start_end(start, end)
import functools import inspect from functools import partial from django import forms from django.apps import apps from django.conf import SettingsReference, settings from django.core import checks, exceptions from django.db import connection, router from django.db.backends import utils from django.db.models import Q from django.db.models.constants import LOOKUP_SEP from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL from django.db.models.query_utils import PathInfo from django.db.models.utils import make_model_tuple from django.utils.functional import cached_property from django.utils.translation import gettext_lazy as _ from . import Field from .mixins import FieldCacheMixin from .related_descriptors import ( ForeignKeyDeferredAttribute, ForwardManyToOneDescriptor, ForwardOneToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from .related_lookups import ( RelatedExact, RelatedGreaterThan, RelatedGreaterThanOrEqual, RelatedIn, RelatedIsNull, RelatedLessThan, RelatedLessThanOrEqual, ) from .reverse_related import ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel RECURSIVE_RELATIONSHIP_CONSTANT = "self" def resolve_relation(scope_model, relation): """ Transform relation into a model or fully-qualified model string of the form "app_label.ModelName", relative to scope_model. The relation argument can be: * RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case the model argument will be returned. * A bare model name without an app_label, in which case scope_model's app_label will be prepended. * An "app_label.ModelName" string. * A model class, which will be returned unchanged. """ # Check for recursive relations if relation == RECURSIVE_RELATIONSHIP_CONSTANT: relation = scope_model # Look for an "app.Model" relation if isinstance(relation, str): if "." not in relation: relation = "%s.%s" % (scope_model._meta.app_label, relation) return relation def lazy_related_operation(function, model, *related_models, **kwargs): """ Schedule `function` to be called once `model` and all `related_models` have been imported and registered with the app registry. `function` will be called with the newly-loaded model classes as its positional arguments, plus any optional keyword arguments. The `model` argument must be a model class. Each subsequent positional argument is another model, or a reference to another model - see `resolve_relation()` for the various forms these may take. Any relative references will be resolved relative to `model`. This is a convenience wrapper for `Apps.lazy_model_operation` - the app registry model used is the one found in `model._meta.apps`. """ models = [model] + [resolve_relation(model, rel) for rel in related_models] model_keys = (make_model_tuple(m) for m in models) apps = model._meta.apps return apps.lazy_model_operation(partial(function, **kwargs), *model_keys) class RelatedField(FieldCacheMixin, Field): """Base class that all relational fields inherit from.""" # Field flags one_to_many = False one_to_one = False many_to_many = False many_to_one = False def __init__( self, related_name=None, related_query_name=None, limit_choices_to=None, **kwargs, ): self._related_name = related_name self._related_query_name = related_query_name self._limit_choices_to = limit_choices_to super().__init__(**kwargs) @cached_property def related_model(self): # Can't cache this property until all the models are loaded. 
apps.check_models_ready() return self.remote_field.model def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_related_name_is_valid(), *self._check_related_query_name_is_valid(), *self._check_relation_model_exists(), *self._check_referencing_to_swapped_model(), *self._check_clashes(), ] def _check_related_name_is_valid(self): import keyword related_name = self.remote_field.related_name if related_name is None: return [] is_valid_id = ( not keyword.iskeyword(related_name) and related_name.isidentifier() ) if not (is_valid_id or related_name.endswith("+")): return [ checks.Error( "The name '%s' is invalid related_name for field %s.%s" % ( self.remote_field.related_name, self.model._meta.object_name, self.name, ), hint=( "Related name must be a valid Python identifier or end with a " "'+'" ), obj=self, id="fields.E306", ) ] return [] def _check_related_query_name_is_valid(self): if self.remote_field.is_hidden(): return [] rel_query_name = self.related_query_name() errors = [] if rel_query_name.endswith("_"): errors.append( checks.Error( "Reverse query name '%s' must not end with an underscore." % rel_query_name, hint=( "Add or change a related_name or related_query_name " "argument for this field." ), obj=self, id="fields.E308", ) ) if LOOKUP_SEP in rel_query_name: errors.append( checks.Error( "Reverse query name '%s' must not contain '%s'." % (rel_query_name, LOOKUP_SEP), hint=( "Add or change a related_name or related_query_name " "argument for this field." ), obj=self, id="fields.E309", ) ) return errors def _check_relation_model_exists(self): rel_is_missing = self.remote_field.model not in self.opts.apps.get_models() rel_is_string = isinstance(self.remote_field.model, str) model_name = ( self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name ) if rel_is_missing and ( rel_is_string or not self.remote_field.model._meta.swapped ): return [ checks.Error( "Field defines a relation with model '%s', which is either " "not installed, or is abstract." % model_name, obj=self, id="fields.E300", ) ] return [] def _check_referencing_to_swapped_model(self): if ( self.remote_field.model not in self.opts.apps.get_models() and not isinstance(self.remote_field.model, str) and self.remote_field.model._meta.swapped ): return [ checks.Error( "Field defines a relation with the model '%s', which has " "been swapped out." % self.remote_field.model._meta.label, hint="Update the relation to point at 'settings.%s'." % self.remote_field.model._meta.swappable, obj=self, id="fields.E301", ) ] return [] def _check_clashes(self): """Check accessor and reverse query name clashes.""" from django.db.models.base import ModelBase errors = [] opts = self.model._meta # f.remote_field.model may be a string instead of a model. Skip if # model name is not resolved. if not isinstance(self.remote_field.model, ModelBase): return [] # Consider that we are checking field `Model.foreign` and the models # are: # # class Target(models.Model): # model = models.IntegerField() # model_set = models.IntegerField() # # class Model(models.Model): # foreign = models.ForeignKey(Target) # m2m = models.ManyToManyField(Target) # rel_opts.object_name == "Target" rel_opts = self.remote_field.model._meta # If the field doesn't install a backward relation on the target model # (so `is_hidden` returns True), then there are no clashes to check # and we can skip these fields. rel_is_hidden = self.remote_field.is_hidden() rel_name = self.remote_field.get_accessor_name() # i. e. 
"model_set" rel_query_name = self.related_query_name() # i. e. "model" # i.e. "app_label.Model.field". field_name = "%s.%s" % (opts.label, self.name) # Check clashes between accessor or reverse query name of `field` # and any other field name -- i.e. accessor for Model.foreign is # model_set and it clashes with Target.model_set. potential_clashes = rel_opts.fields + rel_opts.many_to_many for clash_field in potential_clashes: # i.e. "app_label.Target.model_set". clash_name = "%s.%s" % (rel_opts.label, clash_field.name) if not rel_is_hidden and clash_field.name == rel_name: errors.append( checks.Error( f"Reverse accessor '{rel_opts.object_name}.{rel_name}' " f"for '{field_name}' clashes with field name " f"'{clash_name}'.", hint=( "Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'." ) % (clash_name, field_name), obj=self, id="fields.E302", ) ) if clash_field.name == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name), hint=( "Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'." ) % (clash_name, field_name), obj=self, id="fields.E303", ) ) # Check clashes between accessors/reverse query names of `field` and # any other field accessor -- i. e. Model.foreign accessor clashes with # Model.m2m accessor. potential_clashes = (r for r in rel_opts.related_objects if r.field is not self) for clash_field in potential_clashes: # i.e. "app_label.Model.m2m". clash_name = "%s.%s" % ( clash_field.related_model._meta.label, clash_field.field.name, ) if not rel_is_hidden and clash_field.get_accessor_name() == rel_name: errors.append( checks.Error( f"Reverse accessor '{rel_opts.object_name}.{rel_name}' " f"for '{field_name}' clashes with reverse accessor for " f"'{clash_name}'.", hint=( "Add or change a related_name argument " "to the definition for '%s' or '%s'." ) % (field_name, clash_name), obj=self, id="fields.E304", ) ) if clash_field.get_accessor_name() == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with reverse query name " "for '%s'." % (field_name, clash_name), hint=( "Add or change a related_name argument " "to the definition for '%s' or '%s'." ) % (field_name, clash_name), obj=self, id="fields.E305", ) ) return errors def db_type(self, connection): # By default related field will not have a column as it relates to # columns from another table. 
return None def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) self.opts = cls._meta if not cls._meta.abstract: if self.remote_field.related_name: related_name = self.remote_field.related_name else: related_name = self.opts.default_related_name if related_name: related_name %= { "class": cls.__name__.lower(), "model_name": cls._meta.model_name.lower(), "app_label": cls._meta.app_label.lower(), } self.remote_field.related_name = related_name if self.remote_field.related_query_name: related_query_name = self.remote_field.related_query_name % { "class": cls.__name__.lower(), "app_label": cls._meta.app_label.lower(), } self.remote_field.related_query_name = related_query_name def resolve_related_class(model, related, field): field.remote_field.model = related field.do_related_class(related, model) lazy_related_operation( resolve_related_class, cls, self.remote_field.model, field=self ) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self._limit_choices_to: kwargs["limit_choices_to"] = self._limit_choices_to if self._related_name is not None: kwargs["related_name"] = self._related_name if self._related_query_name is not None: kwargs["related_query_name"] = self._related_query_name return name, path, args, kwargs def get_forward_related_filter(self, obj): """ Return the keyword arguments that when supplied to self.model.object.filter(), would select all instances related through this field to the remote obj. This is used to build the querysets returned by related descriptors. obj is an instance of self.related_field.model. """ return { "%s__%s" % (self.name, rh_field.name): getattr(obj, rh_field.attname) for _, rh_field in self.related_fields } def get_reverse_related_filter(self, obj): """ Complement to get_forward_related_filter(). Return the keyword arguments that when passed to self.related_field.model.object.filter() select all instances of self.related_field.model related through this field to obj. obj is an instance of self.model. """ base_q = Q.create( [ (rh_field.attname, getattr(obj, lh_field.attname)) for lh_field, rh_field in self.related_fields ] ) descriptor_filter = self.get_extra_descriptor_filter(obj) if isinstance(descriptor_filter, dict): return base_q & Q(**descriptor_filter) elif descriptor_filter: return base_q & descriptor_filter return base_q @property def swappable_setting(self): """ Get the setting that this is powered from for swapping, or None if it's not swapped in / marked with swappable=False. """ if self.swappable: # Work out string form of "to" if isinstance(self.remote_field.model, str): to_string = self.remote_field.model else: to_string = self.remote_field.model._meta.label return apps.get_swappable_settings_name(to_string) return None def set_attributes_from_rel(self): self.name = self.name or ( self.remote_field.model._meta.model_name + "_" + self.remote_field.model._meta.pk.name ) if self.verbose_name is None: self.verbose_name = self.remote_field.model._meta.verbose_name self.remote_field.set_field_name() def do_related_class(self, other, cls): self.set_attributes_from_rel() self.contribute_to_related_class(other, self.remote_field) def get_limit_choices_to(self): """ Return ``limit_choices_to`` for this model field. If it is a callable, it will be invoked and the result will be returned. 
""" if callable(self.remote_field.limit_choices_to): return self.remote_field.limit_choices_to() return self.remote_field.limit_choices_to def formfield(self, **kwargs): """ Pass ``limit_choices_to`` to the field being constructed. Only passes it if there is a type that supports related fields. This is a similar strategy used to pass the ``queryset`` to the field being constructed. """ defaults = {} if hasattr(self.remote_field, "get_related_field"): # If this is a callable, do not invoke it here. Just pass # it in the defaults for when the form class will later be # instantiated. limit_choices_to = self.remote_field.limit_choices_to defaults.update( { "limit_choices_to": limit_choices_to, } ) defaults.update(kwargs) return super().formfield(**defaults) def related_query_name(self): """ Define the name that can be used to identify this related object in a table-spanning query. """ return ( self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name ) @property def target_field(self): """ When filtering against this relation, return the field on the remote model against which the filtering should happen. """ target_fields = self.path_infos[-1].target_fields if len(target_fields) > 1: raise exceptions.FieldError( "The relation has multiple target fields, but only single target field " "was asked for" ) return target_fields[0] def get_cache_name(self): return self.name class ForeignObject(RelatedField): """ Abstraction of the ForeignKey relation to support multi-column relations. """ # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False requires_unique_target = True related_accessor_class = ReverseManyToOneDescriptor forward_related_accessor_class = ForwardManyToOneDescriptor rel_class = ForeignObjectRel def __init__( self, to, on_delete, from_fields, to_fields, rel=None, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, swappable=True, **kwargs, ): if rel is None: rel = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) super().__init__( rel=rel, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, **kwargs, ) self.from_fields = from_fields self.to_fields = to_fields self.swappable = swappable def __copy__(self): obj = super().__copy__() # Remove any cached PathInfo values. obj.__dict__.pop("path_infos", None) obj.__dict__.pop("reverse_path_infos", None) return obj def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_to_fields_exist(), *self._check_unique_target(), ] def _check_to_fields_exist(self): # Skip nonexistent models. if isinstance(self.remote_field.model, str): return [] errors = [] for to_field in self.to_fields: if to_field: try: self.remote_field.model._meta.get_field(to_field) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The to_field '%s' doesn't exist on the related " "model '%s'." 
% (to_field, self.remote_field.model._meta.label), obj=self, id="fields.E312", ) ) return errors def _check_unique_target(self): rel_is_string = isinstance(self.remote_field.model, str) if rel_is_string or not self.requires_unique_target: return [] try: self.foreign_related_fields except exceptions.FieldDoesNotExist: return [] if not self.foreign_related_fields: return [] unique_foreign_fields = { frozenset([f.name]) for f in self.remote_field.model._meta.get_fields() if getattr(f, "unique", False) } unique_foreign_fields.update( {frozenset(ut) for ut in self.remote_field.model._meta.unique_together} ) unique_foreign_fields.update( { frozenset(uc.fields) for uc in self.remote_field.model._meta.total_unique_constraints } ) foreign_fields = {f.name for f in self.foreign_related_fields} has_unique_constraint = any(u <= foreign_fields for u in unique_foreign_fields) if not has_unique_constraint and len(self.foreign_related_fields) > 1: field_combination = ", ".join( "'%s'" % rel_field.name for rel_field in self.foreign_related_fields ) model_name = self.remote_field.model.__name__ return [ checks.Error( "No subset of the fields %s on model '%s' is unique." % (field_combination, model_name), hint=( "Mark a single field as unique=True or add a set of " "fields to a unique constraint (via unique_together " "or a UniqueConstraint (without condition) in the " "model Meta.constraints)." ), obj=self, id="fields.E310", ) ] elif not has_unique_constraint: field_name = self.foreign_related_fields[0].name model_name = self.remote_field.model.__name__ return [ checks.Error( "'%s.%s' must be unique because it is referenced by " "a foreign key." % (model_name, field_name), hint=( "Add unique=True to this field or add a " "UniqueConstraint (without condition) in the model " "Meta.constraints." ), obj=self, id="fields.E311", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() kwargs["on_delete"] = self.remote_field.on_delete kwargs["from_fields"] = self.from_fields kwargs["to_fields"] = self.to_fields if self.remote_field.parent_link: kwargs["parent_link"] = self.remote_field.parent_link if isinstance(self.remote_field.model, str): if "." in self.remote_field.model: app_label, model_name = self.remote_field.model.split(".") kwargs["to"] = "%s.%s" % (app_label, model_name.lower()) else: kwargs["to"] = self.remote_field.model.lower() else: kwargs["to"] = self.remote_field.model._meta.label_lower # If swappable is True, then see if we're actually pointing to the target # of a swap. 
swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error if hasattr(kwargs["to"], "setting_name"): if kwargs["to"].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ForeignKey pointing to a model " "that is swapped in place of more than one model (%s and %s)" % (kwargs["to"].setting_name, swappable_setting) ) # Set it kwargs["to"] = SettingsReference( kwargs["to"], swappable_setting, ) return name, path, args, kwargs def resolve_related_fields(self): if not self.from_fields or len(self.from_fields) != len(self.to_fields): raise ValueError( "Foreign Object from and to fields must be the same non-zero length" ) if isinstance(self.remote_field.model, str): raise ValueError( "Related model %r cannot be resolved" % self.remote_field.model ) related_fields = [] for index in range(len(self.from_fields)): from_field_name = self.from_fields[index] to_field_name = self.to_fields[index] from_field = ( self if from_field_name == RECURSIVE_RELATIONSHIP_CONSTANT else self.opts.get_field(from_field_name) ) to_field = ( self.remote_field.model._meta.pk if to_field_name is None else self.remote_field.model._meta.get_field(to_field_name) ) related_fields.append((from_field, to_field)) return related_fields @cached_property def related_fields(self): return self.resolve_related_fields() @cached_property def reverse_related_fields(self): return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields] @cached_property def local_related_fields(self): return tuple(lhs_field for lhs_field, rhs_field in self.related_fields) @cached_property def foreign_related_fields(self): return tuple( rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field ) def get_local_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.local_related_fields) def get_foreign_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.foreign_related_fields) @staticmethod def get_instance_value_for_fields(instance, fields): ret = [] opts = instance._meta for field in fields: # Gotcha: in some cases (like fixture loading) a model can have # different values in parent_ptr_id and parent's id. So, use # instance.pk (that is, parent_ptr_id) when asked for instance.id. if field.primary_key: possible_parent_link = opts.get_ancestor_link(field.model) if ( not possible_parent_link or possible_parent_link.primary_key or possible_parent_link.model._meta.abstract ): ret.append(instance.pk) continue ret.append(getattr(instance, field.attname)) return tuple(ret) def get_attname_column(self): attname, column = super().get_attname_column() return attname, None def get_joining_columns(self, reverse_join=False): source = self.reverse_related_fields if reverse_join else self.related_fields return tuple( (lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source ) def get_reverse_joining_columns(self): return self.get_joining_columns(reverse_join=True) def get_extra_descriptor_filter(self, instance): """ Return an extra filter condition for related object fetching when user does 'instance.fieldname', that is the extra filter is used in the descriptor of the field. The filter should be either a dict usable in .filter(**kwargs) call or a Q-object. The condition will be ANDed together with the relation's joining columns. A parallel method is get_extra_restriction() which is used in JOIN and subquery conditions. 
""" return {} def get_extra_restriction(self, alias, related_alias): """ Return a pair condition used for joining and subquery pushdown. The condition is something that responds to as_sql(compiler, connection) method. Note that currently referring both the 'alias' and 'related_alias' will not work in some conditions, like subquery pushdown. A parallel method is get_extra_descriptor_filter() which is used in instance.fieldname related object fetching. """ return None def get_path_info(self, filtered_relation=None): """Get path from this field to the related model.""" opts = self.remote_field.model._meta from_opts = self.model._meta return [ PathInfo( from_opts=from_opts, to_opts=opts, target_fields=self.foreign_related_fields, join_field=self, m2m=False, direct=True, filtered_relation=filtered_relation, ) ] @cached_property def path_infos(self): return self.get_path_info() def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [ PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, ) ] @cached_property def reverse_path_infos(self): return self.get_reverse_path_info() @classmethod @functools.lru_cache(maxsize=None) def get_class_lookups(cls): bases = inspect.getmro(cls) bases = bases[: bases.index(ForeignObject) + 1] class_lookups = [parent.__dict__.get("class_lookups", {}) for parent in bases] return cls.merge_dicts(class_lookups) def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) setattr(cls, self.name, self.forward_related_accessor_class(self)) def contribute_to_related_class(self, cls, related): # Internal FK's - i.e., those with a related name ending with '+' - # and swapped models don't get a related descriptor. if ( not self.remote_field.is_hidden() and not related.related_model._meta.swapped ): setattr( cls._meta.concrete_model, related.get_accessor_name(), self.related_accessor_class(related), ) # While 'limit_choices_to' might be a callable, simply pass # it along for later - this is too early because it's still # model load time. if self.remote_field.limit_choices_to: cls._meta.related_fkey_lookups.append( self.remote_field.limit_choices_to ) ForeignObject.register_lookup(RelatedIn) ForeignObject.register_lookup(RelatedExact) ForeignObject.register_lookup(RelatedLessThan) ForeignObject.register_lookup(RelatedGreaterThan) ForeignObject.register_lookup(RelatedGreaterThanOrEqual) ForeignObject.register_lookup(RelatedLessThanOrEqual) ForeignObject.register_lookup(RelatedIsNull) class ForeignKey(ForeignObject): """ Provide a many-to-one relation by adding a column to the local model to hold the remote value. By default ForeignKey will target the pk of the remote model but this behavior can be changed by using the ``to_field`` argument. 
""" descriptor_class = ForeignKeyDeferredAttribute # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False rel_class = ManyToOneRel empty_strings_allowed = False default_error_messages = { "invalid": _("%(model)s instance with %(field)s %(value)r does not exist.") } description = _("Foreign Key (type determined by related field)") def __init__( self, to, on_delete, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, to_field=None, db_constraint=True, **kwargs, ): try: to._meta.model_name except AttributeError: if not isinstance(to, str): raise TypeError( "%s(%r) is invalid. First parameter to ForeignKey must be " "either a model, a model name, or the string %r" % ( self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT, ) ) else: # For backwards compatibility purposes, we need to *try* and set # the to_field during FK construction. It won't be guaranteed to # be correct until contribute_to_class is called. Refs #12190. to_field = to_field or (to._meta.pk and to._meta.pk.name) if not callable(on_delete): raise TypeError("on_delete must be callable.") kwargs["rel"] = self.rel_class( self, to, to_field, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) kwargs.setdefault("db_index", True) super().__init__( to, on_delete, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, from_fields=[RECURSIVE_RELATIONSHIP_CONSTANT], to_fields=[to_field], **kwargs, ) self.db_constraint = db_constraint def __class_getitem__(cls, *args, **kwargs): return cls def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_on_delete(), *self._check_unique(), ] def _check_on_delete(self): on_delete = getattr(self.remote_field, "on_delete", None) if on_delete == SET_NULL and not self.null: return [ checks.Error( "Field specifies on_delete=SET_NULL, but cannot be null.", hint=( "Set null=True argument on the field, or change the on_delete " "rule." ), obj=self, id="fields.E320", ) ] elif on_delete == SET_DEFAULT and not self.has_default(): return [ checks.Error( "Field specifies on_delete=SET_DEFAULT, but has no default value.", hint="Set a default value, or change the on_delete rule.", obj=self, id="fields.E321", ) ] else: return [] def _check_unique(self, **kwargs): return ( [ checks.Warning( "Setting unique=True on a ForeignKey has the same effect as using " "a OneToOneField.", hint=( "ForeignKey(unique=True) is usually better served by a " "OneToOneField." ), obj=self, id="fields.W342", ) ] if self.unique else [] ) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["to_fields"] del kwargs["from_fields"] # Handle the simpler arguments if self.db_index: del kwargs["db_index"] else: kwargs["db_index"] = False if self.db_constraint is not True: kwargs["db_constraint"] = self.db_constraint # Rel needs more work. 
to_meta = getattr(self.remote_field.model, "_meta", None) if self.remote_field.field_name and ( not to_meta or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name) ): kwargs["to_field"] = self.remote_field.field_name return name, path, args, kwargs def to_python(self, value): return self.target_field.to_python(value) @property def target_field(self): return self.foreign_related_fields[0] def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [ PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, ) ] def validate(self, value, model_instance): if self.remote_field.parent_link: return super().validate(value, model_instance) if value is None: return using = router.db_for_read(self.remote_field.model, instance=model_instance) qs = self.remote_field.model._base_manager.using(using).filter( **{self.remote_field.field_name: value} ) qs = qs.complex_filter(self.get_limit_choices_to()) if not qs.exists(): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={ "model": self.remote_field.model._meta.verbose_name, "pk": value, "field": self.remote_field.field_name, "value": value, }, # 'pk' is included for backwards compatibility ) def resolve_related_fields(self): related_fields = super().resolve_related_fields() for from_field, to_field in related_fields: if ( to_field and to_field.model != self.remote_field.model._meta.concrete_model ): raise exceptions.FieldError( "'%s.%s' refers to field '%s' which is not local to model " "'%s'." % ( self.model._meta.label, self.name, to_field.name, self.remote_field.model._meta.concrete_model._meta.label, ) ) return related_fields def get_attname(self): return "%s_id" % self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_default(self): """Return the to_field if the default value is an object.""" field_default = super().get_default() if isinstance(field_default, self.remote_field.model): return getattr(field_default, self.target_field.attname) return field_default def get_db_prep_save(self, value, connection): if value is None or ( value == "" and ( not self.target_field.empty_strings_allowed or connection.features.interprets_empty_strings_as_nulls ) ): return None else: return self.target_field.get_db_prep_save(value, connection=connection) def get_db_prep_value(self, value, connection, prepared=False): return self.target_field.get_db_prep_value(value, connection, prepared) def get_prep_value(self, value): return self.target_field.get_prep_value(value) def contribute_to_related_class(self, cls, related): super().contribute_to_related_class(cls, related) if self.remote_field.field_name is None: self.remote_field.field_name = cls._meta.pk.name def formfield(self, *, using=None, **kwargs): if isinstance(self.remote_field.model, str): raise ValueError( "Cannot create form field for %r yet, because " "its related model %r has not been loaded yet" % (self.name, self.remote_field.model) ) return super().formfield( **{ "form_class": forms.ModelChoiceField, "queryset": self.remote_field.model._default_manager.using(using), "to_field_name": self.remote_field.field_name, **kwargs, "blank": self.blank, } ) def db_check(self, connection): return None def db_type(self, connection): return 
self.target_field.rel_db_type(connection=connection) def db_parameters(self, connection): target_db_parameters = self.target_field.db_parameters(connection) return { "type": self.db_type(connection), "check": self.db_check(connection), "collation": target_db_parameters.get("collation"), } def convert_empty_strings(self, value, expression, connection): if (not value) and isinstance(value, str): return None return value def get_db_converters(self, connection): converters = super().get_db_converters(connection) if connection.features.interprets_empty_strings_as_nulls: converters += [self.convert_empty_strings] return converters def get_col(self, alias, output_field=None): if output_field is None: output_field = self.target_field while isinstance(output_field, ForeignKey): output_field = output_field.target_field if output_field is self: raise ValueError("Cannot resolve output_field.") return super().get_col(alias, output_field) class OneToOneField(ForeignKey): """ A OneToOneField is essentially the same as a ForeignKey, with the exception that it always carries a "unique" constraint with it and the reverse relation always returns the object pointed to (since there will only ever be one), rather than returning a list. """ # Field flags many_to_many = False many_to_one = False one_to_many = False one_to_one = True related_accessor_class = ReverseOneToOneDescriptor forward_related_accessor_class = ForwardOneToOneDescriptor rel_class = OneToOneRel description = _("One-to-one relationship") def __init__(self, to, on_delete, to_field=None, **kwargs): kwargs["unique"] = True super().__init__(to, on_delete, to_field=to_field, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if "unique" in kwargs: del kwargs["unique"] return name, path, args, kwargs def formfield(self, **kwargs): if self.remote_field.parent_link: return None return super().formfield(**kwargs) def save_form_data(self, instance, data): if isinstance(data, self.remote_field.model): setattr(instance, self.name, data) else: setattr(instance, self.attname, data) # Remote field object must be cleared otherwise Model.save() # will reassign attname using the related object pk. if data is None: setattr(instance, self.name, data) def _check_unique(self, **kwargs): # Override ForeignKey since check isn't applicable here. return [] def create_many_to_many_intermediary_model(field, klass): from django.db import models def set_managed(model, related, through): through._meta.managed = model._meta.managed or related._meta.managed to_model = resolve_relation(klass, field.remote_field.model) name = "%s_%s" % (klass._meta.object_name, field.name) lazy_related_operation(set_managed, klass, to_model, name) to = make_model_tuple(to_model)[1] from_ = klass._meta.model_name if to == from_: to = "to_%s" % to from_ = "from_%s" % from_ meta = type( "Meta", (), { "db_table": field._get_m2m_db_table(klass._meta), "auto_created": klass, "app_label": klass._meta.app_label, "db_tablespace": klass._meta.db_tablespace, "unique_together": (from_, to), "verbose_name": _("%(from)s-%(to)s relationship") % {"from": from_, "to": to}, "verbose_name_plural": _("%(from)s-%(to)s relationships") % {"from": from_, "to": to}, "apps": field.model._meta.apps, }, ) # Construct and return the new class. 
return type( name, (models.Model,), { "Meta": meta, "__module__": klass.__module__, from_: models.ForeignKey( klass, related_name="%s+" % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ), to: models.ForeignKey( to_model, related_name="%s+" % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ), }, ) class ManyToManyField(RelatedField): """ Provide a many-to-many relation by using an intermediary model that holds two ForeignKey fields pointed at the two sides of the relation. Unless a ``through`` model was provided, ManyToManyField will use the create_many_to_many_intermediary_model factory to automatically generate the intermediary model. """ # Field flags many_to_many = True many_to_one = False one_to_many = False one_to_one = False rel_class = ManyToManyRel description = _("Many-to-many relationship") def __init__( self, to, related_name=None, related_query_name=None, limit_choices_to=None, symmetrical=None, through=None, through_fields=None, db_constraint=True, db_table=None, swappable=True, **kwargs, ): try: to._meta except AttributeError: if not isinstance(to, str): raise TypeError( "%s(%r) is invalid. First parameter to ManyToManyField " "must be either a model, a model name, or the string %r" % ( self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT, ) ) if symmetrical is None: symmetrical = to == RECURSIVE_RELATIONSHIP_CONSTANT if through is not None and db_table is not None: raise ValueError( "Cannot specify a db_table if an intermediary model is used." ) kwargs["rel"] = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, symmetrical=symmetrical, through=through, through_fields=through_fields, db_constraint=db_constraint, ) self.has_null_arg = "null" in kwargs super().__init__( related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, **kwargs, ) self.db_table = db_table self.swappable = swappable def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_unique(**kwargs), *self._check_relationship_model(**kwargs), *self._check_ignored_options(**kwargs), *self._check_table_uniqueness(**kwargs), ] def _check_unique(self, **kwargs): if self.unique: return [ checks.Error( "ManyToManyFields cannot be unique.", obj=self, id="fields.E330", ) ] return [] def _check_ignored_options(self, **kwargs): warnings = [] if self.has_null_arg: warnings.append( checks.Warning( "null has no effect on ManyToManyField.", obj=self, id="fields.W340", ) ) if self._validators: warnings.append( checks.Warning( "ManyToManyField does not support validators.", obj=self, id="fields.W341", ) ) if self.remote_field.symmetrical and self._related_name: warnings.append( checks.Warning( "related_name has no effect on ManyToManyField " 'with a symmetrical relationship, e.g. to "self".', obj=self, id="fields.W345", ) ) return warnings def _check_relationship_model(self, from_model=None, **kwargs): if hasattr(self.remote_field.through, "_meta"): qualified_model_name = "%s.%s" % ( self.remote_field.through._meta.app_label, self.remote_field.through.__name__, ) else: qualified_model_name = self.remote_field.through errors = [] if self.remote_field.through not in self.opts.apps.get_models( include_auto_created=True ): # The relationship model is not installed. 
errors.append( checks.Error( "Field specifies a many-to-many relation through model " "'%s', which has not been installed." % qualified_model_name, obj=self, id="fields.E331", ) ) else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." ) # Set some useful local variables to_model = resolve_relation(from_model, self.remote_field.model) from_model_name = from_model._meta.object_name if isinstance(to_model, str): to_model_name = to_model else: to_model_name = to_model._meta.object_name relationship_model_name = self.remote_field.through._meta.object_name self_referential = from_model == to_model # Count foreign keys in intermediate model if self_referential: seen_self = sum( from_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) if seen_self > 2 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than two foreign keys " "to '%s', which is ambiguous. You must specify " "which two foreign keys Django should use via the " "through_fields keyword argument." % (self, from_model_name), hint=( "Use through_fields to specify which two foreign keys " "Django should use." ), obj=self.remote_field.through, id="fields.E333", ) ) else: # Count foreign keys in relationship model seen_from = sum( from_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) seen_to = sum( to_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) if seen_from > 1 and not self.remote_field.through_fields: errors.append( checks.Error( ( "The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "from '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument." ) % (self, from_model_name), hint=( "If you want to create a recursive relationship, " 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id="fields.E334", ) ) if seen_to > 1 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "to '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument." % (self, to_model_name), hint=( "If you want to create a recursive relationship, " 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id="fields.E335", ) ) if seen_from == 0 or seen_to == 0: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it does not have a foreign key to '%s' or '%s'." % (self, from_model_name, to_model_name), obj=self.remote_field.through, id="fields.E336", ) ) # Validate `through_fields`. if self.remote_field.through_fields is not None: # Validate that we're given an iterable of at least two items # and that none of them is "falsy". if not ( len(self.remote_field.through_fields) >= 2 and self.remote_field.through_fields[0] and self.remote_field.through_fields[1] ): errors.append( checks.Error( "Field specifies 'through_fields' but does not provide " "the names of the two link fields that should be used " "for the relation through model '%s'." 
% qualified_model_name, hint=( "Make sure you specify 'through_fields' as " "through_fields=('field1', 'field2')" ), obj=self, id="fields.E337", ) ) # Validate the given through fields -- they should be actual # fields on the through model, and also be foreign keys to the # expected models. else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." ) source, through, target = ( from_model, self.remote_field.through, self.remote_field.model, ) source_field_name, target_field_name = self.remote_field.through_fields[ :2 ] for field_name, related_model in ( (source_field_name, source), (target_field_name, target), ): possible_field_names = [] for f in through._meta.fields: if ( hasattr(f, "remote_field") and getattr(f.remote_field, "model", None) == related_model ): possible_field_names.append(f.name) if possible_field_names: hint = ( "Did you mean one of the following foreign keys to '%s': " "%s?" % ( related_model._meta.object_name, ", ".join(possible_field_names), ) ) else: hint = None try: field = through._meta.get_field(field_name) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The intermediary model '%s' has no field '%s'." % (qualified_model_name, field_name), hint=hint, obj=self, id="fields.E338", ) ) else: if not ( hasattr(field, "remote_field") and getattr(field.remote_field, "model", None) == related_model ): errors.append( checks.Error( "'%s.%s' is not a foreign key to '%s'." % ( through._meta.object_name, field_name, related_model._meta.object_name, ), hint=hint, obj=self, id="fields.E339", ) ) return errors def _check_table_uniqueness(self, **kwargs): if ( isinstance(self.remote_field.through, str) or not self.remote_field.through._meta.managed ): return [] registered_tables = { model._meta.db_table: model for model in self.opts.apps.get_models(include_auto_created=True) if model != self.remote_field.through and model._meta.managed } m2m_db_table = self.m2m_db_table() model = registered_tables.get(m2m_db_table) # The second condition allows multiple m2m relations on a model if # some point to a through model that proxies another through model. if ( model and model._meta.concrete_model != self.remote_field.through._meta.concrete_model ): if model._meta.auto_created: def _get_field_name(model): for field in model._meta.auto_created._meta.many_to_many: if field.remote_field.through is model: return field.name opts = model._meta.auto_created._meta clashing_obj = "%s.%s" % (opts.label, _get_field_name(model)) else: clashing_obj = model._meta.label if settings.DATABASE_ROUTERS: error_class, error_id = checks.Warning, "fields.W344" error_hint = ( "You have configured settings.DATABASE_ROUTERS. Verify " "that the table of %r is correctly routed to a separate " "database." % clashing_obj ) else: error_class, error_id = checks.Error, "fields.E340" error_hint = None return [ error_class( "The field's intermediary table '%s' clashes with the " "table name of '%s'." % (m2m_db_table, clashing_obj), obj=self, hint=error_hint, id=error_id, ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() # Handle the simpler arguments. if self.db_table is not None: kwargs["db_table"] = self.db_table if self.remote_field.db_constraint is not True: kwargs["db_constraint"] = self.remote_field.db_constraint # Lowercase model names as they should be treated as case-insensitive. if isinstance(self.remote_field.model, str): if "." 
in self.remote_field.model: app_label, model_name = self.remote_field.model.split(".") kwargs["to"] = "%s.%s" % (app_label, model_name.lower()) else: kwargs["to"] = self.remote_field.model.lower() else: kwargs["to"] = self.remote_field.model._meta.label_lower if getattr(self.remote_field, "through", None) is not None: if isinstance(self.remote_field.through, str): kwargs["through"] = self.remote_field.through elif not self.remote_field.through._meta.auto_created: kwargs["through"] = self.remote_field.through._meta.label # If swappable is True, then see if we're actually pointing to the target # of a swap. swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error. if hasattr(kwargs["to"], "setting_name"): if kwargs["to"].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ManyToManyField pointing to a " "model that is swapped in place of more than one model " "(%s and %s)" % (kwargs["to"].setting_name, swappable_setting) ) kwargs["to"] = SettingsReference( kwargs["to"], swappable_setting, ) return name, path, args, kwargs def _get_path_info(self, direct=False, filtered_relation=None): """Called by both direct and indirect m2m traversal.""" int_model = self.remote_field.through linkfield1 = int_model._meta.get_field(self.m2m_field_name()) linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name()) if direct: join1infos = linkfield1.reverse_path_infos if filtered_relation: join2infos = linkfield2.get_path_info(filtered_relation) else: join2infos = linkfield2.path_infos else: join1infos = linkfield2.reverse_path_infos if filtered_relation: join2infos = linkfield1.get_path_info(filtered_relation) else: join2infos = linkfield1.path_infos # Get join infos between the last model of join 1 and the first model # of join 2. Assume the only reason these may differ is due to model # inheritance. join1_final = join1infos[-1].to_opts join2_initial = join2infos[0].from_opts if join1_final is join2_initial: intermediate_infos = [] elif issubclass(join1_final.model, join2_initial.model): intermediate_infos = join1_final.get_path_to_parent(join2_initial.model) else: intermediate_infos = join2_initial.get_path_from_parent(join1_final.model) return [*join1infos, *intermediate_infos, *join2infos] def get_path_info(self, filtered_relation=None): return self._get_path_info(direct=True, filtered_relation=filtered_relation) @cached_property def path_infos(self): return self.get_path_info() def get_reverse_path_info(self, filtered_relation=None): return self._get_path_info(direct=False, filtered_relation=filtered_relation) @cached_property def reverse_path_infos(self): return self.get_reverse_path_info() def _get_m2m_db_table(self, opts): """ Function that can be curried to provide the m2m table name for this relation. """ if self.remote_field.through is not None: return self.remote_field.through._meta.db_table elif self.db_table: return self.db_table else: m2m_table_name = "%s_%s" % (utils.strip_quotes(opts.db_table), self.name) return utils.truncate_name(m2m_table_name, connection.ops.max_name_length()) def _get_m2m_attr(self, related, attr): """ Function that can be curried to provide the source accessor or DB column name for the m2m table. 
""" cache_attr = "_m2m_%s_cache" % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[0] else: link_field_name = None for f in self.remote_field.through._meta.fields: if ( f.is_relation and f.remote_field.model == related.related_model and (link_field_name is None or link_field_name == f.name) ): setattr(self, cache_attr, getattr(f, attr)) return getattr(self, cache_attr) def _get_m2m_reverse_attr(self, related, attr): """ Function that can be curried to provide the related accessor or DB column name for the m2m table. """ cache_attr = "_m2m_reverse_%s_cache" % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) found = False if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[1] else: link_field_name = None for f in self.remote_field.through._meta.fields: if f.is_relation and f.remote_field.model == related.model: if link_field_name is None and related.related_model == related.model: # If this is an m2m-intermediate to self, # the first foreign key you find will be # the source column. Keep searching for # the second foreign key. if found: setattr(self, cache_attr, getattr(f, attr)) break else: found = True elif link_field_name is None or link_field_name == f.name: setattr(self, cache_attr, getattr(f, attr)) break return getattr(self, cache_attr) def contribute_to_class(self, cls, name, **kwargs): # To support multiple relations to self, it's useful to have a non-None # related name on symmetrical relations for internal reasons. The # concept doesn't make a lot of sense externally ("you want me to # specify *what* on my non-reversible relation?!"), so we set it up # automatically. The funky name reduces the chance of an accidental # clash. if self.remote_field.symmetrical and ( self.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT or self.remote_field.model == cls._meta.object_name ): self.remote_field.related_name = "%s_rel_+" % name elif self.remote_field.is_hidden(): # If the backwards relation is disabled, replace the original # related_name with one generated from the m2m field name. Django # still uses backwards relations internally and we need to avoid # clashes between multiple m2m fields with related_name == '+'. self.remote_field.related_name = "_%s_%s_%s_+" % ( cls._meta.app_label, cls.__name__.lower(), name, ) super().contribute_to_class(cls, name, **kwargs) # The intermediate m2m model is not auto created if: # 1) There is a manually specified intermediate, or # 2) The class owning the m2m field is abstract. # 3) The class owning the m2m field has been swapped out. if not cls._meta.abstract: if self.remote_field.through: def resolve_through_model(_, model, field): field.remote_field.through = model lazy_related_operation( resolve_through_model, cls, self.remote_field.through, field=self ) elif not cls._meta.swapped: self.remote_field.through = create_many_to_many_intermediary_model( self, cls ) # Add the descriptor for the m2m relation. setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False)) # Set up the accessor for the m2m table name for the relation. self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta) def contribute_to_related_class(self, cls, related): # Internal M2Ms (i.e., those with a related name ending with '+') # and swapped models don't get a related descriptor. 
if ( not self.remote_field.is_hidden() and not related.related_model._meta.swapped ): setattr( cls, related.get_accessor_name(), ManyToManyDescriptor(self.remote_field, reverse=True), ) # Set up the accessors for the column names on the m2m table. self.m2m_column_name = partial(self._get_m2m_attr, related, "column") self.m2m_reverse_name = partial(self._get_m2m_reverse_attr, related, "column") self.m2m_field_name = partial(self._get_m2m_attr, related, "name") self.m2m_reverse_field_name = partial( self._get_m2m_reverse_attr, related, "name" ) get_m2m_rel = partial(self._get_m2m_attr, related, "remote_field") self.m2m_target_field_name = lambda: get_m2m_rel().field_name get_m2m_reverse_rel = partial( self._get_m2m_reverse_attr, related, "remote_field" ) self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name def set_attributes_from_rel(self): pass def value_from_object(self, obj): return [] if obj.pk is None else list(getattr(obj, self.attname).all()) def save_form_data(self, instance, data): getattr(instance, self.attname).set(data) def formfield(self, *, using=None, **kwargs): defaults = { "form_class": forms.ModelMultipleChoiceField, "queryset": self.remote_field.model._default_manager.using(using), **kwargs, } # If initial is passed in, it's a list of related objects, but the # MultipleChoiceField takes a list of IDs. if defaults.get("initial") is not None: initial = defaults["initial"] if callable(initial): initial = initial() defaults["initial"] = [i.pk for i in initial] return super().formfield(**defaults) def db_check(self, connection): return None def db_type(self, connection): # A ManyToManyField is not represented by a single column, # so return None. return None def db_parameters(self, connection): return {"type": None, "check": None}
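# Illustrative sketch (not part of the original source): how the machinery
# above fits together at the model level. The model names (Article, Tag)
# and app label below are hypothetical.
#
#     class Tag(models.Model):
#         name = models.CharField(max_length=50)
#
#     class Article(models.Model):
#         tags = models.ManyToManyField(Tag, related_name="articles")
#
# contribute_to_class() installs a ManyToManyDescriptor on Article.tags and
# auto-creates the intermediary model; contribute_to_related_class() wires
# up the m2m_column_name()/m2m_field_name() accessors used to build joins:
#
#     >>> article.tags.add(tag)      # forward manager
#     >>> tag.articles.all()         # reverse manager
#     >>> Article.tags.through       # auto-created intermediary model
#
# With no explicit "through" model, _get_m2m_db_table() derives the table
# name "<app_label>_article_tags", truncated to the backend's
# max_name_length().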
import json from django import forms from django.core import checks, exceptions from django.db import NotSupportedError, connections, router from django.db.models import lookups from django.db.models.constants import LOOKUP_SEP from django.db.models.fields import TextField from django.db.models.lookups import PostgresOperatorLookup, Transform from django.utils.translation import gettext_lazy as _ from . import Field from .mixins import CheckFieldDefaultMixin __all__ = ["JSONField"] class JSONField(CheckFieldDefaultMixin, Field): empty_strings_allowed = False description = _("A JSON object") default_error_messages = { "invalid": _("Value must be valid JSON."), } _default_hint = ("dict", "{}") def __init__( self, verbose_name=None, name=None, encoder=None, decoder=None, **kwargs, ): if encoder and not callable(encoder): raise ValueError("The encoder parameter must be a callable object.") if decoder and not callable(decoder): raise ValueError("The decoder parameter must be a callable object.") self.encoder = encoder self.decoder = decoder super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super().check(**kwargs) databases = kwargs.get("databases") or [] errors.extend(self._check_supported(databases)) return errors def _check_supported(self, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if ( self.model._meta.required_db_vendor and self.model._meta.required_db_vendor != connection.vendor ): continue if not ( "supports_json_field" in self.model._meta.required_db_features or connection.features.supports_json_field ): errors.append( checks.Error( "%s does not support JSONFields." % connection.display_name, obj=self.model, id="fields.E180", ) ) return errors def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.encoder is not None: kwargs["encoder"] = self.encoder if self.decoder is not None: kwargs["decoder"] = self.decoder return name, path, args, kwargs def from_db_value(self, value, expression, connection): if value is None: return value # Some backends (SQLite at least) extract non-string values in their # SQL datatypes. 
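        # For example, SQLite's JSON_EXTRACT() may return a numeric key's
        # value as a native int or float; such values are returned as-is
        # instead of being passed to json.loads().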
if isinstance(expression, KeyTransform) and not isinstance(value, str): return value try: return json.loads(value, cls=self.decoder) except json.JSONDecodeError: return value def get_internal_type(self): return "JSONField" def get_prep_value(self, value): if value is None: return value return json.dumps(value, cls=self.encoder) def get_transform(self, name): transform = super().get_transform(name) if transform: return transform return KeyTransformFactory(name) def validate(self, value, model_instance): super().validate(value, model_instance) try: json.dumps(value, cls=self.encoder) except TypeError: raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def value_to_string(self, obj): return self.value_from_object(obj) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.JSONField, "encoder": self.encoder, "decoder": self.decoder, **kwargs, } ) def compile_json_path(key_transforms, include_root=True): path = ["$"] if include_root else [] for key_transform in key_transforms: try: num = int(key_transform) except ValueError: # non-integer path.append(".") path.append(json.dumps(key_transform)) else: path.append("[%s]" % num) return "".join(path) class DataContains(PostgresOperatorLookup): lookup_name = "contains" postgres_operator = "@>" def as_sql(self, compiler, connection): if not connection.features.supports_json_field_contains: raise NotSupportedError( "contains lookup is not supported on this database backend." ) lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params = tuple(lhs_params) + tuple(rhs_params) return "JSON_CONTAINS(%s, %s)" % (lhs, rhs), params class ContainedBy(PostgresOperatorLookup): lookup_name = "contained_by" postgres_operator = "<@" def as_sql(self, compiler, connection): if not connection.features.supports_json_field_contains: raise NotSupportedError( "contained_by lookup is not supported on this database backend." ) lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params = tuple(rhs_params) + tuple(lhs_params) return "JSON_CONTAINS(%s, %s)" % (rhs, lhs), params class HasKeyLookup(PostgresOperatorLookup): logical_operator = None def compile_json_path_final_key(self, key_transform): # Compile the final key without interpreting ints as array elements. return ".%s" % json.dumps(key_transform) def as_sql(self, compiler, connection, template=None): # Process JSON path from the left-hand side. if isinstance(self.lhs, KeyTransform): lhs, lhs_params, lhs_key_transforms = self.lhs.preprocess_lhs( compiler, connection ) lhs_json_path = compile_json_path(lhs_key_transforms) else: lhs, lhs_params = self.process_lhs(compiler, connection) lhs_json_path = "$" sql = template % lhs # Process JSON path from the right-hand side. rhs = self.rhs rhs_params = [] if not isinstance(rhs, (list, tuple)): rhs = [rhs] for key in rhs: if isinstance(key, KeyTransform): *_, rhs_key_transforms = key.preprocess_lhs(compiler, connection) else: rhs_key_transforms = [key] *rhs_key_transforms, final_key = rhs_key_transforms rhs_json_path = compile_json_path(rhs_key_transforms, include_root=False) rhs_json_path += self.compile_json_path_final_key(final_key) rhs_params.append(lhs_json_path + rhs_json_path) # Add condition for each key. 
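        # For example, has_keys=["a", "b"] repeats the template once per key
        # and joins the copies with this lookup's logical_operator (" AND "
        # for HasKeys, " OR " for HasAnyKeys).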
if self.logical_operator: sql = "(%s)" % self.logical_operator.join([sql] * len(rhs_params)) return sql, tuple(lhs_params) + tuple(rhs_params) def as_mysql(self, compiler, connection): return self.as_sql( compiler, connection, template="JSON_CONTAINS_PATH(%s, 'one', %%s)" ) def as_oracle(self, compiler, connection): sql, params = self.as_sql( compiler, connection, template="JSON_EXISTS(%s, '%%s')" ) # Add paths directly into SQL because path expressions cannot be passed # as bind variables on Oracle. return sql % tuple(params), [] def as_postgresql(self, compiler, connection): if isinstance(self.rhs, KeyTransform): *_, rhs_key_transforms = self.rhs.preprocess_lhs(compiler, connection) for key in rhs_key_transforms[:-1]: self.lhs = KeyTransform(key, self.lhs) self.rhs = rhs_key_transforms[-1] return super().as_postgresql(compiler, connection) def as_sqlite(self, compiler, connection): return self.as_sql( compiler, connection, template="JSON_TYPE(%s, %%s) IS NOT NULL" ) class HasKey(HasKeyLookup): lookup_name = "has_key" postgres_operator = "?" prepare_rhs = False class HasKeys(HasKeyLookup): lookup_name = "has_keys" postgres_operator = "?&" logical_operator = " AND " def get_prep_lookup(self): return [str(item) for item in self.rhs] class HasAnyKeys(HasKeys): lookup_name = "has_any_keys" postgres_operator = "?|" logical_operator = " OR " class HasKeyOrArrayIndex(HasKey): def compile_json_path_final_key(self, key_transform): return compile_json_path([key_transform], include_root=False) class CaseInsensitiveMixin: """ Mixin to allow case-insensitive comparison of JSON values on MySQL. MySQL handles strings used in JSON context using the utf8mb4_bin collation. Because utf8mb4_bin is a binary collation, comparison of JSON values is case-sensitive. """ def process_lhs(self, compiler, connection): lhs, lhs_params = super().process_lhs(compiler, connection) if connection.vendor == "mysql": return "LOWER(%s)" % lhs, lhs_params return lhs, lhs_params def process_rhs(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) if connection.vendor == "mysql": return "LOWER(%s)" % rhs, rhs_params return rhs, rhs_params class JSONExact(lookups.Exact): can_use_none_as_rhs = True def process_rhs(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) # Treat None lookup values as null. if rhs == "%s" and rhs_params == [None]: rhs_params = ["null"] if connection.vendor == "mysql": func = ["JSON_EXTRACT(%s, '$')"] * len(rhs_params) rhs %= tuple(func) return rhs, rhs_params class JSONIContains(CaseInsensitiveMixin, lookups.IContains): pass JSONField.register_lookup(DataContains) JSONField.register_lookup(ContainedBy) JSONField.register_lookup(HasKey) JSONField.register_lookup(HasKeys) JSONField.register_lookup(HasAnyKeys) JSONField.register_lookup(JSONExact) JSONField.register_lookup(JSONIContains) class KeyTransform(Transform): postgres_operator = "->" postgres_nested_operator = "#>" def __init__(self, key_name, *args, **kwargs): super().__init__(*args, **kwargs) self.key_name = str(key_name) def preprocess_lhs(self, compiler, connection): key_transforms = [self.key_name] previous = self.lhs while isinstance(previous, KeyTransform): key_transforms.insert(0, previous.key_name) previous = previous.lhs lhs, params = compiler.compile(previous) if connection.vendor == "oracle": # Escape string-formatting. 
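            # Oracle JSON paths are interpolated into the SQL with the %
            # operator (see as_oracle() below), so a literal "%" in a key
            # name would otherwise be read as a format specifier.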
key_transforms = [key.replace("%", "%%") for key in key_transforms] return lhs, params, key_transforms def as_mysql(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) return "JSON_EXTRACT(%s, %%s)" % lhs, tuple(params) + (json_path,) def as_oracle(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) return ( "COALESCE(JSON_QUERY(%s, '%s'), JSON_VALUE(%s, '%s'))" % ((lhs, json_path) * 2) ), tuple(params) * 2 def as_postgresql(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) if len(key_transforms) > 1: sql = "(%s %s %%s)" % (lhs, self.postgres_nested_operator) return sql, tuple(params) + (key_transforms,) try: lookup = int(self.key_name) except ValueError: lookup = self.key_name return "(%s %s %%s)" % (lhs, self.postgres_operator), tuple(params) + (lookup,) def as_sqlite(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) datatype_values = ",".join( [repr(datatype) for datatype in connection.ops.jsonfield_datatype_values] ) return ( "(CASE WHEN JSON_TYPE(%s, %%s) IN (%s) " "THEN JSON_TYPE(%s, %%s) ELSE JSON_EXTRACT(%s, %%s) END)" ) % (lhs, datatype_values, lhs, lhs), (tuple(params) + (json_path,)) * 3 class KeyTextTransform(KeyTransform): postgres_operator = "->>" postgres_nested_operator = "#>>" output_field = TextField() def as_mysql(self, compiler, connection): if connection.mysql_is_mariadb: # MariaDB doesn't support -> and ->> operators (see MDEV-13594). sql, params = super().as_mysql(compiler, connection) return "JSON_UNQUOTE(%s)" % sql, params else: lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) return "(%s ->> %%s)" % lhs, tuple(params) + (json_path,) @classmethod def from_lookup(cls, lookup): transform, *keys = lookup.split(LOOKUP_SEP) if not keys: raise ValueError("Lookup must contain key or index transforms.") for key in keys: transform = cls(key, transform) return transform KT = KeyTextTransform.from_lookup class KeyTransformTextLookupMixin: """ Mixin for combining with a lookup expecting a text lhs from a JSONField key lookup. On PostgreSQL, make use of the ->> operator instead of casting key values to text and performing the lookup on the resulting representation. """ def __init__(self, key_transform, *args, **kwargs): if not isinstance(key_transform, KeyTransform): raise TypeError( "Transform should be an instance of KeyTransform in order to " "use this lookup." ) key_text_transform = KeyTextTransform( key_transform.key_name, *key_transform.source_expressions, **key_transform.extra, ) super().__init__(key_text_transform, *args, **kwargs) class KeyTransformIsNull(lookups.IsNull): # key__isnull=False is the same as has_key='key' def as_oracle(self, compiler, connection): sql, params = HasKeyOrArrayIndex( self.lhs.lhs, self.lhs.key_name, ).as_oracle(compiler, connection) if not self.rhs: return sql, params # Column doesn't have a key or IS NULL. 
lhs, lhs_params, _ = self.lhs.preprocess_lhs(compiler, connection) return "(NOT %s OR %s IS NULL)" % (sql, lhs), tuple(params) + tuple(lhs_params) def as_sqlite(self, compiler, connection): template = "JSON_TYPE(%s, %%s) IS NULL" if not self.rhs: template = "JSON_TYPE(%s, %%s) IS NOT NULL" return HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name).as_sql( compiler, connection, template=template, ) class KeyTransformIn(lookups.In): def resolve_expression_parameter(self, compiler, connection, sql, param): sql, params = super().resolve_expression_parameter( compiler, connection, sql, param, ) if ( not hasattr(param, "as_sql") and not connection.features.has_native_json_field ): if connection.vendor == "oracle": value = json.loads(param) sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')" if isinstance(value, (list, dict)): sql %= "JSON_QUERY" else: sql %= "JSON_VALUE" elif connection.vendor == "mysql" or ( connection.vendor == "sqlite" and params[0] not in connection.ops.jsonfield_datatype_values ): sql = "JSON_EXTRACT(%s, '$')" if connection.vendor == "mysql" and connection.mysql_is_mariadb: sql = "JSON_UNQUOTE(%s)" % sql return sql, params class KeyTransformExact(JSONExact): def process_rhs(self, compiler, connection): if isinstance(self.rhs, KeyTransform): return super(lookups.Exact, self).process_rhs(compiler, connection) rhs, rhs_params = super().process_rhs(compiler, connection) if connection.vendor == "oracle": func = [] sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')" for value in rhs_params: value = json.loads(value) if isinstance(value, (list, dict)): func.append(sql % "JSON_QUERY") else: func.append(sql % "JSON_VALUE") rhs %= tuple(func) elif connection.vendor == "sqlite": func = [] for value in rhs_params: if value in connection.ops.jsonfield_datatype_values: func.append("%s") else: func.append("JSON_EXTRACT(%s, '$')") rhs %= tuple(func) return rhs, rhs_params def as_oracle(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) if rhs_params == ["null"]: # Field has key and it's NULL. 
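            # That is, key__exact=None matches a key whose value is JSON
            # null, which is distinct from the key being absent entirely.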
has_key_expr = HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name) has_key_sql, has_key_params = has_key_expr.as_oracle(compiler, connection) is_null_expr = self.lhs.get_lookup("isnull")(self.lhs, True) is_null_sql, is_null_params = is_null_expr.as_sql(compiler, connection) return ( "%s AND %s" % (has_key_sql, is_null_sql), tuple(has_key_params) + tuple(is_null_params), ) return super().as_sql(compiler, connection) class KeyTransformIExact( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IExact ): pass class KeyTransformIContains( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IContains ): pass class KeyTransformStartsWith(KeyTransformTextLookupMixin, lookups.StartsWith): pass class KeyTransformIStartsWith( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IStartsWith ): pass class KeyTransformEndsWith(KeyTransformTextLookupMixin, lookups.EndsWith): pass class KeyTransformIEndsWith( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IEndsWith ): pass class KeyTransformRegex(KeyTransformTextLookupMixin, lookups.Regex): pass class KeyTransformIRegex( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IRegex ): pass class KeyTransformNumericLookupMixin: def process_rhs(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) if not connection.features.has_native_json_field: rhs_params = [json.loads(value) for value in rhs_params] return rhs, rhs_params class KeyTransformLt(KeyTransformNumericLookupMixin, lookups.LessThan): pass class KeyTransformLte(KeyTransformNumericLookupMixin, lookups.LessThanOrEqual): pass class KeyTransformGt(KeyTransformNumericLookupMixin, lookups.GreaterThan): pass class KeyTransformGte(KeyTransformNumericLookupMixin, lookups.GreaterThanOrEqual): pass KeyTransform.register_lookup(KeyTransformIn) KeyTransform.register_lookup(KeyTransformExact) KeyTransform.register_lookup(KeyTransformIExact) KeyTransform.register_lookup(KeyTransformIsNull) KeyTransform.register_lookup(KeyTransformIContains) KeyTransform.register_lookup(KeyTransformStartsWith) KeyTransform.register_lookup(KeyTransformIStartsWith) KeyTransform.register_lookup(KeyTransformEndsWith) KeyTransform.register_lookup(KeyTransformIEndsWith) KeyTransform.register_lookup(KeyTransformRegex) KeyTransform.register_lookup(KeyTransformIRegex) KeyTransform.register_lookup(KeyTransformLt) KeyTransform.register_lookup(KeyTransformLte) KeyTransform.register_lookup(KeyTransformGt) KeyTransform.register_lookup(KeyTransformGte) class KeyTransformFactory: def __init__(self, key_name): self.key_name = key_name def __call__(self, *args, **kwargs): return KeyTransform(self.key_name, *args, **kwargs)
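# Illustrative sketch (not part of the original source): how the lookups and
# transforms registered above are reached from the ORM. The model and field
# names are hypothetical.
#
#     class Dog(models.Model):
#         data = models.JSONField()
#
#     >>> Dog.objects.filter(data__breed="collie")          # KeyTransformExact
#     >>> Dog.objects.filter(data__has_key="owner")         # HasKey
#     >>> Dog.objects.filter(data__owner__name__icontains="bob")
#     >>> from django.db.models.fields.json import KT
#     >>> Dog.objects.annotate(owner_name=KT("data__owner__name"))
#
# Any "__segment" that isn't a registered lookup is turned into a
# KeyTransform by JSONField.get_transform() via KeyTransformFactory, so
# arbitrarily deep key paths compose without explicit registration.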
import datetime import posixpath from django import forms from django.core import checks from django.core.files.base import File from django.core.files.images import ImageFile from django.core.files.storage import Storage, default_storage from django.core.files.utils import validate_file_name from django.db.models import signals from django.db.models.fields import Field from django.db.models.query_utils import DeferredAttribute from django.db.models.utils import AltersData from django.utils.translation import gettext_lazy as _ class FieldFile(File, AltersData): def __init__(self, instance, field, name): super().__init__(None, name) self.instance = instance self.field = field self.storage = field.storage self._committed = True def __eq__(self, other): # Older code may be expecting FileField values to be simple strings. # By overriding the == operator, it can remain backwards compatibility. if hasattr(other, "name"): return self.name == other.name return self.name == other def __hash__(self): return hash(self.name) # The standard File contains most of the necessary properties, but # FieldFiles can be instantiated without a name, so that needs to # be checked for here. def _require_file(self): if not self: raise ValueError( "The '%s' attribute has no file associated with it." % self.field.name ) def _get_file(self): self._require_file() if getattr(self, "_file", None) is None: self._file = self.storage.open(self.name, "rb") return self._file def _set_file(self, file): self._file = file def _del_file(self): del self._file file = property(_get_file, _set_file, _del_file) @property def path(self): self._require_file() return self.storage.path(self.name) @property def url(self): self._require_file() return self.storage.url(self.name) @property def size(self): self._require_file() if not self._committed: return self.file.size return self.storage.size(self.name) def open(self, mode="rb"): self._require_file() if getattr(self, "_file", None) is None: self.file = self.storage.open(self.name, mode) else: self.file.open(mode) return self # open() doesn't alter the file's contents, but it does reset the pointer open.alters_data = True # In addition to the standard File API, FieldFiles have extra methods # to further manipulate the underlying file, as well as update the # associated model instance. def save(self, name, content, save=True): name = self.field.generate_filename(self.instance, name) self.name = self.storage.save(name, content, max_length=self.field.max_length) setattr(self.instance, self.field.attname, self.name) self._committed = True # Save the object because it has changed, unless save is False if save: self.instance.save() save.alters_data = True def delete(self, save=True): if not self: return # Only close the file if it's already open, which we know by the # presence of self._file if hasattr(self, "_file"): self.close() del self.file self.storage.delete(self.name) self.name = None setattr(self.instance, self.field.attname, self.name) self._committed = False if save: self.instance.save() delete.alters_data = True @property def closed(self): file = getattr(self, "_file", None) return file is None or file.closed def close(self): file = getattr(self, "_file", None) if file is not None: file.close() def __getstate__(self): # FieldFile needs access to its associated model field, an instance and # the file's name. Everything else will be restored later, by # FileDescriptor below. 
return { "name": self.name, "closed": False, "_committed": True, "_file": None, "instance": self.instance, "field": self.field, } def __setstate__(self, state): self.__dict__.update(state) self.storage = self.field.storage class FileDescriptor(DeferredAttribute): """ The descriptor for the file attribute on the model instance. Return a FieldFile when accessed so you can write code like:: >>> from myapp.models import MyModel >>> instance = MyModel.objects.get(pk=1) >>> instance.file.size Assign a file object on assignment so you can do:: >>> with open('/path/to/hello.world') as f: ... instance.file = File(f) """ def __get__(self, instance, cls=None): if instance is None: return self # This is slightly complicated, so worth an explanation. # instance.file needs to ultimately return some instance of `File`, # probably a subclass. Additionally, this returned object needs to have # the FieldFile API so that users can easily do things like # instance.file.path and have that delegated to the file storage engine. # Easy enough if we're strict about assignment in __set__, but if you # peek below you can see that we're not. So depending on the current # value of the field we have to dynamically construct some sort of # "thing" to return. # The instance dict contains whatever was originally assigned # in __set__. file = super().__get__(instance, cls) # If this value is a string (instance.file = "path/to/file") or None # then we simply wrap it with the appropriate attribute class according # to the file field. [This is FieldFile for FileFields and # ImageFieldFile for ImageFields; it's also conceivable that user # subclasses might also want to subclass the attribute class]. This # object understands how to convert a path to a file, and also how to # handle None. if isinstance(file, str) or file is None: attr = self.field.attr_class(instance, self.field, file) instance.__dict__[self.field.attname] = attr # Other types of files may be assigned as well, but they need to have # the FieldFile interface added to them. Thus, we wrap any other type of # File inside a FieldFile (well, the field's attr_class, which is # usually FieldFile). elif isinstance(file, File) and not isinstance(file, FieldFile): file_copy = self.field.attr_class(instance, self.field, file.name) file_copy.file = file file_copy._committed = False instance.__dict__[self.field.attname] = file_copy # Finally, because of the (some would say boneheaded) way pickle works, # the underlying FieldFile might not actually itself have an associated # file. So we need to reset the details of the FieldFile in those cases. elif isinstance(file, FieldFile) and not hasattr(file, "field"): file.instance = instance file.field = self.field file.storage = self.field.storage # Make sure that the instance is correct. elif isinstance(file, FieldFile) and instance is not file.instance: file.instance = instance # That was fun, wasn't it? return instance.__dict__[self.field.attname] def __set__(self, instance, value): instance.__dict__[self.field.attname] = value class FileField(Field): # The class to wrap instance attributes in. Accessing the file object off # the instance will always return an instance of attr_class. attr_class = FieldFile # The descriptor to use for accessing the attribute off of the class. 
descriptor_class = FileDescriptor description = _("File") def __init__( self, verbose_name=None, name=None, upload_to="", storage=None, **kwargs ): self._primary_key_set_explicitly = "primary_key" in kwargs self.storage = storage or default_storage if callable(self.storage): # Hold a reference to the callable for deconstruct(). self._storage_callable = self.storage self.storage = self.storage() if not isinstance(self.storage, Storage): raise TypeError( "%s.storage must be a subclass/instance of %s.%s" % ( self.__class__.__qualname__, Storage.__module__, Storage.__qualname__, ) ) self.upload_to = upload_to kwargs.setdefault("max_length", 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), *self._check_upload_to(), ] def _check_primary_key(self): if self._primary_key_set_explicitly: return [ checks.Error( "'primary_key' is not a valid argument for a %s." % self.__class__.__name__, obj=self, id="fields.E201", ) ] else: return [] def _check_upload_to(self): if isinstance(self.upload_to, str) and self.upload_to.startswith("/"): return [ checks.Error( "%s's 'upload_to' argument must be a relative path, not an " "absolute path." % self.__class__.__name__, obj=self, id="fields.E202", hint="Remove the leading slash.", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 100: del kwargs["max_length"] kwargs["upload_to"] = self.upload_to if self.storage is not default_storage: kwargs["storage"] = getattr(self, "_storage_callable", self.storage) return name, path, args, kwargs def get_internal_type(self): return "FileField" def get_prep_value(self, value): value = super().get_prep_value(value) # Need to convert File objects provided via a form to string for # database insertion. if value is None: return None return str(value) def pre_save(self, model_instance, add): file = super().pre_save(model_instance, add) if file and not file._committed: # Commit the file to storage prior to saving the model file.save(file.name, file.file, save=False) return file def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) setattr(cls, self.attname, self.descriptor_class(self)) def generate_filename(self, instance, filename): """ Apply (if callable) or prepend (if a string) upload_to to the filename, then delegate further processing of the name to the storage backend. Until the storage layer, all file paths are expected to be Unix style (with forward slashes). """ if callable(self.upload_to): filename = self.upload_to(instance, filename) else: dirname = datetime.datetime.now().strftime(str(self.upload_to)) filename = posixpath.join(dirname, filename) filename = validate_file_name(filename, allow_relative_path=True) return self.storage.generate_filename(filename) def save_form_data(self, instance, data): # Important: None means "no change", other false value means "clear" # This subtle distinction (rather than a more explicit marker) is # needed because we need to consume values that are also sane for a # regular (non Model-) Form to find in its cleaned_data dictionary. if data is not None: # This value will be converted to str and stored in the # database, so leaving False as-is is not acceptable. 
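            # ClearableFileInput returns False when its "clear" checkbox is
            # ticked, so ``data or ""`` stores the empty string in that case.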
setattr(instance, self.name, data or "") def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.FileField, "max_length": self.max_length, **kwargs, } ) class ImageFileDescriptor(FileDescriptor): """ Just like the FileDescriptor, but for ImageFields. The only difference is assigning the width/height to the width_field/height_field, if appropriate. """ def __set__(self, instance, value): previous_file = instance.__dict__.get(self.field.attname) super().__set__(instance, value) # To prevent recalculating image dimensions when we are instantiating # an object from the database (bug #11084), only update dimensions if # the field had a value before this assignment. Since the default # value for FileField subclasses is an instance of field.attr_class, # previous_file will only be None when we are called from # Model.__init__(). The ImageField.update_dimension_fields method # hooked up to the post_init signal handles the Model.__init__() cases. # Assignment happening outside of Model.__init__() will trigger the # update right here. if previous_file is not None: self.field.update_dimension_fields(instance, force=True) class ImageFieldFile(ImageFile, FieldFile): def delete(self, save=True): # Clear the image dimensions cache if hasattr(self, "_dimensions_cache"): del self._dimensions_cache super().delete(save) class ImageField(FileField): attr_class = ImageFieldFile descriptor_class = ImageFileDescriptor description = _("Image") def __init__( self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs, ): self.width_field, self.height_field = width_field, height_field super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_image_library_installed(), ] def _check_image_library_installed(self): try: from PIL import Image # NOQA except ImportError: return [ checks.Error( "Cannot use ImageField because Pillow is not installed.", hint=( "Get Pillow at https://pypi.org/project/Pillow/ " 'or run command "python -m pip install Pillow".' ), obj=self, id="fields.E210", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.width_field: kwargs["width_field"] = self.width_field if self.height_field: kwargs["height_field"] = self.height_field return name, path, args, kwargs def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) # Attach update_dimension_fields so that dimension fields declared # after their corresponding image field don't stay cleared by # Model.__init__, see bug #11196. # Only run post-initialization dimension update on non-abstract models if not cls._meta.abstract: signals.post_init.connect(self.update_dimension_fields, sender=cls) def update_dimension_fields(self, instance, force=False, *args, **kwargs): """ Update field's width and height fields, if defined. This method is hooked up to model's post_init signal to update dimensions after instantiating a model instance. However, dimensions won't be updated if the dimensions fields are already populated. This avoids unnecessary recalculation when loading an object from the database. Dimensions can be forced to update with force=True, which is how ImageFileDescriptor.__set__ calls this method. """ # Nothing to update if the field doesn't have dimension fields or if # the field is deferred. 
has_dimension_fields = self.width_field or self.height_field if not has_dimension_fields or self.attname not in instance.__dict__: return # getattr will call the ImageFileDescriptor's __get__ method, which # coerces the assigned value into an instance of self.attr_class # (ImageFieldFile in this case). file = getattr(instance, self.attname) # Nothing to update if we have no file and not being forced to update. if not file and not force: return dimension_fields_filled = not ( (self.width_field and not getattr(instance, self.width_field)) or (self.height_field and not getattr(instance, self.height_field)) ) # When both dimension fields have values, we are most likely loading # data from the database or updating an image field that already had # an image stored. In the first case, we don't want to update the # dimension fields because we are already getting their values from the # database. In the second case, we do want to update the dimensions # fields and will skip this return because force will be True since we # were called from ImageFileDescriptor.__set__. if dimension_fields_filled and not force: return # file should be an instance of ImageFieldFile or should be None. if file: width = file.width height = file.height else: # No file, so clear dimensions fields. width = None height = None # Update the width and height fields. if self.width_field: setattr(instance, self.width_field, width) if self.height_field: setattr(instance, self.height_field, height) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.ImageField, **kwargs, } )
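# Illustrative sketch (not part of the original source): the descriptor
# behavior implemented above, with hypothetical model and field names.
#
#     class Photo(models.Model):
#         image = models.ImageField(
#             upload_to="photos/%Y/%m/",  # strftime-expanded by
#                                         # generate_filename()
#             width_field="width",
#             height_field="height",
#         )
#         width = models.IntegerField(null=True)
#         height = models.IntegerField(null=True)
#
#     >>> photo = Photo.objects.get(pk=1)
#     >>> photo.image             # ImageFieldFile via ImageFileDescriptor
#     >>> photo.image.url         # delegated to the storage backend
#     >>> photo.image = File(open("new.jpg", "rb"))
#     >>> photo.save()            # pre_save() commits the file to storage
#
# Assigning a new file marks it uncommitted and, for ImageField,
# update_dimension_fields() refreshes width/height immediately; pre_save()
# then writes the file to storage when the instance is saved.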
""" Accessors for related objects. When a field defines a relation between two models, each model class provides an attribute to access related instances of the other model class (unless the reverse accessor has been disabled with related_name='+'). Accessors are implemented as descriptors in order to customize access and assignment. This module defines the descriptor classes. Forward accessors follow foreign keys. Reverse accessors trace them back. For example, with the following models:: class Parent(Model): pass class Child(Model): parent = ForeignKey(Parent, related_name='children') ``child.parent`` is a forward many-to-one relation. ``parent.children`` is a reverse many-to-one relation. There are three types of relations (many-to-one, one-to-one, and many-to-many) and two directions (forward and reverse) for a total of six combinations. 1. Related instance on the forward side of a many-to-one relation: ``ForwardManyToOneDescriptor``. Uniqueness of foreign key values is irrelevant to accessing the related instance, making the many-to-one and one-to-one cases identical as far as the descriptor is concerned. The constraint is checked upstream (unicity validation in forms) or downstream (unique indexes in the database). 2. Related instance on the forward side of a one-to-one relation: ``ForwardOneToOneDescriptor``. It avoids querying the database when accessing the parent link field in a multi-table inheritance scenario. 3. Related instance on the reverse side of a one-to-one relation: ``ReverseOneToOneDescriptor``. One-to-one relations are asymmetrical, despite the apparent symmetry of the name, because they're implemented in the database with a foreign key from one table to another. As a consequence ``ReverseOneToOneDescriptor`` is slightly different from ``ForwardManyToOneDescriptor``. 4. Related objects manager for related instances on the reverse side of a many-to-one relation: ``ReverseManyToOneDescriptor``. Unlike the previous two classes, this one provides access to a collection of objects. It returns a manager rather than an instance. 5. Related objects manager for related instances on the forward or reverse sides of a many-to-many relation: ``ManyToManyDescriptor``. Many-to-many relations are symmetrical. The syntax of Django models requires declaring them on one side but that's an implementation detail. They could be declared on the other side without any change in behavior. Therefore the forward and reverse descriptors can be the same. If you're looking for ``ForwardManyToManyDescriptor`` or ``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead. 
""" from django.core.exceptions import FieldError from django.db import ( DEFAULT_DB_ALIAS, NotSupportedError, connections, router, transaction, ) from django.db.models import Q, Window, signals from django.db.models.functions import RowNumber from django.db.models.lookups import GreaterThan, LessThanOrEqual from django.db.models.query import QuerySet from django.db.models.query_utils import DeferredAttribute from django.db.models.utils import AltersData, resolve_callables from django.utils.functional import cached_property class ForeignKeyDeferredAttribute(DeferredAttribute): def __set__(self, instance, value): if instance.__dict__.get(self.field.attname) != value and self.field.is_cached( instance ): self.field.delete_cached_value(instance) instance.__dict__[self.field.attname] = value def _filter_prefetch_queryset(queryset, field_name, instances): predicate = Q(**{f"{field_name}__in": instances}) db = queryset._db or DEFAULT_DB_ALIAS if queryset.query.is_sliced: if not connections[db].features.supports_over_clause: raise NotSupportedError( "Prefetching from a limited queryset is only supported on backends " "that support window functions." ) low_mark, high_mark = queryset.query.low_mark, queryset.query.high_mark order_by = [ expr for expr, _ in queryset.query.get_compiler(using=db).get_order_by() ] window = Window(RowNumber(), partition_by=field_name, order_by=order_by) predicate &= GreaterThan(window, low_mark) if high_mark is not None: predicate &= LessThanOrEqual(window, high_mark) queryset.query.clear_limits() return queryset.filter(predicate) class ForwardManyToOneDescriptor: """ Accessor to the related object on the forward side of a many-to-one or one-to-one (via ForwardOneToOneDescriptor subclass) relation. In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') ``Child.parent`` is a ``ForwardManyToOneDescriptor`` instance. """ def __init__(self, field_with_rel): self.field = field_with_rel @cached_property def RelatedObjectDoesNotExist(self): # The exception can't be created at initialization time since the # related model might not be resolved yet; `self.field.model` might # still be a string model reference. return type( "RelatedObjectDoesNotExist", (self.field.remote_field.model.DoesNotExist, AttributeError), { "__module__": self.field.model.__module__, "__qualname__": "%s.%s.RelatedObjectDoesNotExist" % ( self.field.model.__qualname__, self.field.name, ), }, ) def is_cached(self, instance): return self.field.is_cached(instance) def get_queryset(self, **hints): return self.field.remote_field.model._base_manager.db_manager(hints=hints).all() def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = self.get_queryset() queryset._add_hints(instance=instances[0]) rel_obj_attr = self.field.get_foreign_related_value instance_attr = self.field.get_local_related_value instances_dict = {instance_attr(inst): inst for inst in instances} related_field = self.field.foreign_related_fields[0] remote_field = self.field.remote_field # FIXME: This will need to be revisited when we introduce support for # composite fields. In the meantime we take this practical approach to # solve a regression on 1.6 when the reverse manager in hidden # (related_name ends with a '+'). Refs #21410. # The check for len(...) == 1 is a special case that allows the query # to be join-less and smaller. Refs #21760. 
if remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1: query = { "%s__in" % related_field.name: {instance_attr(inst)[0] for inst in instances} } else: query = {"%s__in" % self.field.related_query_name(): instances} queryset = queryset.filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. if not remote_field.multiple: for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] remote_field.set_cached_value(rel_obj, instance) return ( queryset, rel_obj_attr, instance_attr, True, self.field.get_cache_name(), False, ) def get_object(self, instance): qs = self.get_queryset(instance=instance) # Assuming the database enforces foreign keys, this won't fail. return qs.get(self.field.get_reverse_related_filter(instance)) def __get__(self, instance, cls=None): """ Get the related instance through the forward relation. With the example above, when getting ``child.parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``cls`` is the ``Child`` class (we don't need it) """ if instance is None: return self # The related instance is loaded from the database and then cached # by the field on the model instance state. It can also be pre-cached # by the reverse accessor (ReverseOneToOneDescriptor). try: rel_obj = self.field.get_cached_value(instance) except KeyError: has_value = None not in self.field.get_local_related_value(instance) ancestor_link = ( instance._meta.get_ancestor_link(self.field.model) if has_value else None ) if ancestor_link and ancestor_link.is_cached(instance): # An ancestor link will exist if this field is defined on a # multi-table inheritance parent of the instance's class. ancestor = ancestor_link.get_cached_value(instance) # The value might be cached on an ancestor if the instance # originated from walking down the inheritance chain. rel_obj = self.field.get_cached_value(ancestor, default=None) else: rel_obj = None if rel_obj is None and has_value: rel_obj = self.get_object(instance) remote_field = self.field.remote_field # If this is a one-to-one relation, set the reverse accessor # cache on the related object to the current instance to avoid # an extra SQL query if it's accessed later on. if not remote_field.multiple: remote_field.set_cached_value(rel_obj, instance) self.field.set_cached_value(instance, rel_obj) if rel_obj is None and not self.field.null: raise self.RelatedObjectDoesNotExist( "%s has no %s." % (self.field.model.__name__, self.field.name) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the forward relation. With the example above, when setting ``child.parent = parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``value`` is the ``parent`` instance on the right of the equal sign """ # An object must be an instance of the related class. if value is not None and not isinstance( value, self.field.remote_field.model._meta.concrete_model ): raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' 
% ( value, instance._meta.object_name, self.field.name, self.field.remote_field.model._meta.object_name, ) ) elif value is not None: if instance._state.db is None: instance._state.db = router.db_for_write( instance.__class__, instance=value ) if value._state.db is None: value._state.db = router.db_for_write( value.__class__, instance=instance ) if not router.allow_relation(value, instance): raise ValueError( 'Cannot assign "%r": the current database router prevents this ' "relation." % value ) remote_field = self.field.remote_field # If we're setting the value of a OneToOneField to None, we need to clear # out the cache on any old related object. Otherwise, deleting the # previously-related object will also cause this object to be deleted, # which is wrong. if value is None: # Look up the previously-related object, which may still be available # since we've not yet cleared out the related field. # Use the cache directly, instead of the accessor; if we haven't # populated the cache, then we don't care - we're only accessing # the object to invalidate the accessor cache, so there's no # need to populate the cache just to expire it again. related = self.field.get_cached_value(instance, default=None) # If we've got an old related object, we need to clear out its # cache. This cache also might not exist if the related object # hasn't been accessed yet. if related is not None: remote_field.set_cached_value(related, None) for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, None) # Set the values of the related field. else: for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, getattr(value, rh_field.attname)) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. self.field.set_cached_value(instance, value) # If this is a one-to-one relation, set the reverse accessor cache on # the related object to the current instance to avoid an extra SQL # query if it's accessed later on. if value is not None and not remote_field.multiple: remote_field.set_cached_value(value, instance) def __reduce__(self): """ Pickling should return the instance attached by self.field on the model, not a new copy of that descriptor. Use getattr() to retrieve the instance directly from the model. """ return getattr, (self.field.model, self.field.name) class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor): """ Accessor to the related object on the forward side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``Restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance. """ def get_object(self, instance): if self.field.remote_field.parent_link: deferred = instance.get_deferred_fields() # Because it's a parent link, all the data is available in the # instance, so populate the parent model with this data. rel_model = self.field.remote_field.model fields = [field.attname for field in rel_model._meta.concrete_fields] # If any of the related model's fields are deferred, fallback to # fetching all fields from the related model. This avoids a query # on the related model for every deferred field. 
if not any(field in fields for field in deferred): kwargs = {field: getattr(instance, field) for field in fields} obj = rel_model(**kwargs) obj._state.adding = instance._state.adding obj._state.db = instance._state.db return obj return super().get_object(instance) def __set__(self, instance, value): super().__set__(instance, value) # If the primary key is a link to a parent model and a parent instance # is being set, update the value of the inherited pk(s). if self.field.primary_key and self.field.remote_field.parent_link: opts = instance._meta # Inherited primary key fields from this object's base classes. inherited_pk_fields = [ field for field in opts.concrete_fields if field.primary_key and field.remote_field ] for field in inherited_pk_fields: rel_model_pk_name = field.remote_field.model._meta.pk.attname raw_value = ( getattr(value, rel_model_pk_name) if value is not None else None ) setattr(instance, rel_model_pk_name, raw_value) class ReverseOneToOneDescriptor: """ Accessor to the related object on the reverse side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``Place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance. """ def __init__(self, related): # Following the example above, `related` is an instance of OneToOneRel # which represents the reverse restaurant field (place.restaurant). self.related = related @cached_property def RelatedObjectDoesNotExist(self): # The exception isn't created at initialization time for the sake of # consistency with `ForwardManyToOneDescriptor`. return type( "RelatedObjectDoesNotExist", (self.related.related_model.DoesNotExist, AttributeError), { "__module__": self.related.model.__module__, "__qualname__": "%s.%s.RelatedObjectDoesNotExist" % ( self.related.model.__qualname__, self.related.name, ), }, ) def is_cached(self, instance): return self.related.is_cached(instance) def get_queryset(self, **hints): return self.related.related_model._base_manager.db_manager(hints=hints).all() def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = self.get_queryset() queryset._add_hints(instance=instances[0]) rel_obj_attr = self.related.field.get_local_related_value instance_attr = self.related.field.get_foreign_related_value instances_dict = {instance_attr(inst): inst for inst in instances} query = {"%s__in" % self.related.field.name: instances} queryset = queryset.filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] self.related.field.set_cached_value(rel_obj, instance) return ( queryset, rel_obj_attr, instance_attr, True, self.related.get_cache_name(), False, ) def __get__(self, instance, cls=None): """ Get the related instance through the reverse relation. With the example above, when getting ``place.restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``cls`` is the ``Place`` class (unused) Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ if instance is None: return self # The related instance is loaded from the database and then cached # by the field on the model instance state. It can also be pre-cached # by the forward accessor (ForwardManyToOneDescriptor). 
try: rel_obj = self.related.get_cached_value(instance) except KeyError: related_pk = instance.pk if related_pk is None: rel_obj = None else: filter_args = self.related.field.get_forward_related_filter(instance) try: rel_obj = self.get_queryset(instance=instance).get(**filter_args) except self.related.related_model.DoesNotExist: rel_obj = None else: # Set the forward accessor cache on the related object to # the current instance to avoid an extra SQL query if it's # accessed later on. self.related.field.set_cached_value(rel_obj, instance) self.related.set_cached_value(instance, rel_obj) if rel_obj is None: raise self.RelatedObjectDoesNotExist( "%s has no %s." % (instance.__class__.__name__, self.related.get_accessor_name()) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the reverse relation. With the example above, when setting ``place.restaurant = restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``value`` is the ``restaurant`` instance on the right of the equal sign Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ # The similarity of the code below to the code in # ForwardManyToOneDescriptor is annoying, but there's a bunch # of small differences that would make a common base class convoluted. if value is None: # Update the cached related instance (if any) & clear the cache. # Following the example above, this would be the cached # ``restaurant`` instance (if any). rel_obj = self.related.get_cached_value(instance, default=None) if rel_obj is not None: # Remove the ``restaurant`` instance from the ``place`` # instance cache. self.related.delete_cached_value(instance) # Set the ``place`` field on the ``restaurant`` # instance to None. setattr(rel_obj, self.related.field.name, None) elif not isinstance(value, self.related.related_model): # An object must be an instance of the related class. raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % ( value, instance._meta.object_name, self.related.get_accessor_name(), self.related.related_model._meta.object_name, ) ) else: if instance._state.db is None: instance._state.db = router.db_for_write( instance.__class__, instance=value ) if value._state.db is None: value._state.db = router.db_for_write( value.__class__, instance=instance ) if not router.allow_relation(value, instance): raise ValueError( 'Cannot assign "%r": the current database router prevents this ' "relation." % value ) related_pk = tuple( getattr(instance, field.attname) for field in self.related.field.foreign_related_fields ) # Set the value of the related field to the value of the related # object's related field. for index, field in enumerate(self.related.field.local_related_fields): setattr(value, field.attname, related_pk[index]) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. self.related.set_cached_value(instance, value) # Set the forward accessor cache on the related object to the current # instance to avoid an extra SQL query if it's accessed later on. self.related.field.set_cached_value(value, instance) def __reduce__(self): # Same purpose as ForwardManyToOneDescriptor.__reduce__(). return getattr, (self.related.model, self.related.name) class ReverseManyToOneDescriptor: """ Accessor to the related objects manager on the reverse side of a many-to-one relation. 
In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') ``Parent.children`` is a ``ReverseManyToOneDescriptor`` instance. Most of the implementation is delegated to a dynamically defined manager class built by ``create_forward_many_to_many_manager()`` defined below. """ def __init__(self, rel): self.rel = rel self.field = rel.field @cached_property def related_manager_cls(self): related_model = self.rel.related_model return create_reverse_many_to_one_manager( related_model._default_manager.__class__, self.rel, ) def __get__(self, instance, cls=None): """ Get the related objects through the reverse relation. With the example above, when getting ``parent.children``: - ``self`` is the descriptor managing the ``children`` attribute - ``instance`` is the ``parent`` instance - ``cls`` is the ``Parent`` class (unused) """ if instance is None: return self return self.related_manager_cls(instance) def _get_set_deprecation_msg_params(self): return ( "reverse side of a related set", self.rel.get_accessor_name(), ) def __set__(self, instance, value): raise TypeError( "Direct assignment to the %s is prohibited. Use %s.set() instead." % self._get_set_deprecation_msg_params(), ) def create_reverse_many_to_one_manager(superclass, rel): """ Create a manager for the reverse side of a many-to-one relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-one relations. """ class RelatedManager(superclass, AltersData): def __init__(self, instance): super().__init__() self.instance = instance self.model = rel.related_model self.field = rel.field self.core_filters = {self.field.name: instance} def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_reverse_many_to_one_manager(manager.__class__, rel) return manager_class(self.instance) do_not_call_in_templates = True def _check_fk_val(self): for field in self.field.foreign_related_fields: if getattr(self.instance, field.attname) is None: raise ValueError( f'"{self.instance!r}" needs to have a value for field ' f'"{field.attname}" before this relationship can be used.' ) def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. """ db = self._db or router.db_for_read(self.model, instance=self.instance) empty_strings_as_null = connections[ db ].features.interprets_empty_strings_as_nulls queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset._defer_next_filter = True queryset = queryset.filter(**self.core_filters) for field in self.field.foreign_related_fields: val = getattr(self.instance, field.attname) if val is None or (val == "" and empty_strings_as_null): return queryset.none() if self.field.many_to_one: # Guard against field-like objects such as GenericRelation # that abuse create_reverse_many_to_one_manager() with reverse # one-to-many relationships instead and break known related # objects assignment. try: target_field = self.field.target_field except FieldError: # The relationship has multiple target fields. Use a tuple # for related object id. 
rel_obj_id = tuple( [ getattr(self.instance, target_field.attname) for target_field in self.field.path_infos[-1].target_fields ] ) else: rel_obj_id = getattr(self.instance, target_field.attname) queryset._known_related_objects = { self.field: {rel_obj_id: self.instance} } return queryset def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop( self.field.remote_field.get_cache_name() ) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if self.instance.pk is None: raise ValueError( f"{self.instance.__class__.__name__!r} instance needs to have a " f"primary key value before this relationship can be used." ) try: return self.instance._prefetched_objects_cache[ self.field.remote_field.get_cache_name() ] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) rel_obj_attr = self.field.get_local_related_value instance_attr = self.field.get_foreign_related_value instances_dict = {instance_attr(inst): inst for inst in instances} queryset = _filter_prefetch_queryset(queryset, self.field.name, instances) # Since we just bypassed this class' get_queryset(), we must manage # the reverse relation manually. for rel_obj in queryset: if not self.field.is_cached(rel_obj): instance = instances_dict[rel_obj_attr(rel_obj)] setattr(rel_obj, self.field.name, instance) cache_name = self.field.remote_field.get_cache_name() return queryset, rel_obj_attr, instance_attr, False, cache_name, False def add(self, *objs, bulk=True): self._check_fk_val() self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) def check_and_update_obj(obj): if not isinstance(obj, self.model): raise TypeError( "'%s' instance expected, got %r" % ( self.model._meta.object_name, obj, ) ) setattr(obj, self.field.name, self.instance) if bulk: pks = [] for obj in objs: check_and_update_obj(obj) if obj._state.adding or obj._state.db != db: raise ValueError( "%r instance isn't saved. Use bulk=False or save " "the object first." 
% obj ) pks.append(obj.pk) self.model._base_manager.using(db).filter(pk__in=pks).update( **{ self.field.name: self.instance, } ) else: with transaction.atomic(using=db, savepoint=False): for obj in objs: check_and_update_obj(obj) obj.save() add.alters_data = True def create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).create(**kwargs) create.alters_data = True def get_or_create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs) get_or_create.alters_data = True def update_or_create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs) update_or_create.alters_data = True # remove() and clear() are only provided if the ForeignKey can have a # value of null. if rel.field.null: def remove(self, *objs, bulk=True): if not objs: return self._check_fk_val() val = self.field.get_foreign_related_value(self.instance) old_ids = set() for obj in objs: if not isinstance(obj, self.model): raise TypeError( "'%s' instance expected, got %r" % ( self.model._meta.object_name, obj, ) ) # Is obj actually part of this descriptor set? if self.field.get_local_related_value(obj) == val: old_ids.add(obj.pk) else: raise self.field.remote_field.model.DoesNotExist( "%r is not related to %r." % (obj, self.instance) ) self._clear(self.filter(pk__in=old_ids), bulk) remove.alters_data = True def clear(self, *, bulk=True): self._check_fk_val() self._clear(self, bulk) clear.alters_data = True def _clear(self, queryset, bulk): self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) queryset = queryset.using(db) if bulk: # `QuerySet.update()` is intrinsically atomic. queryset.update(**{self.field.name: None}) else: with transaction.atomic(using=db, savepoint=False): for obj in queryset: setattr(obj, self.field.name, None) obj.save(update_fields=[self.field.name]) _clear.alters_data = True def set(self, objs, *, bulk=True, clear=False): self._check_fk_val() # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) if self.field.null: db = router.db_for_write(self.model, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear(bulk=bulk) self.add(*objs, bulk=bulk) else: old_objs = set(self.using(db).all()) new_objs = [] for obj in objs: if obj in old_objs: old_objs.remove(obj) else: new_objs.append(obj) self.remove(*old_objs, bulk=bulk) self.add(*new_objs, bulk=bulk) else: self.add(*objs, bulk=bulk) set.alters_data = True return RelatedManager class ManyToManyDescriptor(ReverseManyToOneDescriptor): """ Accessor to the related objects manager on the forward and reverse sides of a many-to-many relation. In the example:: class Pizza(Model): toppings = ManyToManyField(Topping, related_name='pizzas') ``Pizza.toppings`` and ``Topping.pizzas`` are ``ManyToManyDescriptor`` instances. Most of the implementation is delegated to a dynamically defined manager class built by ``create_forward_many_to_many_manager()`` defined below. 
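
    Editor's addition (illustrative, reusing the ``Pizza``/``Topping`` example
    above)::

        pizza.toppings.add(topping)   # writes a row to the through table
        topping.pizzas.all()          # the reverse side uses the same manager

    The generated manager provides ``add()``, ``remove()``, ``clear()``, and
    ``set()``, all of which operate on the intermediate (through) table.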
""" def __init__(self, rel, reverse=False): super().__init__(rel) self.reverse = reverse @property def through(self): # through is provided so that you have easy access to the through # model (Book.authors.through) for inlines, etc. This is done as # a property to ensure that the fully resolved value is returned. return self.rel.through @cached_property def related_manager_cls(self): related_model = self.rel.related_model if self.reverse else self.rel.model return create_forward_many_to_many_manager( related_model._default_manager.__class__, self.rel, reverse=self.reverse, ) def _get_set_deprecation_msg_params(self): return ( "%s side of a many-to-many set" % ("reverse" if self.reverse else "forward"), self.rel.get_accessor_name() if self.reverse else self.field.name, ) def create_forward_many_to_many_manager(superclass, rel, reverse): """ Create a manager for the either side of a many-to-many relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-many relations. """ class ManyRelatedManager(superclass, AltersData): def __init__(self, instance=None): super().__init__() self.instance = instance if not reverse: self.model = rel.model self.query_field_name = rel.field.related_query_name() self.prefetch_cache_name = rel.field.name self.source_field_name = rel.field.m2m_field_name() self.target_field_name = rel.field.m2m_reverse_field_name() self.symmetrical = rel.symmetrical else: self.model = rel.related_model self.query_field_name = rel.field.name self.prefetch_cache_name = rel.field.related_query_name() self.source_field_name = rel.field.m2m_reverse_field_name() self.target_field_name = rel.field.m2m_field_name() self.symmetrical = False self.through = rel.through self.reverse = reverse self.source_field = self.through._meta.get_field(self.source_field_name) self.target_field = self.through._meta.get_field(self.target_field_name) self.core_filters = {} self.pk_field_names = {} for lh_field, rh_field in self.source_field.related_fields: core_filter_key = "%s__%s" % (self.query_field_name, rh_field.name) self.core_filters[core_filter_key] = getattr(instance, rh_field.attname) self.pk_field_names[lh_field.name] = rh_field.name self.related_val = self.source_field.get_foreign_related_value(instance) if None in self.related_val: raise ValueError( '"%r" needs to have a value for field "%s" before ' "this many-to-many relationship can be used." % (instance, self.pk_field_names[self.source_field_name]) ) # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if instance.pk is None: raise ValueError( "%r instance needs to have a primary key value before " "a many-to-many relationship can be used." % instance.__class__.__name__ ) def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_forward_many_to_many_manager( manager.__class__, rel, reverse ) return manager_class(instance=self.instance) do_not_call_in_templates = True def _build_remove_filters(self, removed_vals): filters = Q.create([(self.source_field_name, self.related_val)]) # No need to add a subquery condition if removed_vals is a QuerySet without # filters. 
removed_vals_filters = ( not isinstance(removed_vals, QuerySet) or removed_vals._has_filters() ) if removed_vals_filters: filters &= Q.create([(f"{self.target_field_name}__in", removed_vals)]) if self.symmetrical: symmetrical_filters = Q.create( [(self.target_field_name, self.related_val)] ) if removed_vals_filters: symmetrical_filters &= Q.create( [(f"{self.source_field_name}__in", removed_vals)] ) filters |= symmetrical_filters return filters def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. """ queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset._defer_next_filter = True return queryset._next_is_sticky().filter(**self.core_filters) def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) queryset = _filter_prefetch_queryset( queryset._next_is_sticky(), self.query_field_name, instances ) # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. We know that # there will already be a join on the join table, so we can just add # the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. fk = self.through._meta.get_field(self.source_field_name) join_table = fk.model._meta.db_table connection = connections[queryset.db] qn = connection.ops.quote_name queryset = queryset.extra( select={ "_prefetch_related_val_%s" % f.attname: "%s.%s" % (qn(join_table), qn(f.column)) for f in fk.local_related_fields } ) return ( queryset, lambda result: tuple( getattr(result, "_prefetch_related_val_%s" % f.attname) for f in fk.local_related_fields ), lambda inst: tuple( f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields ), False, self.prefetch_cache_name, False, ) def add(self, *objs, through_defaults=None): self._remove_prefetched_objects() db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): self._add_items( self.source_field_name, self.target_field_name, *objs, through_defaults=through_defaults, ) # If this is a symmetrical m2m relation to self, add the mirror # entry in the m2m table. 
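                # Editor's note (illustrative): with a symmetrical relation
                # such as friends = ManyToManyField("self"),
                # alice.friends.add(bob) also inserts the mirrored (bob, alice)
                # row so that bob.friends includes alice.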
if self.symmetrical: self._add_items( self.target_field_name, self.source_field_name, *objs, through_defaults=through_defaults, ) add.alters_data = True def remove(self, *objs): self._remove_prefetched_objects() self._remove_items(self.source_field_name, self.target_field_name, *objs) remove.alters_data = True def clear(self): db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): signals.m2m_changed.send( sender=self.through, action="pre_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) self._remove_prefetched_objects() filters = self._build_remove_filters(super().get_queryset().using(db)) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) clear.alters_data = True def set(self, objs, *, clear=False, through_defaults=None): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, through_defaults=through_defaults) else: old_ids = set( self.using(db).values_list( self.target_field.target_field.attname, flat=True ) ) new_objs = [] for obj in objs: fk_val = ( self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else self.target_field.get_prep_value(obj) ) if fk_val in old_ids: old_ids.remove(fk_val) else: new_objs.append(obj) self.remove(*old_ids) self.add(*new_objs, through_defaults=through_defaults) set.alters_data = True def create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs) self.add(new_obj, through_defaults=through_defaults) return new_obj create.alters_data = True def get_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create( **kwargs ) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created get_or_create.alters_data = True def update_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super( ManyRelatedManager, self.db_manager(db) ).update_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created update_or_create.alters_data = True def _get_target_ids(self, target_field_name, objs): """ Return the set of ids of `objs` that the target field references. 
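
            Editor's note (illustrative): `objs` may mix model instances and
            raw primary keys; e.g. pizza.toppings.add(topping) and
            pizza.toppings.add(topping.pk) resolve to the same target id.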
""" from django.db.models import Model target_ids = set() target_field = self.through._meta.get_field(target_field_name) for obj in objs: if isinstance(obj, self.model): if not router.allow_relation(obj, self.instance): raise ValueError( 'Cannot add "%r": instance is on database "%s", ' 'value is on database "%s"' % (obj, self.instance._state.db, obj._state.db) ) target_id = target_field.get_foreign_related_value(obj)[0] if target_id is None: raise ValueError( 'Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name) ) target_ids.add(target_id) elif isinstance(obj, Model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) else: target_ids.add(target_field.get_prep_value(obj)) return target_ids def _get_missing_target_ids( self, source_field_name, target_field_name, db, target_ids ): """ Return the subset of ids of `objs` that aren't already assigned to this relationship. """ vals = ( self.through._default_manager.using(db) .values_list(target_field_name, flat=True) .filter( **{ source_field_name: self.related_val[0], "%s__in" % target_field_name: target_ids, } ) ) return target_ids.difference(vals) def _get_add_plan(self, db, source_field_name): """ Return a boolean triple of the way the add should be performed. The first element is whether or not bulk_create(ignore_conflicts) can be used, the second whether or not signals must be sent, and the third element is whether or not the immediate bulk insertion with conflicts ignored can be performed. """ # Conflicts can be ignored when the intermediary model is # auto-created as the only possible collision is on the # (source_id, target_id) tuple. The same assertion doesn't hold for # user-defined intermediary models as they could have other fields # causing conflicts which must be surfaced. can_ignore_conflicts = ( self.through._meta.auto_created is not False and connections[db].features.supports_ignore_conflicts ) # Don't send the signal when inserting duplicate data row # for symmetrical reverse entries. must_send_signals = ( self.reverse or source_field_name == self.source_field_name ) and (signals.m2m_changed.has_listeners(self.through)) # Fast addition through bulk insertion can only be performed # if no m2m_changed listeners are connected for self.through # as they require the added set of ids to be provided via # pk_set. return ( can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and not must_send_signals), ) def _add_items( self, source_field_name, target_field_name, *objs, through_defaults=None ): # source_field_name: the PK fieldname in join table for the source object # target_field_name: the PK fieldname in join table for the target object # *objs - objects to add. Either object instances, or primary keys # of object instances. 
if not objs: return through_defaults = dict(resolve_callables(through_defaults or {})) target_ids = self._get_target_ids(target_field_name, objs) db = router.db_for_write(self.through, instance=self.instance) can_ignore_conflicts, must_send_signals, can_fast_add = self._get_add_plan( db, source_field_name ) if can_fast_add: self.through._default_manager.using(db).bulk_create( [ self.through( **{ "%s_id" % source_field_name: self.related_val[0], "%s_id" % target_field_name: target_id, } ) for target_id in target_ids ], ignore_conflicts=True, ) return missing_target_ids = self._get_missing_target_ids( source_field_name, target_field_name, db, target_ids ) with transaction.atomic(using=db, savepoint=False): if must_send_signals: signals.m2m_changed.send( sender=self.through, action="pre_add", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) # Add the ones that aren't there already. self.through._default_manager.using(db).bulk_create( [ self.through( **through_defaults, **{ "%s_id" % source_field_name: self.related_val[0], "%s_id" % target_field_name: target_id, }, ) for target_id in missing_target_ids ], ignore_conflicts=can_ignore_conflicts, ) if must_send_signals: signals.m2m_changed.send( sender=self.through, action="post_add", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) def _remove_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK colname in join table for the source object # target_field_name: the PK colname in join table for the target object # *objs - objects to remove. Either object instances, or primary # keys of object instances. if not objs: return # Check that all the objects are of the right type old_ids = set() for obj in objs: if isinstance(obj, self.model): fk_val = self.target_field.get_foreign_related_value(obj)[0] old_ids.add(fk_val) else: old_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): # Send a signal to the other end if need be. signals.m2m_changed.send( sender=self.through, action="pre_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) target_model_qs = super().get_queryset() if target_model_qs._has_filters(): old_vals = target_model_qs.using(db).filter( **{"%s__in" % self.target_field.target_field.attname: old_ids} ) else: old_vals = old_ids filters = self._build_remove_filters(old_vals) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) return ManyRelatedManager
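

# Editor's usage sketch (hypothetical models; illustrative only, not part of
# the original module):
#
#     class Topping(models.Model):
#         name = models.CharField(max_length=50)
#
#     class Pizza(models.Model):
#         toppings = models.ManyToManyField(Topping, related_name="pizzas")
#
#     pizza.toppings.set([t1, t2])        # diffs against the current rows
#     pizza.toppings.create(name="ham")   # create(), then add() through here
#     parent.children.add(child)          # reverse FK manager defined above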
e8e8149c1994e52f53bd4e774c3d4ec6e8d79d1cba3661a2d594629e5b474fc2
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import copy import difflib import functools import sys from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import FieldDoesNotExist, FieldError from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import ( BaseExpression, Col, Exists, F, OuterRef, Ref, ResolvedOuterRef, Value, ) from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode from django.utils.functional import cached_property from django.utils.regex_helper import _lazy_re_compile from django.utils.tree import Node __all__ = ["Query", "RawQuery"] # Quotation marks ('"`[]), whitespace characters, semicolons, or inline # SQL comments are forbidden in column aliases. FORBIDDEN_ALIAS_PATTERN = _lazy_re_compile(r"['`\"\]\[;\s]|--|/\*|\*/") # Inspired from # https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS EXPLAIN_OPTIONS_PATTERN = _lazy_re_compile(r"[\w\-]+") def get_field_names_from_opts(opts): if opts is None: return set() return set( chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() ) ) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( "JoinInfo", ("final_field", "targets", "opts", "joins", "path", "transform_function"), ) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=()): self.params = params self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. 
result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): if self.params is None: return None return dict if isinstance(self.params, Mapping) else tuple def __str__(self): if self.params_type is None: return self.sql return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. See #17755. params_type = self.params_type adapter = connection.ops.adapt_unknown_value if params_type is tuple: params = tuple(adapter(val) for val in self.params) elif params_type is dict: params = {key: adapter(val) for key, val in self.params.items()} elif params_type is None: params = None else: raise RuntimeError("Unexpected params type: %s" % params_type) self.cursor = connection.cursor() self.cursor.execute(self.sql, params) ExplainInfo = namedtuple("ExplainInfo", ("format", "options")) class Query(BaseExpression): """A single SQL query.""" alias_prefix = "T" empty_result_set_value = None subq_aliases = frozenset([alias_prefix]) compiler = "SQLCompiler" base_table_class = BaseTable join_class = Join default_cols = True default_ordering = True standard_ordering = True filter_is_sticky = False subquery = False # SQL-related attributes. # Select and related select clauses are expressions to use in the SELECT # clause of the query. The select is used for cases where we want to set up # the select clause to contain other than default fields (values(), # subqueries...). Note that annotations go to annotations dictionary. select = () # The group_by attribute can have one of the following forms: # - None: no group by at all in the query # - A tuple of expressions: group by (at least) those expressions. # String refs are also allowed for now. # - True: group by all select fields of the model # See compiler.get_group_by() for details. group_by = None order_by = () low_mark = 0 # Used for offset/limit. high_mark = None # Used for offset/limit. distinct = False distinct_fields = () select_for_update = False select_for_update_nowait = False select_for_update_skip_locked = False select_for_update_of = () select_for_no_key_update = False select_related = False has_select_fields = False # Arbitrary limit for select_related to prevents infinite recursion. max_depth = 5 # Holds the selects defined by a call to values() or values_list() # excluding annotation_select and extra_select. values_select = () # SQL annotation-related attributes. annotation_select_mask = None _annotation_select_cache = None # Set combination attributes. combinator = None combinator_all = False combined_queries = () # These are for extensions. The contents are more or less appended verbatim # to the appropriate clause. extra_select_mask = None _extra_select_cache = None extra_tables = () extra_order_by = () # A tuple that is a set of model field names and either True, if these are # the fields to defer, or False if these are the only fields to load. deferred_loading = (frozenset(), True) explain_info = None def __init__(self, model, alias_cols=True): self.model = model self.alias_refcount = {} # alias_map is the most important data structure regarding joins. # It's used for recording which joins exist in the query and what # types they are. 
The key is the alias of the joined table (possibly # the table name) and the value is a Join-like object (see # sql.datastructures.Join for more information). self.alias_map = {} # Whether to provide alias to columns during reference resolving. self.alias_cols = alias_cols # Sometimes the query contains references to aliases in outer queries (as # a result of split_exclude). Correct alias quoting needs to know these # aliases too. # Map external tables to whether they are aliased. self.external_aliases = {} self.table_map = {} # Maps table names to list of aliases. self.used_aliases = set() self.where = WhereNode() # Maps alias -> Annotation Expression. self.annotations = {} # These are for extensions. The contents are more or less appended # verbatim to the appropriate clause. self.extra = {} # Maps col_alias -> (col_sql, params). self._filtered_relations = {} @property def output_field(self): if len(self.select) == 1: select = self.select[0] return getattr(select, "target", None) or select.field elif len(self.annotation_select) == 1: return next(iter(self.annotation_select.values())).output_field @cached_property def base_table(self): for alias in self.alias_map: return alias def __str__(self): """ Return the query as a string of SQL with the parameter values substituted in (use sql_with_params() to see the unsubstituted string). Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time. """ sql, params = self.sql_with_params() return sql % params def sql_with_params(self): """ Return the query as an SQL string and the parameters that will be substituted into the query. """ return self.get_compiler(DEFAULT_DB_ALIAS).as_sql() def __deepcopy__(self, memo): """Limit the amount of work when a Query is deepcopied.""" result = self.clone() memo[id(self)] = result return result def get_compiler(self, using=None, connection=None, elide_empty=True): if using is None and connection is None: raise ValueError("Need either using or connection") if using: connection = connections[using] return connection.ops.compiler(self.compiler)( self, connection, using, elide_empty ) def get_meta(self): """ Return the Options instance (the model._meta) from which to start processing. Normally, this is self.model._meta, but it can be changed by subclasses. """ if self.model: return self.model._meta def clone(self): """ Return a copy of the current Query. A lightweight alternative to deepcopy(). """ obj = Empty() obj.__class__ = self.__class__ # Copy references to everything. obj.__dict__ = self.__dict__.copy() # Clone attributes that can't use shallow copy. obj.alias_refcount = self.alias_refcount.copy() obj.alias_map = self.alias_map.copy() obj.external_aliases = self.external_aliases.copy() obj.table_map = self.table_map.copy() obj.where = self.where.clone() obj.annotations = self.annotations.copy() if self.annotation_select_mask is not None: obj.annotation_select_mask = self.annotation_select_mask.copy() if self.combined_queries: obj.combined_queries = tuple( [query.clone() for query in self.combined_queries] ) # _annotation_select_cache cannot be copied, as doing so breaks the # (necessary) state in which both annotations and # _annotation_select_cache point to the same underlying objects. # It will get re-populated in the cloned queryset the next time it's # used. 
obj._annotation_select_cache = None obj.extra = self.extra.copy() if self.extra_select_mask is not None: obj.extra_select_mask = self.extra_select_mask.copy() if self._extra_select_cache is not None: obj._extra_select_cache = self._extra_select_cache.copy() if self.select_related is not False: # Use deepcopy because select_related stores fields in nested # dicts. obj.select_related = copy.deepcopy(obj.select_related) if "subq_aliases" in self.__dict__: obj.subq_aliases = self.subq_aliases.copy() obj.used_aliases = self.used_aliases.copy() obj._filtered_relations = self._filtered_relations.copy() # Clear the cached_property, if it exists. obj.__dict__.pop("base_table", None) return obj def chain(self, klass=None): """ Return a copy of the current Query that's ready for another operation. The klass argument changes the type of the Query, e.g. UpdateQuery. """ obj = self.clone() if klass and obj.__class__ != klass: obj.__class__ = klass if not obj.filter_is_sticky: obj.used_aliases = set() obj.filter_is_sticky = False if hasattr(obj, "_setup_query"): obj._setup_query() return obj def relabeled_clone(self, change_map): clone = self.clone() clone.change_aliases(change_map) return clone def _get_col(self, target, field, alias): if not self.alias_cols: alias = None return target.get_col(alias, field) def rewrite_cols(self, annotation, col_cnt): # We must make sure the inner query has the referred columns in it. # If we are aggregating over an annotation, then Django uses Ref() # instances to note this. However, if we are annotating over a column # of a related model, then it might be that column isn't part of the # SELECT clause of the inner query, and we must manually make sure # the column is selected. An example case is: # .aggregate(Sum('author__awards')) # Resolving this expression results in a join to author, but there # is no guarantee the awards column of author is in the select clause # of the query. Thus we must manually add the column to the inner # query. orig_exprs = annotation.get_source_expressions() new_exprs = [] for expr in orig_exprs: # FIXME: These conditions are fairly arbitrary. Identify a better # method of having expressions decide which code path they should # take. if isinstance(expr, Ref): # Its already a Ref to subquery (see resolve_ref() for # details) new_exprs.append(expr) elif isinstance(expr, (WhereNode, Lookup)): # Decompose the subexpressions further. The code here is # copied from the else clause, but this condition must appear # before the contains_aggregate/is_summary condition below. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) else: # Reuse aliases of expressions already selected in subquery. for col_alias, selected_annotation in self.annotation_select.items(): if selected_annotation is expr: new_expr = Ref(col_alias, expr) break else: # An expression that is not selected the subquery. if isinstance(expr, Col) or ( expr.contains_aggregate and not expr.is_summary ): # Reference column or another aggregate. Select it # under a non-conflicting alias. col_cnt += 1 col_alias = "__col%d" % col_cnt self.annotations[col_alias] = expr self.append_annotation_mask([col_alias]) new_expr = Ref(col_alias, expr) else: # Some other expression not referencing database values # directly. Its subexpression might contain Cols. 
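                        # Editor's sketch (illustrative): a combined expression
                        # such as F("awards") + 1 recurses here so any Cols it
                        # embeds get selected in the subquery first.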
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) annotation.set_source_expressions(new_exprs) return annotation, col_cnt def get_aggregation(self, using, added_aggregate_names): """ Return the dictionary with the values of the existing aggregations. """ if not self.annotation_select: return {} existing_annotations = [ annotation for alias, annotation in self.annotations.items() if alias not in added_aggregate_names ] # Decide if we need to use a subquery. # # Existing annotations would cause incorrect results as get_aggregation() # must produce just one result and thus must not use GROUP BY. But we # aren't smart enough to remove the existing annotations from the # query, so those would force us to use GROUP BY. # # If the query has limit or distinct, or uses set operations, then # those operations must be done in a subquery so that the query # aggregates on the limit and/or distinct results instead of applying # the distinct and limit after the aggregation. if ( isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or self.distinct or self.combinator ): from django.db.models.sql.subqueries import AggregateQuery inner_query = self.clone() inner_query.subquery = True outer_query = AggregateQuery(self.model, inner_query) inner_query.select_for_update = False inner_query.select_related = False inner_query.set_annotation_mask(self.annotation_select) # Queries with distinct_fields need ordering and when a limit is # applied we must take the slice from the ordered query. Otherwise # no need for ordering. inner_query.clear_ordering(force=False) if not inner_query.distinct: # If the inner query uses default select and it has some # aggregate annotations, then we must make sure the inner # query is grouped by the main model's primary key. However, # clearing the select clause can alter results if distinct is # used. has_existing_aggregate_annotations = any( annotation for annotation in existing_annotations if getattr(annotation, "contains_aggregate", True) ) if inner_query.default_cols and has_existing_aggregate_annotations: inner_query.group_by = ( self.model._meta.pk.get_col(inner_query.get_initial_alias()), ) inner_query.default_cols = False relabels = {t: "subquery" for t in inner_query.alias_map} relabels[None] = "subquery" # Remove any aggregates marked for reduction from the subquery # and move them to the outer AggregateQuery. col_cnt = 0 for alias, expression in list(inner_query.annotation_select.items()): annotation_select_mask = inner_query.annotation_select_mask if expression.is_summary: expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt) outer_query.annotations[alias] = expression.relabeled_clone( relabels ) del inner_query.annotations[alias] annotation_select_mask.remove(alias) # Make sure the annotation_select wont use cached results. inner_query.set_annotation_mask(inner_query.annotation_select_mask) if ( inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask ): # In case of Model.objects[0:3].count(), there would be no # field selected in the inner query, yet we must use a subquery. # So, make sure at least one field is selected. 
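                # Editor's sketch of the resulting SQL shape (illustrative):
                #   SELECT COUNT(*) FROM (SELECT "id" FROM ... LIMIT 3) subquery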
inner_query.select = ( self.model._meta.pk.get_col(inner_query.get_initial_alias()), ) else: outer_query = self self.select = () self.default_cols = False self.extra = {} empty_set_result = [ expression.empty_result_set_value for expression in outer_query.annotation_select.values() ] elide_empty = not any(result is NotImplemented for result in empty_set_result) outer_query.clear_ordering(force=True) outer_query.clear_limits() outer_query.select_for_update = False outer_query.select_related = False compiler = outer_query.get_compiler(using, elide_empty=elide_empty) result = compiler.execute_sql(SINGLE) if result is None: result = empty_set_result converters = compiler.get_converters(outer_query.annotation_select.values()) result = next(compiler.apply_converters((result,), converters)) return dict(zip(outer_query.annotation_select, result)) def get_count(self, using): """ Perform a COUNT() query using the current filter constraints. """ obj = self.clone() obj.add_annotation(Count("*"), alias="__count", is_summary=True) return obj.get_aggregation(using, ["__count"])["__count"] def has_filters(self): return self.where def exists(self, limit=True): q = self.clone() if not (q.distinct and q.is_sliced): if q.group_by is True: q.add_fields( (f.attname for f in self.model._meta.concrete_fields), False ) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. q.set_group_by(allow_aliases=False) q.clear_select_clause() if q.combined_queries and q.combinator == "union": q.combined_queries = tuple( combined_query.exists(limit=False) for combined_query in q.combined_queries ) q.clear_ordering(force=True) if limit: q.set_limits(high=1) q.add_annotation(Value(1), "a") return q def has_results(self, using): q = self.exists(using) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() for option_name in options: if ( not EXPLAIN_OPTIONS_PATTERN.fullmatch(option_name) or "--" in option_name ): raise ValueError(f"Invalid option name: {option_name!r}.") q.explain_info = ExplainInfo(format, options) compiler = q.get_compiler(using=using) return "\n".join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query. 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ if self.model != rhs.model: raise TypeError("Cannot combine queries on two different base models.") if self.is_sliced: raise TypeError("Cannot combine queries once a slice has been taken.") if self.distinct != rhs.distinct: raise TypeError("Cannot combine a unique query with a non-unique query.") if self.distinct_fields != rhs.distinct_fields: raise TypeError("Cannot combine queries with different distinct fields.") # If lhs and rhs shares the same alias prefix, it is possible to have # conflicting alias changes like T4 -> T5, T5 -> T6, which might end up # as T4 -> T6 while combining two querysets. To prevent this, change an # alias prefix of the rhs and update current aliases accordingly, # except if the alias is the base table since it must be present in the # query on both sides. initial_alias = self.get_initial_alias() rhs.bump_prefix(self, exclude={initial_alias}) # Work out how to relabel the rhs aliases, if necessary. 
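        # Editor's note (illustrative): when both sides used the default "T"
        # prefix, an rhs alias such as T3 may map to T4 here so the merged
        # alias_map stays collision-free.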
change_map = {} conjunction = connector == AND # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER ) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. self.unref_alias(new_alias) joinpromoter.add_votes(rhs_votes) joinpromoter.update_join_types(self) # Combine subqueries aliases to ensure aliases relabelling properly # handle subqueries when combining where and select clauses. self.subq_aliases |= rhs.subq_aliases # Now relabel a copy of the rhs where-clause and add it to the current # one. w = rhs.where.clone() w.relabel_aliases(change_map) self.where.add(w, connector) # Selection columns and extra extensions are those provided by 'rhs'. if rhs.select: self.set_select([col.relabeled_clone(change_map) for col in rhs.select]) else: self.select = () if connector == OR: # It would be nice to be able to handle this, but the queries don't # really make sense (or return consistent value sets). Not worth # the extra complexity when you can write a real query instead. if self.extra and rhs.extra: raise ValueError( "When merging querysets using 'or', you cannot have " "extra(select=...) on both sides." ) self.extra.update(rhs.extra) extra_select_mask = set() if self.extra_select_mask is not None: extra_select_mask.update(self.extra_select_mask) if rhs.extra_select_mask is not None: extra_select_mask.update(rhs.extra_select_mask) if extra_select_mask: self.set_extra_mask(extra_select_mask) self.extra_tables += rhs.extra_tables # Ordering uses the 'rhs' ordering, unless it has none, in which case # the current ordering is used. self.order_by = rhs.order_by or self.order_by self.extra_order_by = rhs.extra_order_by or self.extra_order_by def _get_defer_select_mask(self, opts, mask, select_mask=None): if select_mask is None: select_mask = {} select_mask[opts.pk] = {} # All concrete fields that are not part of the defer mask must be # loaded. 
If a relational field is encountered it gets added to the # mask for it be considered if `select_related` and the cycle continues # by recursively caling this function. for field in opts.concrete_fields: field_mask = mask.pop(field.name, None) if field_mask is None: select_mask.setdefault(field, {}) elif field_mask: if not field.is_relation: raise FieldError(next(iter(field_mask))) field_select_mask = select_mask.setdefault(field, {}) related_model = field.remote_field.model._meta.concrete_model self._get_defer_select_mask( related_model._meta, field_mask, field_select_mask ) # Remaining defer entries must be references to reverse relationships. # The following code is expected to raise FieldError if it encounters # a malformed defer entry. for field_name, field_mask in mask.items(): if filtered_relation := self._filtered_relations.get(field_name): relation = opts.get_field(filtered_relation.relation_name) field_select_mask = select_mask.setdefault((field_name, relation), {}) field = relation.field else: field = opts.get_field(field_name).field field_select_mask = select_mask.setdefault(field, {}) related_model = field.model._meta.concrete_model self._get_defer_select_mask( related_model._meta, field_mask, field_select_mask ) return select_mask def _get_only_select_mask(self, opts, mask, select_mask=None): if select_mask is None: select_mask = {} select_mask[opts.pk] = {} # Only include fields mentioned in the mask. for field_name, field_mask in mask.items(): field = opts.get_field(field_name) field_select_mask = select_mask.setdefault(field, {}) if field_mask: if not field.is_relation: raise FieldError(next(iter(field_mask))) related_model = field.remote_field.model._meta.concrete_model self._get_only_select_mask( related_model._meta, field_mask, field_select_mask ) return select_mask def get_select_mask(self): """ Convert the self.deferred_loading data structure to an alternate data structure, describing the field that *will* be loaded. This is used to compute the columns to select from the database and also by the QuerySet class to work out which fields are being initialized on each model. Models that have all their fields included aren't mentioned in the result, only those that have field restrictions in place. """ field_names, defer = self.deferred_loading if not field_names: return {} mask = {} for field_name in field_names: part_mask = mask for part in field_name.split(LOOKUP_SEP): part_mask = part_mask.setdefault(part, {}) opts = self.get_meta() if defer: return self._get_defer_select_mask(opts, mask) return self._get_only_select_mask(opts, mask) def table_alias(self, table_name, create=False, filtered_relation=None): """ Return a table alias for the given table_name and whether this is a new alias or not. If 'create' is true, a new alias is always created. Otherwise, the most recently created alias for the table (if one exists) is reused. """ alias_list = self.table_map.get(table_name) if not create and alias_list: alias = alias_list[0] self.alias_refcount[alias] += 1 return alias, False # Create a new alias for this table. if alias_list: alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1) alias_list.append(alias) else: # The first occurrence of a table uses the table name directly. 
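            # Editor's note (illustrative): a second join against the same
            # table would instead receive a generated alias such as "T2"
            # (alias_prefix plus position), created in the branch above.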
alias = ( filtered_relation.alias if filtered_relation is not None else table_name ) self.table_map[table_name] = [alias] self.alias_refcount[alias] = 1 return alias, True def ref_alias(self, alias): """Increases the reference count for this alias.""" self.alias_refcount[alias] += 1 def unref_alias(self, alias, amount=1): """Decreases the reference count for this alias.""" self.alias_refcount[alias] -= amount def promote_joins(self, aliases): """ Promote recursively the join type of given aliases and its children to an outer join. If 'unconditional' is False, only promote the join if it is nullable or the parent join is an outer join. The children promotion is done to avoid join chains that contain a LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted, then we must also promote b->c automatically, or otherwise the promotion of a->b doesn't actually change anything in the query results. """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type is None: # This is the base table (first FROM entry) - this table # isn't really joined at all in the query, so we should not # alter its join type. continue # Only the first alias (skipped above) should have None join_type assert self.alias_map[alias].join_type is not None parent_alias = self.alias_map[alias].parent_alias parent_louter = ( parent_alias and self.alias_map[parent_alias].join_type == LOUTER ) already_louter = self.alias_map[alias].join_type == LOUTER if (self.alias_map[alias].nullable or parent_louter) and not already_louter: self.alias_map[alias] = self.alias_map[alias].promote() # Join type of 'alias' changed, so re-examine all aliases that # refer to this one. aliases.extend( join for join in self.alias_map if self.alias_map[join].parent_alias == alias and join not in aliases ) def demote_joins(self, aliases): """ Change join type from LOUTER to INNER for all joins in aliases. Similarly to promote_joins(), this method must ensure no join chains containing first an outer, then an inner join are generated. If we are demoting b->c join in chain a LOUTER b LOUTER c then we must demote a->b automatically, or otherwise the demotion of b->c doesn't actually change anything in the query results. . """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type == LOUTER: self.alias_map[alias] = self.alias_map[alias].demote() parent_alias = self.alias_map[alias].parent_alias if self.alias_map[parent_alias].join_type == INNER: aliases.append(parent_alias) def reset_refcounts(self, to_counts): """ Reset reference counts for aliases so that they match the value passed in `to_counts`. """ for alias, cur_refcount in self.alias_refcount.copy().items(): unref_amount = cur_refcount - to_counts.get(alias, 0) self.unref_alias(alias, unref_amount) def change_aliases(self, change_map): """ Change the aliases in change_map (which maps old-alias -> new-alias), relabelling any references to them in select columns and the where clause. """ # If keys and values of change_map were to intersect, an alias might be # updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending # on their order in change_map. assert set(change_map).isdisjoint(change_map.values()) # 1. Update references in "select" (normal columns plus aliases), # "group by" and "where". 
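        # Editor's note (illustrative): given change_map={"T4": "U0"}, every
        # Col bound to alias "T4" in the where clause, group by, select, and
        # annotations is rebound to "U0" before alias_map itself is renamed.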
self.where.relabel_aliases(change_map) if isinstance(self.group_by, tuple): self.group_by = tuple( [col.relabeled_clone(change_map) for col in self.group_by] ) self.select = tuple([col.relabeled_clone(change_map) for col in self.select]) self.annotations = self.annotations and { key: col.relabeled_clone(change_map) for key, col in self.annotations.items() } # 2. Rename the alias in the internal table/alias datastructures. for old_alias, new_alias in change_map.items(): if old_alias not in self.alias_map: continue alias_data = self.alias_map[old_alias].relabeled_clone(change_map) self.alias_map[new_alias] = alias_data self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break self.external_aliases = { # Table is aliased or it's being changed and thus is aliased. change_map.get(alias, alias): (aliased or alias in change_map) for alias, aliased in self.external_aliases.items() } def bump_prefix(self, other_query, exclude=None): """ Change the alias prefix to the next letter in the alphabet in a way that the other query's aliases and this query's aliases will not conflict. Even tables that previously had no alias will get an alias after this call. To prevent changing aliases use the exclude parameter. """ def prefix_gen(): """ Generate a sequence of characters in alphabetical order: -> 'A', 'B', 'C', ... When the alphabet is finished, the sequence will continue with the Cartesian product: -> 'AA', 'AB', 'AC', ... """ alphabet = ascii_uppercase prefix = chr(ord(self.alias_prefix) + 1) yield prefix for n in count(1): seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet for s in product(seq, repeat=n): yield "".join(s) prefix = None if self.alias_prefix != other_query.alias_prefix: # No clashes between self and outer query should be possible. return # Explicitly avoid infinite loop. The constant divider is based on how # much depth recursive subquery references add to the stack. This value # might need to be adjusted when adding or removing function calls from # the code path in charge of performing these operations. local_recursion_limit = sys.getrecursionlimit() // 16 for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RecursionError( "Maximum recursion depth exceeded: too many subqueries." ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases) if exclude is None: exclude = {} self.change_aliases( { alias: "%s%d" % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) if alias not in exclude } ) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) elif self.model: alias = self.join(self.base_table_class(self.get_meta().db_table, None)) else: alias = None return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. 
""" return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None, reuse_with_filtered_relation=False): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a base_table_class or join_class. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. The 'reuse_with_filtered_relation' parameter is used when computing FilteredRelation instances. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable. """ if reuse_with_filtered_relation and reuse: reuse_aliases = [ a for a, j in self.alias_map.items() if a in reuse and j.equals(join) ] else: reuse_aliases = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join ] if reuse_aliases: if join.table_alias in reuse_aliases: reuse_alias = join.table_alias else: # Reuse the most recent alias of the joined table # (a many-to-many relation may be joined multiple times). reuse_alias = reuse_aliases[-1] self.ref_alias(reuse_alias) return reuse_alias # No reuse is possible, so we need a new alias. alias, _ = self.table_alias( join.table_name, create=True, filtered_relation=join.filtered_relation ) if join.join_type: if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable: join_type = LOUTER else: join_type = INNER join.join_type = join_type join.table_alias = alias self.alias_map[alias] = join return alias def join_parent_model(self, opts, model, alias, seen): """ Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. """ if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def check_alias(self, alias): if FORBIDDEN_ALIAS_PATTERN.search(alias): raise ValueError( "Column aliases cannot contain whitespace characters, quotation marks, " "semicolons, or SQL comments." ) def add_annotation(self, annotation, alias, is_summary=False, select=True): """Add a single annotation expression to the Query.""" self.check_alias(alias) annotation = annotation.resolve_expression( self, allow_joins=True, reuse=None, summarize=is_summary ) if select: self.append_annotation_mask([alias]) else: self.set_annotation_mask(set(self.annotation_select).difference({alias})) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. 
clone.bump_prefix(query) clone.subquery = True clone.where.resolve_expression(query, *args, **kwargs) # Resolve combined queries. if clone.combinator: clone.combined_queries = tuple( [ combined_query.resolve_expression(query, *args, **kwargs) for combined_query in clone.combined_queries ] ) for key, value in clone.annotations.items(): resolved = value.resolve_expression(query, *args, **kwargs) if hasattr(resolved, "external_aliases"): resolved.external_aliases.update(clone.external_aliases) clone.annotations[key] = resolved # Outer query's aliases are considered external. for alias, table in query.alias_map.items(): clone.external_aliases[alias] = ( isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias ) or ( isinstance(table, BaseTable) and table.table_name != table.table_alias ) return clone def get_external_cols(self): exprs = chain(self.annotations.values(), self.where.children) return [ col for col in self._gen_cols(exprs, include_external=True) if col.alias in self.external_aliases ] def get_group_by_cols(self, wrapper=None): # If wrapper is referenced by an alias for an explicit GROUP BY through # values() a reference to this expression and not the self must be # returned to ensure external column references are not grouped against # as well. external_cols = self.get_external_cols() if any(col.possibly_multivalued for col in external_cols): return [wrapper or self] return external_cols def as_sql(self, compiler, connection): # Some backends (e.g. Oracle) raise an error when a subquery contains # unnecessary ORDER BY clause. if ( self.subquery and not connection.features.ignores_unnecessary_order_by_in_subqueries ): self.clear_ordering(force=False) for query in self.combined_queries: query.clear_ordering(force=False) sql, params = self.get_compiler(connection=connection).as_sql() if self.subquery: sql = "(%s)" % sql return sql, params def resolve_lookup_value(self, value, can_reuse, allow_joins): if hasattr(value, "resolve_expression"): value = value.resolve_expression( self, reuse=can_reuse, allow_joins=allow_joins, ) elif isinstance(value, (list, tuple)): # The items of the iterable may be expressions and therefore need # to be resolved independently. values = ( self.resolve_lookup_value(sub_value, can_reuse, allow_joins) for sub_value in value ) type_ = type(value) if hasattr(type_, "_make"): # namedtuple return type_(*values) return type_(values) return value def solve_lookup_type(self, lookup): """ Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains'). """ lookup_splitted = lookup.split(LOOKUP_SEP) if self.annotations: expression, expression_lookups = refs_expression( lookup_splitted, self.annotations ) if expression: return expression_lookups, (), expression _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)] if len(lookup_parts) > 1 and not field_parts: raise FieldError( 'Invalid lookup "%s" for model %s".' % (lookup, self.get_meta().model.__name__) ) return lookup_parts, field_parts, False def check_query_object_type(self, value, opts, field): """ Check whether the object passed while querying is of the correct type. If not, raise a ValueError specifying the wrong object. """ if hasattr(value, "_meta"): if not check_rel_lookup_compatibility(value._meta.model, opts, field): raise ValueError( 'Cannot query "%s": Must be "%s" instance.' 
                    % (value, opts.object_name)
                )

    def check_related_objects(self, field, value, opts):
        """Check the type of object passed to query relations."""
        if field.is_relation:
            # Check that the field and the queryset use the same model in a
            # query like .filter(author=Author.objects.all()). For example, the
            # opts would be Author's (from the author field) and value.model
            # would be Author.objects.all() queryset's .model (Author also).
            # The field is the related field on the lhs side.
            if (
                isinstance(value, Query)
                and not value.has_select_fields
                and not check_rel_lookup_compatibility(value.model, opts, field)
            ):
                raise ValueError(
                    'Cannot use QuerySet for "%s": Use a QuerySet for "%s".'
                    % (value.model._meta.object_name, opts.object_name)
                )
            elif hasattr(value, "_meta"):
                self.check_query_object_type(value, opts, field)
            elif hasattr(value, "__iter__"):
                for v in value:
                    self.check_query_object_type(v, opts, field)

    def check_filterable(self, expression):
        """Raise an error if expression cannot be used in a WHERE clause."""
        if hasattr(expression, "resolve_expression") and not getattr(
            expression, "filterable", True
        ):
            raise NotSupportedError(
                expression.__class__.__name__ + " is disallowed in the filter "
                "clause."
            )
        if hasattr(expression, "get_source_expressions"):
            for expr in expression.get_source_expressions():
                self.check_filterable(expr)

    def build_lookup(self, lookups, lhs, rhs):
        """
        Try to extract transforms and lookup from given lhs.

        The lhs value is something that works like SQLExpression.
        The rhs value is what the lookup is going to compare against.
        The 'lookups' is a list of names to extract using get_lookup()
        and get_transform().
        """
        # __exact is the default lookup if one isn't given.
        *transforms, lookup_name = lookups or ["exact"]
        for name in transforms:
            lhs = self.try_transform(lhs, name)
        # First try get_lookup() so that the lookup takes precedence if the lhs
        # supports both transform and lookup for the name.
        lookup_class = lhs.get_lookup(lookup_name)
        if not lookup_class:
            # A lookup wasn't found. Try to interpret the name as a transform
            # and do an Exact lookup against it.
            lhs = self.try_transform(lhs, lookup_name)
            lookup_name = "exact"
            lookup_class = lhs.get_lookup(lookup_name)
            if not lookup_class:
                return

        lookup = lookup_class(lhs, rhs)
        # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
        # uses of None as a query value unless the lookup supports it.
        if lookup.rhs is None and not lookup.can_use_none_as_rhs:
            if lookup_name not in ("exact", "iexact"):
                raise ValueError("Cannot use None as a query value")
            return lhs.get_lookup("isnull")(lhs, True)

        # For Oracle '' is equivalent to null. The check must be done at this
        # stage because join promotion can't be done in the compiler. Using
        # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
        # A similar thing is done in is_nullable(), too.
        if (
            lookup_name == "exact"
            and lookup.rhs == ""
            and connections[
                DEFAULT_DB_ALIAS
            ].features.interprets_empty_strings_as_nulls
        ):
            return lhs.get_lookup("isnull")(lhs, True)

        return lookup

    def try_transform(self, lhs, name):
        """
        Helper method for build_lookup(). Try to fetch and initialize
        a transform for the 'name' parameter from lhs.
        """
        transform_class = lhs.get_transform(name)
        if transform_class:
            return transform_class(lhs)
        else:
            output_field = lhs.output_field.__class__
            suggested_lookups = difflib.get_close_matches(
                name, output_field.get_lookups()
            )
            if suggested_lookups:
                suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups)
            else:
                suggestion = "."
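            # Illustrative failure mode (not part of Django itself), assuming
            # a hypothetical model with a CharField named "title":
            #
            #     Model.objects.filter(title__icontans="x")
            #
            # has no "icontans" transform or lookup, so the FieldError below
            # is raised with a message roughly like:
            #     Unsupported lookup 'icontans' for CharField or join on the
            #     field not permitted, perhaps you meant icontains?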
            raise FieldError(
                "Unsupported lookup '%s' for %s or join on the field not "
                "permitted%s" % (name, output_field.__name__, suggestion)
            )

    def build_filter(
        self,
        filter_expr,
        branch_negated=False,
        current_negated=False,
        can_reuse=None,
        allow_joins=True,
        split_subq=True,
        reuse_with_filtered_relation=False,
        check_filterable=True,
    ):
        """
        Build a WhereNode for a single filter clause but don't add it
        to this Query. Query.add_q() will then add this filter to the where
        Node.

        The 'branch_negated' tells us if the current branch contains any
        negations. This will be used to determine if subqueries are needed.

        The 'current_negated' is used to determine if the current filter is
        negated or not and this will be used to determine if IS NULL filtering
        is needed.

        The difference between current_negated and branch_negated is that
        branch_negated is set on first negation, but current_negated is
        flipped for each negation.

        Note that add_filter will not do any negating itself; that is done
        higher up in the code by add_q().

        The 'can_reuse' is a set of reusable joins for multijoins.

        If 'reuse_with_filtered_relation' is True, then only joins in can_reuse
        will be reused.

        The method will create a filter clause that can be added to the current
        query. However, if the filter isn't added to the query then the caller
        is responsible for unreffing the joins used.
        """
        if isinstance(filter_expr, dict):
            raise FieldError("Cannot parse keyword query as dict")
        if isinstance(filter_expr, Q):
            return self._add_q(
                filter_expr,
                branch_negated=branch_negated,
                current_negated=current_negated,
                used_aliases=can_reuse,
                allow_joins=allow_joins,
                split_subq=split_subq,
                check_filterable=check_filterable,
            )
        if hasattr(filter_expr, "resolve_expression"):
            if not getattr(filter_expr, "conditional", False):
                raise TypeError("Cannot filter against a non-conditional expression.")
            condition = filter_expr.resolve_expression(self, allow_joins=allow_joins)
            if not isinstance(condition, Lookup):
                condition = self.build_lookup(["exact"], condition, True)
            return WhereNode([condition], connector=AND), []
        arg, value = filter_expr
        if not arg:
            raise FieldError("Cannot parse keyword query %r" % arg)
        lookups, parts, reffed_expression = self.solve_lookup_type(arg)

        if check_filterable:
            self.check_filterable(reffed_expression)

        if not allow_joins and len(parts) > 1:
            raise FieldError("Joined field references are not permitted in this query")

        pre_joins = self.alias_refcount.copy()
        value = self.resolve_lookup_value(value, can_reuse, allow_joins)
        used_joins = {
            k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)
        }

        if check_filterable:
            self.check_filterable(value)

        if reffed_expression:
            condition = self.build_lookup(lookups, reffed_expression, value)
            return WhereNode([condition], connector=AND), []

        opts = self.get_meta()
        alias = self.get_initial_alias()
        allow_many = not branch_negated or not split_subq

        try:
            join_info = self.setup_joins(
                parts,
                opts,
                alias,
                can_reuse=can_reuse,
                allow_many=allow_many,
                reuse_with_filtered_relation=reuse_with_filtered_relation,
            )

            # Prevent iterator from being consumed by check_related_objects()
            if isinstance(value, Iterator):
                value = list(value)
            self.check_related_objects(join_info.final_field, value, join_info.opts)

            # split_exclude() needs to know which joins were generated for the
            # lookup parts
            self._lookup_joins = join_info.joins
        except MultiJoin as e:
            return self.split_exclude(filter_expr, can_reuse, e.names_with_path)

        # Update used_joins before trimming since they are reused to determine
        # which joins could be later promoted to INNER.
        used_joins.update(join_info.joins)
        targets, alias, join_list = self.trim_joins(
            join_info.targets, join_info.joins, join_info.path
        )
        if can_reuse is not None:
            can_reuse.update(join_list)

        if join_info.final_field.is_relation:
            if len(targets) == 1:
                col = self._get_col(targets[0], join_info.final_field, alias)
            else:
                col = MultiColSource(
                    alias, targets, join_info.targets, join_info.final_field
                )
        else:
            col = self._get_col(targets[0], join_info.final_field, alias)

        condition = self.build_lookup(lookups, col, value)
        lookup_type = condition.lookup_name
        clause = WhereNode([condition], connector=AND)

        require_outer = (
            lookup_type == "isnull" and condition.rhs is True and not current_negated
        )
        if (
            current_negated
            and (lookup_type != "isnull" or condition.rhs is False)
            and condition.rhs is not None
        ):
            require_outer = True
            if lookup_type != "isnull":
                # The condition added here will be SQL like this:
                # NOT (col IS NOT NULL), where the first NOT is added in
                # upper layers of code. The reason for addition is that if col
                # is null, then col != someval will result in SQL "unknown"
                # which isn't the same as in Python. The Python None handling
                # is wanted, and it can be gotten by
                # (col IS NULL OR col != someval)
                #   <=>
                # NOT (col IS NOT NULL AND col = someval).
                if (
                    self.is_nullable(targets[0])
                    or self.alias_map[join_list[-1]].join_type == LOUTER
                ):
                    lookup_class = targets[0].get_lookup("isnull")
                    col = self._get_col(targets[0], join_info.targets[0], alias)
                    clause.add(lookup_class(col, False), AND)
                # If someval is a nullable column, someval IS NOT NULL is
                # added.
                if isinstance(value, Col) and self.is_nullable(value.target):
                    lookup_class = value.target.get_lookup("isnull")
                    clause.add(lookup_class(value, False), AND)
        return clause, used_joins if not require_outer else ()

    def add_filter(self, filter_lhs, filter_rhs):
        self.add_q(Q((filter_lhs, filter_rhs)))

    def add_q(self, q_object):
        """
        A preprocessor for the internal _add_q(). Responsible for doing final
        join promotion.
        """
        # For join promotion this case is doing an AND for the added q_object
        # and existing conditions. So, any existing inner join forces the join
        # type to remain inner. Existing outer joins can however be demoted.
        # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
        # rel_a doesn't produce any rows, then the whole condition must fail.
        # So, demotion is OK.)
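        # Concrete illustration (not part of Django itself): if an earlier
        # .filter(Q(rel_a__col=1) | Q(rel_b__col=2)) left rel_a as LOUTER,
        # a later .filter(rel_a__col__gte=0) is ANDed in here, and rel_a can
        # safely be demoted back to INNER by demote_joins() below.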
        existing_inner = {
            a for a in self.alias_map if self.alias_map[a].join_type == INNER
        }
        clause, _ = self._add_q(q_object, self.used_aliases)
        if clause:
            self.where.add(clause, AND)
        self.demote_joins(existing_inner)

    def build_where(self, filter_expr):
        return self.build_filter(filter_expr, allow_joins=False)[0]

    def clear_where(self):
        self.where = WhereNode()

    def _add_q(
        self,
        q_object,
        used_aliases,
        branch_negated=False,
        current_negated=False,
        allow_joins=True,
        split_subq=True,
        check_filterable=True,
    ):
        """Add a Q-object to the current filter."""
        connector = q_object.connector
        current_negated ^= q_object.negated
        branch_negated = branch_negated or q_object.negated
        target_clause = WhereNode(connector=connector, negated=q_object.negated)
        joinpromoter = JoinPromoter(
            q_object.connector, len(q_object.children), current_negated
        )
        for child in q_object.children:
            child_clause, needed_inner = self.build_filter(
                child,
                can_reuse=used_aliases,
                branch_negated=branch_negated,
                current_negated=current_negated,
                allow_joins=allow_joins,
                split_subq=split_subq,
                check_filterable=check_filterable,
            )
            joinpromoter.add_votes(needed_inner)
            if child_clause:
                target_clause.add(child_clause, connector)
        needed_inner = joinpromoter.update_join_types(self)
        return target_clause, needed_inner

    def build_filtered_relation_q(
        self, q_object, reuse, branch_negated=False, current_negated=False
    ):
        """Add a FilteredRelation object to the current filter."""
        connector = q_object.connector
        current_negated ^= q_object.negated
        branch_negated = branch_negated or q_object.negated
        target_clause = WhereNode(connector=connector, negated=q_object.negated)
        for child in q_object.children:
            if isinstance(child, Node):
                child_clause = self.build_filtered_relation_q(
                    child,
                    reuse=reuse,
                    branch_negated=branch_negated,
                    current_negated=current_negated,
                )
            else:
                child_clause, _ = self.build_filter(
                    child,
                    can_reuse=reuse,
                    branch_negated=branch_negated,
                    current_negated=current_negated,
                    allow_joins=True,
                    split_subq=False,
                    reuse_with_filtered_relation=True,
                )
            target_clause.add(child_clause, connector)
        return target_clause

    def add_filtered_relation(self, filtered_relation, alias):
        filtered_relation.alias = alias
        lookups = dict(get_children_from_q(filtered_relation.condition))
        relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(
            filtered_relation.relation_name
        )
        if relation_lookup_parts:
            raise ValueError(
                "FilteredRelation's relation_name cannot contain lookups "
                "(got %r)." % filtered_relation.relation_name
            )
        for lookup in chain(lookups):
            lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
            shift = 2 if not lookup_parts else 1
            lookup_field_path = lookup_field_parts[:-shift]
            for idx, lookup_field_part in enumerate(lookup_field_path):
                if len(relation_field_parts) > idx:
                    if relation_field_parts[idx] != lookup_field_part:
                        raise ValueError(
                            "FilteredRelation's condition doesn't support "
                            "relations outside the %r (got %r)."
                            % (filtered_relation.relation_name, lookup)
                        )
                else:
                    raise ValueError(
                        "FilteredRelation's condition doesn't support nested "
                        "relations deeper than the relation_name (got %r for "
                        "%r)." % (lookup, filtered_relation.relation_name)
                    )
        self._filtered_relations[filtered_relation.alias] = filtered_relation

    def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
        """
        Walk the list of names and turn them into PathInfo tuples. A single
        name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we start the name resolving from, 'allow_many' is as for setup_joins(). If fail_on_missing is set to True, then a name that can't be resolved will generate a FieldError. Return a list of PathInfo tuples. In addition return the final field (the last used join field) and target (which is a field guaranteed to contain the same value as the final field). Finally, return those names that weren't found (which are likely transforms and the final lookup). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == "pk": name = opts.pk.name field = None filtered_relation = None try: if opts is None: raise FieldDoesNotExist field = opts.get_field(name) except FieldDoesNotExist: if name in self.annotation_select: field = self.annotation_select[name].output_field elif name in self._filtered_relations and pos == 0: filtered_relation = self._filtered_relations[name] if LOOKUP_SEP in filtered_relation.relation_name: parts = filtered_relation.relation_name.split(LOOKUP_SEP) filtered_relation_path, field, _, _ = self.names_to_path( parts, opts, allow_many, fail_on_missing, ) path.extend(filtered_relation_path[:-1]) else: field = opts.get_field(filtered_relation.relation_name) if field is not None: # Fields that contain one-to-many relations with a generic # model (like a GenericForeignKey) cannot generate reverse # relations and therefore cannot be used for reverse querying. if field.is_relation and not field.related_model: raise FieldError( "Field %r does not generate an automatic reverse " "relation and therefore cannot be used for reverse " "querying. If it is a GenericForeignKey, consider " "adding a GenericRelation." % name ) try: model = field.model._meta.concrete_model except AttributeError: # QuerySet.annotate() may introduce fields that aren't # attached to a model. model = None else: # We didn't find the current field, so move position back # one step. pos -= 1 if pos == -1 or fail_on_missing: available = sorted( [ *get_field_names_from_opts(opts), *self.annotation_select, *self._filtered_relations, ] ) raise FieldError( "Cannot resolve keyword '%s' into field. " "Choices are: %s" % (name, ", ".join(available)) ) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if opts is not None and model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, "path_infos"): if filtered_relation: pathinfos = field.get_path_info(filtered_relation) else: pathinfos = field.path_infos if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. Join on '%s'" " not permitted." 
% (names[pos + 1], name) ) break return path, final_field, targets, names[pos + 1 :] def setup_joins( self, names, opts, alias, can_reuse=None, allow_many=True, reuse_with_filtered_relation=False, ): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). The 'reuse_with_filtered_relation' can be used to force 'can_reuse' parameter and force the relation on the given connections. If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function that takes a field and alias and is equivalent to `field.get_col(alias)` in the simple case but wraps field transforms if they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to that value. Final field is needed for example in some value conversions (convert 'obj' in fk__id=obj to pk val using the foreign key field for example). """ joins = [alias] # The transform can't be applied yet, as joins must be trimmed later. # To avoid making every caller of this method look up transforms # directly, compute transforms here and create a partial that converts # fields to the appropriate wrapped version. def final_transformer(field, alias): if not self.alias_cols: alias = None return field.get_col(alias) # Try resolving all the names as fields first. If there's an error, # treat trailing names as lookups until a field can be resolved. last_field_exception = None for pivot in range(len(names), 0, -1): try: path, final_field, targets, rest = self.names_to_path( names[:pivot], opts, allow_many, fail_on_missing=True, ) except FieldError as exc: if pivot == 1: # The first item cannot be a lookup, so it's safe # to raise the field error here. raise else: last_field_exception = exc else: # The transforms are the remaining items that couldn't be # resolved into fields. transforms = names[pivot:] break for name in transforms: def transform(field, alias, *, name, previous): try: wrapped = previous(field, alias) return self.try_transform(wrapped, name) except FieldError: # FieldError is raised if the transform doesn't exist. if isinstance(final_field, Field) and last_field_exception: raise last_field_exception else: raise final_transformer = functools.partial( transform, name=name, previous=final_transformer ) final_transformer.has_transforms = True # Then, add the path to the query's joins. Note that we can't trim # joins at this stage - we will need the information about join type # of the trimmed joins. 
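        # Illustrative walk-through (not part of Django itself), assuming a
        # hypothetical Book model with a ForeignKey named "author":
        #
        #     Book.objects.filter(author__name="x")
        #
        # reaches this loop with a one-hop path (book -> author); each
        # iteration builds a join_class entry (book JOIN author) and appends
        # the alias it got back from self.join() to `joins`.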
        for join in path:
            if join.filtered_relation:
                filtered_relation = join.filtered_relation.clone()
                table_alias = filtered_relation.alias
            else:
                filtered_relation = None
                table_alias = None
            opts = join.to_opts
            if join.direct:
                nullable = self.is_nullable(join.join_field)
            else:
                nullable = True
            connection = self.join_class(
                opts.db_table,
                alias,
                table_alias,
                INNER,
                join.join_field,
                nullable,
                filtered_relation=filtered_relation,
            )
            reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
            alias = self.join(
                connection,
                reuse=reuse,
                reuse_with_filtered_relation=reuse_with_filtered_relation,
            )
            joins.append(alias)
            if filtered_relation:
                filtered_relation.path = joins[:]
        return JoinInfo(final_field, targets, opts, joins, path, final_transformer)

    def trim_joins(self, targets, joins, path):
        """
        The 'target' parameter is the final field being joined to, 'joins'
        is the full list of join aliases. The 'path' contains the PathInfos
        used to create the joins.

        Return the final target field and table alias and the new active
        joins.

        Always trim any direct join if the target column is already in the
        previous table. Can't trim reverse joins as it's unknown if there's
        anything on the other side of the join.
        """
        joins = joins[:]
        for pos, info in enumerate(reversed(path)):
            if len(joins) == 1 or not info.direct:
                break
            if info.filtered_relation:
                break
            join_targets = {t.column for t in info.join_field.foreign_related_fields}
            cur_targets = {t.column for t in targets}
            if not cur_targets.issubset(join_targets):
                break
            targets_dict = {
                r[1].column: r[0]
                for r in info.join_field.related_fields
                if r[1].column in cur_targets
            }
            targets = tuple(targets_dict[t.column] for t in targets)
            self.unref_alias(joins.pop())
        return targets, joins[-1], joins

    @classmethod
    def _gen_cols(cls, exprs, include_external=False):
        for expr in exprs:
            if isinstance(expr, Col):
                yield expr
            elif include_external and callable(
                getattr(expr, "get_external_cols", None)
            ):
                yield from expr.get_external_cols()
            elif hasattr(expr, "get_source_expressions"):
                yield from cls._gen_cols(
                    expr.get_source_expressions(),
                    include_external=include_external,
                )

    @classmethod
    def _gen_col_aliases(cls, exprs):
        yield from (expr.alias for expr in cls._gen_cols(exprs))

    def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
        annotation = self.annotations.get(name)
        if annotation is not None:
            if not allow_joins:
                for alias in self._gen_col_aliases([annotation]):
                    if isinstance(self.alias_map[alias], Join):
                        raise FieldError(
                            "Joined field references are not permitted in this query"
                        )
            if summarize:
                # Summarize currently means we are doing an aggregate() query
                # which is executed as a wrapped subquery if any of the
                # aggregate() elements reference an existing annotation. In
                # that case we need to return a Ref to the subquery's annotation.
                if name not in self.annotation_select:
                    raise FieldError(
                        "Cannot aggregate over the '%s' alias. Use annotate() "
                        "to promote it."
% name ) return Ref(name, self.annotation_select[name]) else: return annotation else: field_list = name.split(LOOKUP_SEP) annotation = self.annotations.get(field_list[0]) if annotation is not None: for transform in field_list[1:]: annotation = self.try_transform(annotation, transform) return annotation join_info = self.setup_joins( field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse ) targets, final_alias, join_list = self.trim_joins( join_info.targets, join_info.joins, join_info.path ) if not allow_joins and len(join_list) > 1: raise FieldError( "Joined field references are not permitted in this query" ) if len(targets) > 1: raise FieldError( "Referencing multicolumn fields with F() objects isn't supported" ) # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. transform = join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) return transform def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT EXISTS( SELECT 1 FROM child WHERE name = 'foo' AND child.parent_id = parent.id LIMIT 1 ) """ # Generate the inner query. query = self.__class__(self.model) query._filtered_relations = self._filtered_relations filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, OuterRef): filter_rhs = OuterRef(filter_rhs) elif isinstance(filter_rhs, F): filter_rhs = OuterRef(filter_rhs.name) query.add_filter(filter_lhs, filter_rhs) query.clear_ordering(force=True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) col = query.select[0] select_field = col.target alias = col.alias if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup("exact") # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases[alias] = True lookup_class = select_field.get_lookup("exact") lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix)) query.where.add(lookup, AND) condition, needed_inner = self.build_filter(Exists(query)) if contains_louter: or_null_condition, _ = self.build_filter( ("%s__isnull" % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse, ) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. 
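        # Rough shape of the result (illustrative, not part of Django itself)
        # for exclude(child__name='foo') when the trimmed prefix contains a
        # LEFT OUTER join:
        #
        #     WHERE NOT EXISTS (SELECT 1 FROM child WHERE ...)
        #         OR <trimmed_prefix> IS NULL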
        return condition, needed_inner

    def set_empty(self):
        self.where.add(NothingNode(), AND)
        for query in self.combined_queries:
            query.set_empty()

    def is_empty(self):
        return any(isinstance(c, NothingNode) for c in self.where.children)

    def set_limits(self, low=None, high=None):
        """
        Adjust the limits on the rows retrieved. Use low/high to set these,
        as it makes it more Pythonic to read and write. When the SQL query is
        created, convert them to the appropriate offset and limit values.

        Apply any limits passed in here to the existing constraints. Add low
        to the current low value and clamp both to any existing high value.
        """
        if high is not None:
            if self.high_mark is not None:
                self.high_mark = min(self.high_mark, self.low_mark + high)
            else:
                self.high_mark = self.low_mark + high
        if low is not None:
            if self.high_mark is not None:
                self.low_mark = min(self.high_mark, self.low_mark + low)
            else:
                self.low_mark = self.low_mark + low

        if self.low_mark == self.high_mark:
            self.set_empty()

    def clear_limits(self):
        """Clear any existing limits."""
        self.low_mark, self.high_mark = 0, None

    @property
    def is_sliced(self):
        return self.low_mark != 0 or self.high_mark is not None

    def has_limit_one(self):
        return self.high_mark is not None and (self.high_mark - self.low_mark) == 1

    def can_filter(self):
        """
        Return True if adding filters to this instance is still possible.

        Typically, this means no limits or offsets have been put on the
        results.
        """
        return not self.is_sliced

    def clear_select_clause(self):
        """Remove all fields from SELECT clause."""
        self.select = ()
        self.default_cols = False
        self.select_related = False
        self.set_extra_mask(())
        self.set_annotation_mask(())

    def clear_select_fields(self):
        """
        Clear the list of fields to select (but not extra_select columns).
        Some queryset types completely replace any existing list of select
        columns.
        """
        self.select = ()
        self.values_select = ()

    def add_select_col(self, col, name):
        self.select += (col,)
        self.values_select += (name,)

    def set_select(self, cols):
        self.default_cols = False
        self.select = tuple(cols)

    def add_distinct_fields(self, *field_names):
        """
        Add and resolve the given fields to the query's "distinct on" clause.
        """
        self.distinct_fields = field_names
        self.distinct = True

    def add_fields(self, field_names, allow_m2m=True):
        """
        Add the given (model) fields to the select set. Add the field names in
        the order specified.
        """
        alias = self.get_initial_alias()
        opts = self.get_meta()

        try:
            cols = []
            for name in field_names:
                # Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use outer join.
                join_info = self.setup_joins(
                    name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m
                )
                targets, final_alias, joins = self.trim_joins(
                    join_info.targets,
                    join_info.joins,
                    join_info.path,
                )
                for target in targets:
                    cols.append(join_info.transform_function(target, final_alias))
            if cols:
                self.set_select(cols)
        except MultiJoin:
            raise FieldError("Invalid field name: '%s'" % name)
        except FieldError:
            if LOOKUP_SEP in name:
                # For lookups spanning over relationships, show the error
                # from the model on which the lookup failed.
                raise
            elif name in self.annotations:
                raise FieldError(
                    "Cannot select the '%s' alias. Use annotate() to promote "
                    "it." % name
                )
            else:
                names = sorted(
                    [
                        *get_field_names_from_opts(opts),
                        *self.extra,
                        *self.annotation_select,
                        *self._filtered_relations,
                    ]
                )
                raise FieldError(
                    "Cannot resolve keyword %r into field. "
" "Choices are: %s" % (name, ", ".join(names)) ) def add_ordering(self, *ordering): """ Add items from the 'ordering' sequence to the query's "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or OrderBy expressions. If 'ordering' is empty, clear all ordering from the query. """ errors = [] for item in ordering: if isinstance(item, str): if item == "?": continue if item.startswith("-"): item = item[1:] if item in self.annotations: continue if self.extra and item in self.extra: continue # names_to_path() validates the lookup. A descriptive # FieldError will be raise if it's not. self.names_to_path(item.split(LOOKUP_SEP), self.model._meta) elif not hasattr(item, "resolve_expression"): errors.append(item) if getattr(item, "contains_aggregate", False): raise FieldError( "Using an aggregate in order_by() without also including " "it in annotate() is not allowed: %s" % item ) if errors: raise FieldError("Invalid order_by arguments: %s" % errors) if ordering: self.order_by += ordering else: self.default_ordering = False def clear_ordering(self, force=False, clear_default=True): """ Remove any ordering settings if the current query allows it without side effects, set 'force' to True to clear the ordering regardless. If 'clear_default' is True, there will be no ordering in the resulting query (not even the model's default). """ if not force and ( self.is_sliced or self.distinct_fields or self.select_for_update ): return self.order_by = () self.extra_order_by = () if clear_default: self.default_ordering = False def set_group_by(self, allow_aliases=True): """ Expand the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. """ if allow_aliases: # Column names from JOINs to check collisions with aliases. column_names = set() seen_models = set() for join in list(self.alias_map.values())[1:]: # Skip base table. model = join.join_field.related_model if model not in seen_models: column_names.update( {field.column for field in model._meta.local_concrete_fields} ) seen_models.add(model) if self.values_select: # If grouping by aliases is allowed assign selected values # aliases by moving them to annotations. group_by_annotations = {} values_select = {} for alias, expr in zip(self.values_select, self.select): if isinstance(expr, Col): values_select[alias] = expr else: group_by_annotations[alias] = expr self.annotations = {**group_by_annotations, **self.annotations} self.append_annotation_mask(group_by_annotations) self.select = tuple(values_select.values()) self.values_select = tuple(values_select) group_by = list(self.select) for alias, annotation in self.annotation_select.items(): if not (group_by_cols := annotation.get_group_by_cols()): continue if ( allow_aliases and alias not in column_names and not annotation.contains_aggregate ): group_by.append(Ref(alias, annotation)) else: group_by.extend(group_by_cols) self.group_by = tuple(group_by) def add_select_related(self, fields): """ Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). 
""" if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. select_pairs = {} if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): self.check_alias(name) entry = str(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != "%": entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. if new_existing := existing.difference(field_names): self.deferred_loading = new_existing, False else: self.clear_deferred_loading() if new_only := set(field_names).difference(existing): self.deferred_loading = new_only, True def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if "pk" in field_names: field_names.remove("pk") field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. 
self.deferred_loading = frozenset(field_names), False def set_annotation_mask(self, names): """Set the mask of annotations that will be returned by the SELECT.""" if names is None: self.annotation_select_mask = None else: self.annotation_select_mask = set(names) self._annotation_select_cache = None def append_annotation_mask(self, names): if self.annotation_select_mask is not None: self.set_annotation_mask(self.annotation_select_mask.union(names)) def set_extra_mask(self, names): """ Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later. """ if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None def set_values(self, fields): self.select_related = False self.clear_deferred_loading() self.clear_select_fields() self.has_select_fields = True if fields: field_names = [] extra_names = [] annotation_names = [] if not self.extra and not self.annotations: # Shortcut - if there are no extra or annotations, then # the values() clause must be just field names. field_names = list(fields) else: self.default_cols = False for f in fields: if f in self.extra_select: extra_names.append(f) elif f in self.annotation_select: annotation_names.append(f) else: field_names.append(f) self.set_extra_mask(extra_names) self.set_annotation_mask(annotation_names) selected = frozenset(field_names + extra_names + annotation_names) else: field_names = [f.attname for f in self.model._meta.concrete_fields] selected = frozenset(field_names) # Selected annotations must be known before setting the GROUP BY # clause. if self.group_by is True: self.add_fields( (f.attname for f in self.model._meta.concrete_fields), False ) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. self.set_group_by(allow_aliases=False) self.clear_select_fields() elif self.group_by: # Resolve GROUP BY annotation references if they are not part of # the selected fields anymore. group_by = [] for expr in self.group_by: if isinstance(expr, Ref) and expr.refs not in selected: expr = self.annotations[expr.refs] group_by.append(expr) self.group_by = tuple(group_by) self.values_select = tuple(field_names) self.add_fields(field_names, True) @property def annotation_select(self): """ Return the dictionary of aggregate columns that are not masked and should be used in the SELECT clause. Cache this result for performance. """ if self._annotation_select_cache is not None: return self._annotation_select_cache elif not self.annotations: return {} elif self.annotation_select_mask is not None: self._annotation_select_cache = { k: v for k, v in self.annotations.items() if k in self.annotation_select_mask } return self._annotation_select_cache else: return self.annotations @property def extra_select(self): if self._extra_select_cache is not None: return self._extra_select_cache if not self.extra: return {} elif self.extra_select_mask is not None: self._extra_select_cache = { k: v for k, v in self.extra.items() if k in self.extra_select_mask } return self._extra_select_cache else: return self.extra def trim_start(self, names_with_path): """ Trim joins from the start of the join path. The candidates for trim are the PathInfos in names_with_path structure that are m2m joins. Also set the select column so the start matches the join. This method is meant to be used for generating the subquery joins & cols in split_exclude(). 
        Return a lookup usable for doing outerq.filter(lookup=self) and a
        boolean indicating if the joins in the prefix contain a LEFT OUTER
        join.
        """
        all_paths = []
        for _, paths in names_with_path:
            all_paths.extend(paths)
        contains_louter = False
        # Trim and operate only on tables that were generated for
        # the lookup part of the query. That is, avoid trimming
        # joins generated for F() expressions.
        lookup_tables = [
            t for t in self.alias_map if t in self._lookup_joins or t == self.base_table
        ]
        for trimmed_paths, path in enumerate(all_paths):
            if path.m2m:
                break
            if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
                contains_louter = True
            alias = lookup_tables[trimmed_paths]
            self.unref_alias(alias)
        # The path.join_field is a Rel; let's get the other side's field.
        join_field = path.join_field.field
        # Build the filter prefix.
        paths_in_prefix = trimmed_paths
        trimmed_prefix = []
        for name, path in names_with_path:
            if paths_in_prefix - len(path) < 0:
                break
            trimmed_prefix.append(name)
            paths_in_prefix -= len(path)
        trimmed_prefix.append(join_field.foreign_related_fields[0].name)
        trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
        # (that is, self). We can't do this for:
        # - LEFT JOINs because we would miss those rows that have nothing on
        #   the outer side,
        # - INNER JOINs from filtered relations because we would miss their
        #   filters.
        first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
        if first_join.join_type != LOUTER and not first_join.filtered_relation:
            select_fields = [r[0] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths + 1]
            self.unref_alias(lookup_tables[trimmed_paths])
            extra_restriction = join_field.get_extra_restriction(
                None, lookup_tables[trimmed_paths + 1]
            )
            if extra_restriction:
                self.where.add(extra_restriction, AND)
        else:
            # TODO: It might be possible to trim more joins from the start of
            # the inner query if it happens to have a longer join chain
            # containing the values in select_fields. Let's punt this one for
            # now.
            select_fields = [r[1] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths]
        # The found starting point is likely a join_class instead of a
        # base_table_class reference. But the first entry in the query's FROM
        # clause must not be a JOIN.
        for table in self.alias_map:
            if self.alias_refcount[table] > 0:
                self.alias_map[table] = self.base_table_class(
                    self.alias_map[table].table_name,
                    table,
                )
                break
        self.set_select([f.get_col(select_alias) for f in select_fields])
        return trimmed_prefix, contains_louter

    def is_nullable(self, field):
        """
        Check if the given field should be treated as nullable.

        Some backends treat '' as null and Django treats such fields as
        nullable for those backends. In such situations field.null can be
        False even if we should treat the field as nullable.
        """
        # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
        # (nor should it have) knowledge of which connection is going to be
        # used. The proper fix would be to defer all decisions where
        # is_nullable() is needed to the compiler stage, but that is not easy
        # to do currently.
        return field.null or (
            field.empty_strings_allowed
            and connections[
                DEFAULT_DB_ALIAS
            ].features.interprets_empty_strings_as_nulls
        )


def get_order_dir(field, default="ASC"):
    """
    Return the field name and direction for an order specification. For
    example, '-foo' is returned as ('foo', 'DESC').

    The 'default' param is used to indicate which way no prefix (or a '+'
    prefix) should sort.
    The '-' prefix always sorts the opposite way.
    """
    dirn = ORDER_DIR[default]
    if field[0] == "-":
        return field[1:], dirn[1]
    return field, dirn[0]


class JoinPromoter:
    """
    A class to abstract away join promotion problems for complex filter
    conditions.
    """

    def __init__(self, connector, num_children, negated):
        self.connector = connector
        self.negated = negated
        if self.negated:
            if connector == AND:
                self.effective_connector = OR
            else:
                self.effective_connector = AND
        else:
            self.effective_connector = self.connector
        self.num_children = num_children
        # Maps of table alias to how many times it is seen as required for
        # inner and/or outer joins.
        self.votes = Counter()

    def __repr__(self):
        return (
            f"{self.__class__.__qualname__}(connector={self.connector!r}, "
            f"num_children={self.num_children!r}, negated={self.negated!r})"
        )

    def add_votes(self, votes):
        """
        Add single vote per item to self.votes. Parameter can be any
        iterable.
        """
        self.votes.update(votes)

    def update_join_types(self, query):
        """
        Change join types so that the generated query is as efficient as
        possible, but still correct. So, change as many joins as possible
        to INNER, but don't make OUTER joins INNER if that could remove
        results from the query.
        """
        to_promote = set()
        to_demote = set()
        # The effective_connector is used so that NOT (a AND b) is treated
        # similarly to (a OR b) for join promotion.
        for table, votes in self.votes.items():
            # We must use outer joins in OR case when the join isn't contained
            # in all of the joins. Otherwise the INNER JOIN itself could remove
            # valid results. Consider the case where a model with rel_a and
            # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (for example, a
            # reverse foreign key with no matching rows, or a null value in a
            # direct foreign key), and there is a matching row in rel_b with
            # col=2, then an INNER join to rel_a would remove a valid match
            # from the query. So, we need to promote any existing INNER to
            # LOUTER (it is possible this promotion in turn will be demoted
            # later on).
            if self.effective_connector == OR and votes < self.num_children:
                to_promote.add(table)
            # If connector is AND and there is a filter that can match only
            # when there is a joinable row, then use INNER. For example, in
            # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
            # as join output, then the col=1 or col=2 can't match (as
            # NULL=anything is always false).
            # For the OR case, if all children voted for a join to be inner,
            # then we can use INNER for the join. For example:
            # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
            # then if rel_a doesn't produce any rows, the whole condition
            # can't match. Hence we can safely use INNER join.
            if self.effective_connector == AND or (
                self.effective_connector == OR and votes == self.num_children
            ):
                to_demote.add(table)
            # Finally, what happens in cases where we have:
            #     (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
            # Now, we first generate the OR clause, and promote joins for it
            # in the first if branch above. Both rel_a and rel_b are promoted
            # to LOUTER joins. After that we do the AND case. The OR case
            # voted no inner joins but the rel_a__col__gte=0 votes inner join
            # for rel_a. We demote it back to INNER join (in AND case a single
            # vote is enough). The demotion is OK: if rel_a doesn't produce
            # rows, then the rel_a__col__gte=0 clause can't be true, and thus
            # the whole clause must be false. So, it is safe to use INNER
            # join.
# Note that in this example we could just as well have the __gte # clause and the OR clause swapped. Or we could replace the __gte # clause with an OR clause containing rel_a__col=1|rel_a__col=2, # and again we could safely demote to INNER. query.promote_joins(to_promote) query.demote_joins(to_demote) return to_demote
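
# Illustrative walk-through (not part of Django itself): for a filter like
# Model.objects.filter(Q(rel_a__col=1) | Q(rel_b__col=2)), _add_q() creates
# JoinPromoter(OR, num_children=2, negated=False) and each child votes for
# the joins it used, e.g. add_votes({"rel_a"}) and add_votes({"rel_b"}).
# Each table then has 1 vote out of 2 children, so update_join_types()
# promotes both joins to LOUTER; had both children used rel_a, its 2/2
# votes would demote it to INNER instead.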
import collections
import json
import re
from functools import partial
from itertools import chain

from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.lookups import Lookup
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
    CURSOR,
    GET_ITERATOR_CHUNK_SIZE,
    MULTI,
    NO_RESULTS,
    ORDER_DIR,
    SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.models.sql.where import AND
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile


class SQLCompiler:
    # Multiline ordering SQL clause may appear from RawSQL.
    ordering_parts = _lazy_re_compile(
        r"^(.*)\s(?:ASC|DESC).*",
        re.MULTILINE | re.DOTALL,
    )

    def __init__(self, query, connection, using, elide_empty=True):
        self.query = query
        self.connection = connection
        self.using = using
        # Some queries, e.g. coalesced aggregation, need to be executed even if
        # they would return an empty result set.
        self.elide_empty = elide_empty
        self.quote_cache = {"*": "*"}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); these are set as a side effect of executing the
        # query. Note that we calculate separately a list of extra select
        # columns needed for grammatical correctness of the query, but these
        # columns are not included in self.select.
        self.select = None
        self.annotation_col_map = None
        self.klass_info = None
        self._meta_ordering = None

    def __repr__(self):
        return (
            f"<{self.__class__.__qualname__} "
            f"model={self.query.model.__qualname__} "
            f"connection={self.connection!r} using={self.using!r}>"
        )

    def setup_query(self, with_col_aliases=False):
        if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
            self.query.get_initial_alias()
        self.select, self.klass_info, self.annotation_col_map = self.get_select(
            with_col_aliases=with_col_aliases,
        )
        self.col_count = len(self.select)

    def pre_sql_setup(self, with_col_aliases=False):
        """
        Do any necessary class setup immediately prior to producing SQL. This
        is for things that can't necessarily be done in __init__ because we
        might not have all the pieces in place at that time.
        """
        self.setup_query(with_col_aliases=with_col_aliases)
        order_by = self.get_order_by()
        self.where, self.having, self.qualify = self.query.where.split_having_qualify(
            must_group_by=self.query.group_by is not None
        )
        extra_select = self.get_extra_select(order_by, self.select)
        self.has_extra_select = bool(extra_select)
        group_by = self.get_group_by(self.select + extra_select, order_by)
        return extra_select, order_by, group_by

    def get_group_by(self, select, order_by):
        """
        Return a list of 2-tuples of form (sql, params).

        The logic of what exactly the GROUP BY clause contains is hard
        to describe in other words than "if it passes the test suite,
        then it is correct".
""" # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk') # .annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. if self.query.group_by is None: return [] expressions = [] allows_group_by_refs = self.connection.features.allows_group_by_refs if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, "as_sql"): expr = self.query.resolve_ref(expr) if not allows_group_by_refs and isinstance(expr, Ref): expr = expr.source expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)} aliased_exprs = {} for expr, _, alias in select: # Skip members of the select clause that are already included # by reference. if expr in ref_sources: continue if alias: aliased_exprs[expr] = alias cols = expr.get_group_by_cols() for col in cols: expressions.append(col) if not self._meta_ordering: for expr, (sql, params, is_ref) in order_by: # Skip references to the SELECT clause, as all expressions in # the SELECT clause are already part of the GROUP BY. if not is_ref: expressions.extend(expr.get_group_by_cols()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) for expr in expressions: if allows_group_by_refs and (alias := aliased_exprs.get(expr)): expr = Ref(alias, expr) try: sql, params = self.compile(expr) except EmptyResultSet: continue sql, params = expr.select_format(self, sql, params) params_hash = make_hashable(params) if (sql, params_hash) not in seen: result.append((sql, params)) seen.add((sql, params_hash)) return result def collapse_group_by(self, expressions, having): # If the DB can group by primary key, then group by the primary key of # query's main model. Note that for PostgreSQL the GROUP BY clause must # include the primary key of every table, but for MySQL it is enough to # have the main table's primary key. 
        if self.connection.features.allows_group_by_pk:
            # Determine if the main model's primary key is in the query.
            pk = None
            for expr in expressions:
                # Is this a reference to query's base table primary key? If the
                # expression isn't a Col-like, then skip the expression.
                if (
                    getattr(expr, "target", None) == self.query.model._meta.pk
                    and getattr(expr, "alias", None) == self.query.base_table
                ):
                    pk = expr
                    break
            # If the main model's primary key is in the query, group by that
            # field, HAVING expressions, and expressions associated with tables
            # that don't have a primary key included in the grouped columns.
            if pk:
                pk_aliases = {
                    expr.alias
                    for expr in expressions
                    if hasattr(expr, "target") and expr.target.primary_key
                }
                expressions = [pk] + [
                    expr
                    for expr in expressions
                    if expr in having
                    or (
                        getattr(expr, "alias", None) is not None
                        and expr.alias not in pk_aliases
                    )
                ]
        elif self.connection.features.allows_group_by_selected_pks:
            # Filter out all expressions associated with a table's primary key
            # present in the grouped columns. This is done by identifying all
            # tables that have their primary key included in the grouped
            # columns and removing non-primary key columns referring to them.
            # Unmanaged models are excluded because they could be representing
            # database views on which the optimization might not be allowed.
            pks = {
                expr
                for expr in expressions
                if (
                    hasattr(expr, "target")
                    and expr.target.primary_key
                    and self.connection.features.allows_group_by_selected_pks_on_model(
                        expr.target.model
                    )
                )
            }
            aliases = {expr.alias for expr in pks}
            expressions = [
                expr
                for expr in expressions
                if expr in pks or getattr(expr, "alias", None) not in aliases
            ]
        return expressions

    def get_select(self, with_col_aliases=False):
        """
        Return three values:
        - a list of 3-tuples of (expression, (sql, params), alias)
        - a klass_info structure,
        - a dictionary of annotations

        The (sql, params) is what the expression will produce, and alias is
        the "AS alias" for the column (possibly None).

        The klass_info structure contains the following information:
        - The base model of the query.
        - Which columns for that model are present in the query (by position
          of the select clause).
        - related_klass_infos: [f, klass_info] to descend into

        The 'annotations' is a dictionary of {'attname': column position}
        values.
        """
        select = []
        klass_info = None
        annotations = {}
        select_idx = 0
        for alias, (sql, params) in self.query.extra_select.items():
            annotations[alias] = select_idx
            select.append((RawSQL(sql, params), alias))
            select_idx += 1
        assert not (self.query.select and self.query.default_cols)
        select_mask = self.query.get_select_mask()
        if self.query.default_cols:
            cols = self.get_default_columns(select_mask)
        else:
            # self.query.select is a special case. These columns never go to
            # any model.
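            # For illustration (not part of Django itself): an explicit
            # column list such as the one set up by values_list("pk") or by a
            # resolved subquery ends up here rather than in
            # get_default_columns(), because the columns were chosen
            # explicitly instead of being derived from the model's fields.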
cols = self.query.select if cols: select_list = [] for col in cols: select_list.append(select_idx) select.append((col, None)) select_idx += 1 klass_info = { "model": self.query.model, "select_fields": select_list, } for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select, select_mask) klass_info["related_klass_infos"] = related_klass_infos def get_select_from_parent(klass_info): for ki in klass_info["related_klass_infos"]: if ki["from_parent"]: ki["select_fields"] = ( klass_info["select_fields"] + ki["select_fields"] ) get_select_from_parent(ki) get_select_from_parent(klass_info) ret = [] col_idx = 1 for col, alias in select: try: sql, params = self.compile(col) except EmptyResultSet: empty_result_set_value = getattr( col, "empty_result_set_value", NotImplemented ) if empty_result_set_value is NotImplemented: # Select a predicate that's always False. sql, params = "0", () else: sql, params = self.compile(Value(empty_result_set_value)) else: sql, params = col.select_format(self, sql, params) if alias is None and with_col_aliases: alias = f"col{col_idx}" col_idx += 1 ret.append((col, (sql, params), alias)) return ret, klass_info, annotations def _order_by_pairs(self): if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by elif self.query.order_by: ordering = self.query.order_by elif (meta := self.query.get_meta()) and meta.ordering: ordering = meta.ordering self._meta_ordering = ordering else: ordering = [] if self.query.standard_ordering: default_order, _ = ORDER_DIR["ASC"] else: default_order, _ = ORDER_DIR["DESC"] for field in ordering: if hasattr(field, "resolve_expression"): if isinstance(field, Value): # output_field must be resolved for constants. field = Cast(field, field.output_field) if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field = field.copy() field.reverse_ordering() if isinstance(field.expression, F) and ( annotation := self.query.annotation_select.get( field.expression.name ) ): field.expression = Ref(field.expression.name, annotation) yield field, isinstance(field.expression, Ref) continue if field == "?": # random yield OrderBy(Random()), False continue col, order = get_order_dir(field, default_order) descending = order == "DESC" if col in self.query.annotation_select: # Reference to expression in SELECT clause yield ( OrderBy( Ref(col, self.query.annotation_select[col]), descending=descending, ), True, ) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT # clause. if self.query.combinator and self.select: # Don't use the resolved annotation because other # combinated queries might define it differently. expr = F(col) else: expr = self.query.annotations[col] if isinstance(expr, Value): # output_field must be resolved for constants. expr = Cast(expr, expr.output_field) yield OrderBy(expr, descending=descending), False continue if "." in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. 
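            # e.g. (illustrative, not part of Django itself):
            #     qs.extra(order_by=["some_table.some_col"])
            # produces a RawSQL ordering on the quoted table name below.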
table, col = col.split(".", 1) yield ( OrderBy( RawSQL( "%s.%s" % (self.quote_name_unless_alias(table), col), [] ), descending=descending, ), False, ) continue if self.query.extra and col in self.query.extra: if col in self.query.extra_select: yield ( OrderBy( Ref(col, RawSQL(*self.query.extra[col])), descending=descending, ), True, ) else: yield ( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False, ) else: if self.query.combinator and self.select: # Don't use the first model's field because other # combinated queries might define it differently. yield OrderBy(F(col), descending=descending), False else: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. yield from self.find_ordering_name( field, self.query.get_meta(), default_order=default_order, ) def get_order_by(self): """ Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). """ result = [] seen = set() for expr, is_ref in self._order_by_pairs(): resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None) if not is_ref and self.query.combinator and self.select: src = resolved.expression expr_src = expr.expression for sel_expr, _, col_alias in self.select: if col_alias and not ( isinstance(expr_src, F) and col_alias == expr_src.name ): continue if src == sel_expr: resolved.set_source_expressions( [Ref(col_alias if col_alias else src.target.column, src)] ) break else: if col_alias: raise DatabaseError( "ORDER BY term does not match any column in the result set." ) # Add column used in ORDER BY clause to the selected # columns and to each combined query. order_by_idx = len(self.query.select) + 1 col_name = f"__orderbycol{order_by_idx}" for q in self.query.combined_queries: q.add_annotation(expr_src, col_name) self.query.add_select_col(resolved, col_name) resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())]) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. without_ordering = self.ordering_parts.search(sql)[1] params_hash = make_hashable(params) if (without_ordering, params_hash) in seen: continue seen.add((without_ordering, params_hash)) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql)[1] if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). 
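
        For example, an ORM-generated join alias such as "T3" comes back
        unchanged, while a plain table name goes through
        connection.ops.quote_name() (the alias name here is illustrative).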
""" if name in self.quote_cache: return self.quote_cache[name] if ( (name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( self.query.external_aliases.get(name) and name not in self.query.table_map ) ): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node): vendor_impl = getattr(node, "as_" + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection, self.elide_empty) for query in self.query.combined_queries ] if not features.supports_slicing_ordering_in_compound: for compiler in compilers: if compiler.query.is_sliced: raise DatabaseError( "LIMIT/OFFSET not allowed in subqueries of compound statements." ) if compiler.get_order_by(): raise DatabaseError( "ORDER BY not allowed in subqueries of compound statements." ) elif self.query.is_sliced and combinator == "union": limit = (self.query.low_mark, self.query.high_mark) for compiler in compilers: # A sliced union cannot have its parts elided as some of them # might be sliced as well and in the event where only a single # part produces a non-empty resultset it might be impossible to # generate valid SQL. compiler.elide_empty = False if not compiler.query.is_sliced: compiler.query.set_limits(*limit) parts = () for compiler in compilers: try: # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. if not compiler.query.values_select and self.query.values_select: compiler.query = compiler.query.clone() compiler.query.set_values( ( *self.query.extra_select, *self.query.values_select, *self.query.annotation_select, ) ) part_sql, part_args = compiler.as_sql() if compiler.query.combinator: # Wrap in a subquery if wrapping in parentheses isn't # supported. if not features.supports_parentheses_in_compound: part_sql = "SELECT * FROM ({})".format(part_sql) # Add parentheses when combining with compound query if not # already added for all compound queries. elif ( self.query.subquery or not features.supports_slicing_ordering_in_compound ): part_sql = "({})".format(part_sql) elif ( self.query.subquery and features.supports_slicing_ordering_in_compound ): part_sql = "({})".format(part_sql) parts += ((part_sql, part_args),) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. 
if combinator == "union" or (combinator == "difference" and parts): continue raise if not parts: raise EmptyResultSet combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == "union": combinator_sql += " ALL" braces = "{}" if not self.query.subquery and features.supports_slicing_ordering_in_compound: braces = "({})" sql_parts, args_parts = zip( *((braces.format(sql), args) for sql, args in parts) ) result = [" {} ".format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def get_qualify_sql(self): where_parts = [] if self.where: where_parts.append(self.where) if self.having: where_parts.append(self.having) inner_query = self.query.clone() inner_query.subquery = True inner_query.where = inner_query.where.__class__(where_parts) # Augment the inner query with any window function references that # might have been masked via values() and alias(). If any masked # aliases are added they'll be masked again to avoid fetching # the data in the `if qual_aliases` branch below. select = { expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0] } select_aliases = set(select.values()) qual_aliases = set() replacements = {} def collect_replacements(expressions): while expressions: expr = expressions.pop() if expr in replacements: continue elif select_alias := select.get(expr): replacements[expr] = select_alias elif isinstance(expr, Lookup): expressions.extend(expr.get_source_expressions()) elif isinstance(expr, Ref): if expr.refs not in select_aliases: expressions.extend(expr.get_source_expressions()) else: num_qual_alias = len(qual_aliases) select_alias = f"qual{num_qual_alias}" qual_aliases.add(select_alias) inner_query.add_annotation(expr, select_alias) replacements[expr] = select_alias collect_replacements(list(self.qualify.leaves())) self.qualify = self.qualify.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) order_by = [] for order_by_expr, *_ in self.get_order_by(): collect_replacements(order_by_expr.get_source_expressions()) order_by.append( order_by_expr.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) ) inner_query_compiler = inner_query.get_compiler( self.using, elide_empty=self.elide_empty ) inner_sql, inner_params = inner_query_compiler.as_sql( # The limits must be applied to the outer query to avoid pruning # results too eagerly. with_limits=False, # Force unique aliasing of selected columns to avoid collisions # and make rhs predicates referencing easier. with_col_aliases=True, ) qualify_sql, qualify_params = self.compile(self.qualify) result = [ "SELECT * FROM (", inner_sql, ")", self.connection.ops.quote_name("qualify"), "WHERE", qualify_sql, ] if qual_aliases: # If some select aliases were unmasked for filtering purposes they # must be masked back. cols = [self.connection.ops.quote_name(alias) for alias in select.values()] result = [ "SELECT", ", ".join(cols), "FROM (", *result, ")", self.connection.ops.quote_name("qualify_mask"), ] params = list(inner_params) + qualify_params # As the SQL spec is unclear on whether or not derived tables # ordering must propagate it has to be explicitly repeated on the # outer-most query to ensure it's preserved. 
if order_by: ordering_sqls = [] for ordering in order_by: ordering_sql, ordering_params = self.compile(ordering) ordering_sqls.append(ordering_sql) params.extend(ordering_params) result.extend(["ORDER BY", ", ".join(ordering_sqls)]) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup( with_col_aliases=with_col_aliases, ) for_update_part = None # Is a LIMIT/OFFSET clause needed? with_limit_offset = with_limits and self.query.is_sliced combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, "supports_select_{}".format(combinator)): raise NotSupportedError( "{} is not supported on this database backend.".format( combinator ) ) result, params = self.get_combinator_sql( combinator, self.query.combinator_all ) elif self.qualify: result, params = self.get_qualify_sql() order_by = None else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). from_, f_params = self.get_from_clause() try: where, w_params = ( self.compile(self.where) if self.where is not None else ("", []) ) except EmptyResultSet: if self.elide_empty: raise # Use a predicate that's always False. where, w_params = "0 = 1", [] having, h_params = ( self.compile(self.having) if self.having is not None else ("", []) ) result = ["SELECT"] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = "%s AS %s" % ( s_sql, self.connection.ops.quote_name(alias), ) params.extend(s_params) out_cols.append(s_sql) result += [", ".join(out_cols)] if from_: result += ["FROM", *from_] elif self.connection.features.bare_select_suffix: result += [self.connection.features.bare_select_suffix] params.extend(f_params) if self.query.select_for_update and features.has_select_for_update: if ( self.connection.get_autocommit() # Don't raise an exception when database doesn't # support transactions, as it's a noop. and features.supports_transactions ): raise TransactionManagementError( "select_for_update cannot be used outside of a transaction." ) if ( with_limit_offset and not features.supports_select_for_update_with_limit ): raise NotSupportedError( "LIMIT/OFFSET is not supported with " "select_for_update on this database backend." ) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of no_key = self.query.select_for_no_key_update # If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the # backend doesn't support it, raise NotSupportedError to # prevent a possible deadlock. if nowait and not features.has_select_for_update_nowait: raise NotSupportedError( "NOWAIT is not supported on this database backend." ) elif skip_locked and not features.has_select_for_update_skip_locked: raise NotSupportedError( "SKIP LOCKED is not supported on this database backend." 
                        )
                    elif of and not features.has_select_for_update_of:
                        raise NotSupportedError(
                            "FOR UPDATE OF is not supported on this database backend."
                        )
                    elif no_key and not features.has_select_for_no_key_update:
                        raise NotSupportedError(
                            "FOR NO KEY UPDATE is not supported on this "
                            "database backend."
                        )
                    for_update_part = self.connection.ops.for_update_sql(
                        nowait=nowait,
                        skip_locked=skip_locked,
                        of=self.get_select_for_update_of_arguments(),
                        no_key=no_key,
                    )
                    if for_update_part and features.for_update_after_from:
                        result.append(for_update_part)

                if where:
                    result.append("WHERE %s" % where)
                    params.extend(w_params)

                grouping = []
                for g_sql, g_params in group_by:
                    grouping.append(g_sql)
                    params.extend(g_params)
                if grouping:
                    if distinct_fields:
                        raise NotImplementedError(
                            "annotate() + distinct(fields) is not implemented."
                        )
                    order_by = order_by or self.connection.ops.force_no_ordering()
                    result.append("GROUP BY %s" % ", ".join(grouping))
                    if self._meta_ordering:
                        order_by = None
                if having:
                    result.append("HAVING %s" % having)
                    params.extend(h_params)

            if self.query.explain_info:
                result.insert(
                    0,
                    self.connection.ops.explain_query_prefix(
                        self.query.explain_info.format,
                        **self.query.explain_info.options,
                    ),
                )

            if order_by:
                ordering = []
                for _, (o_sql, o_params, _) in order_by:
                    ordering.append(o_sql)
                    params.extend(o_params)
                order_by_sql = "ORDER BY %s" % ", ".join(ordering)
                if combinator and features.requires_compound_order_by_subquery:
                    result = ["SELECT * FROM (", *result, ")", order_by_sql]
                else:
                    result.append(order_by_sql)

            if with_limit_offset:
                result.append(
                    self.connection.ops.limit_offset_sql(
                        self.query.low_mark, self.query.high_mark
                    )
                )

            if for_update_part and not features.for_update_after_from:
                result.append(for_update_part)

            if self.query.subquery and extra_select:
                # If the query is used as a subquery, the extra selects would
                # result in more columns than the left-hand side expression is
                # expecting. This can happen when a subquery uses a combination
                # of order_by() and distinct(), forcing the ordering expressions
                # to be selected as well. Wrap the query in another subquery
                # to exclude extraneous selects.
                sub_selects = []
                sub_params = []
                for index, (select, _, alias) in enumerate(self.select, start=1):
                    if alias:
                        sub_selects.append(
                            "%s.%s"
                            % (
                                self.connection.ops.quote_name("subquery"),
                                self.connection.ops.quote_name(alias),
                            )
                        )
                    else:
                        select_clone = select.relabeled_clone(
                            {select.alias: "subquery"}
                        )
                        subselect, subparams = select_clone.as_sql(
                            self, self.connection
                        )
                        sub_selects.append(subselect)
                        sub_params.extend(subparams)
                return "SELECT %s FROM (%s) subquery" % (
                    ", ".join(sub_selects),
                    " ".join(result),
                ), tuple(sub_params + params)

            return " ".join(result), tuple(params)
        finally:
            # Finally, do cleanup: get rid of the joins we created above.
            self.query.reset_refcounts(refcounts_before)

    def get_default_columns(
        self, select_mask, start_alias=None, opts=None, from_parent=None
    ):
        """
        Compute the default columns for selecting every field in the base
        model. Will sometimes be called to pull in related models (e.g. via
        select_related), in which case "opts" and "start_alias" will be given
        to provide a starting point for the traversal.

        Return a list of column expressions (Col instances) suitable for use
        in the SELECT clause, restricted by select_mask when one is given.
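
        For a model with concrete fields "id" and "name", for instance, this
        yields the two matching Col expressions for the starting alias (the
        field names are illustrative).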
""" result = [] if opts is None: if (opts := self.query.get_meta()) is None: return result start_alias = start_alias or self.query.get_initial_alias() # The 'seen_models' is used to optimize checking the needed parent # alias for a given field. This also includes None -> start_alias to # be used by local fields. seen_models = {None: start_alias} for field in opts.concrete_fields: model = field.model._meta.concrete_model # A proxy model will have a different model and concrete_model. We # will assign None if the field belongs to this model. if model == opts.model: model = None if ( from_parent and model is not None and issubclass( from_parent._meta.concrete_model, model._meta.concrete_model ) ): # Avoid loading data for already loaded parents. # We end up here in the case select_related() resolution # proceeds from parent model to child model. In that case the # parent model data is already present in the SELECT clause, # and we want to avoid reloading the same data again. continue if select_mask and field not in select_mask: continue alias = self.query.join_parent_model(opts, model, start_alias, seen_models) column = field.get_col(alias) result.append(column) return result def get_distinct(self): """ Return a quoted list of fields to use in DISTINCT ON part of the query. This method can alter the tables in the query, and thus it must be called before get_from_clause(). """ result = [] params = [] opts = self.query.get_meta() for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) _, targets, alias, joins, path, _, transform_function = self._setup_joins( parts, opts, None ) targets, alias, _ = self.query.trim_joins(targets, joins, path) for target in targets: if name in self.query.annotation_select: result.append(self.connection.ops.quote_name(name)) else: r, p = self.compile(transform_function(target, alias)) result.append(r) params.append(p) return result, params def find_ordering_name( self, name, opts, alias=None, default_order="ASC", already_seen=None ): """ Return the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) descending = order == "DESC" pieces = name.split(LOOKUP_SEP) ( field, targets, alias, joins, path, opts, transform_function, ) = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model unless it is the pk # shortcut or the attribute name of the field that is specified or # there are transforms to process. if ( field.is_relation and opts.ordering and getattr(field, "attname", None) != pieces[-1] and name != "pk" and not getattr(transform_function, "has_transforms", False) ): # Firstly, avoid infinite loops. 
            already_seen = already_seen or set()
            join_tuple = tuple(
                getattr(self.query.alias_map[j], "join_cols", None) for j in joins
            )
            if join_tuple in already_seen:
                raise FieldError("Infinite loop caused by ordering.")
            already_seen.add(join_tuple)

            results = []
            for item in opts.ordering:
                if hasattr(item, "resolve_expression") and not isinstance(
                    item, OrderBy
                ):
                    item = item.desc() if descending else item.asc()
                if isinstance(item, OrderBy):
                    results.append(
                        (item.prefix_references(f"{name}{LOOKUP_SEP}"), False)
                    )
                    continue
                results.extend(
                    (expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref)
                    for expr, is_ref in self.find_ordering_name(
                        item, opts, alias, order, already_seen
                    )
                )
            return results
        targets, alias, _ = self.query.trim_joins(targets, joins, path)
        return [
            (OrderBy(transform_function(t, alias), descending=descending), False)
            for t in targets
        ]

    def _setup_joins(self, pieces, opts, alias):
        """
        Helper method for get_order_by() and get_distinct().

        get_order_by() and get_distinct() must produce the same target columns
        on the same input, as the prefixes of get_order_by() and
        get_distinct() must match. Executing SQL where this is not true is an
        error.
        """
        alias = alias or self.query.get_initial_alias()
        field, targets, opts, joins, path, transform_function = self.query.setup_joins(
            pieces, opts, alias
        )
        alias = joins[-1]
        return field, targets, alias, joins, path, opts, transform_function

    def get_from_clause(self):
        """
        Return a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list of any extra parameters
        that need to be included. Subclasses can override this to create a
        from-clause via a "select".

        This should only be called after any SQL construction methods that
        might change the tables that are needed. This means the select columns,
        ordering, and distinct must be done first.
        """
        result = []
        params = []
        for alias in tuple(self.query.alias_map):
            if not self.query.alias_refcount[alias]:
                continue
            try:
                from_clause = self.query.alias_map[alias]
            except KeyError:
                # Extra tables can end up in self.tables, but not in the
                # alias_map if they aren't in a join. That's OK. We skip them.
                continue
            clause_sql, clause_params = self.compile(from_clause)
            result.append(clause_sql)
            params.extend(clause_params)
        for t in self.query.extra_tables:
            alias, _ = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
            if (
                alias not in self.query.alias_map
                or self.query.alias_refcount[alias] == 1
            ):
                result.append(", %s" % self.quote_name_unless_alias(alias))
        return result, params

    def get_related_selections(
        self,
        select,
        select_mask,
        opts=None,
        root_alias=None,
        cur_depth=1,
        requested=None,
        restricted=None,
    ):
        """
        Fill in the information needed for a select_related query. The current
        depth is measured as the number of connections away from the root model
        (for example, cur_depth=1 means we are looking at models with direct
        connections to the root model).
        """

        def _get_field_choices():
            direct_choices = (f.name for f in opts.fields if f.is_relation)
            reverse_choices = (
                f.field.related_query_name()
                for f in opts.related_objects
                if f.field.unique
            )
            return chain(
                direct_choices, reverse_choices, self.query._filtered_relations
            )

        related_klass_infos = []
        if not restricted and cur_depth > self.query.max_depth:
            # We've recursed far enough; bail out.
return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() # Setup for the case when only particular related fields should be # included in the related selection. fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info["related_klass_infos"] = related_klass_infos for f in opts.fields: fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. " "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or "(none)", ) ) else: next = False if not select_related_descend(f, restricted, requested, select_mask): continue related_select_mask = select_mask.get(f) or {} klass_info = { "model": f.remote_field.model, "field": f, "reverse": False, "local_setter": f.set_cached_value, "remote_setter": f.remote_field.set_cached_value if f.unique else lambda x, y: None, "from_parent": False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=f.remote_field.model._meta ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_klass_infos = self.get_related_selections( select, related_select_mask, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: related_select_mask = select_mask.get(f) or {} if not select_related_descend( f, restricted, requested, related_select_mask, reverse=True ): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins( [related_field_name], opts, root_alias ) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { "model": model, "field": f, "reverse": True, "local_setter": f.remote_field.set_cached_value, "remote_setter": f.set_cached_value, "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, related_select_mask, model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) def local_setter(obj, from_obj): # Set a reverse fk object when relation is non-empty. if from_obj: f.remote_field.set_cached_value(from_obj, obj) def remote_setter(name, obj, from_obj): setattr(from_obj, name, obj) for name in list(requested): # Filtered relations work only on the topmost level. 
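            # (e.g. an annotation like
            #   FilteredRelation("pizzas", condition=Q(pizzas__name="mozzarella"))
            # referenced from select_related(); the names are illustrative.)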
if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) f, _, join_opts, joins, _, _ = self.query.setup_joins( [name], opts, root_alias ) model = join_opts.model alias = joins[-1] from_parent = ( issubclass(model, opts.model) and model is not opts.model ) klass_info = { "model": model, "field": f, "reverse": True, "local_setter": local_setter, "remote_setter": partial(remote_setter, name), "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] field_select_mask = select_mask.get((name, f)) or {} columns = self.get_default_columns( field_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, field_select_mask, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( "Invalid field name(s) given in select_related: %s. " "Choices are: %s" % ( ", ".join(invalid_fields), ", ".join(_get_field_choices()) or "(none)", ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_parent_klass_info(klass_info): concrete_model = klass_info["model"]._meta.concrete_model for parent_model, parent_link in concrete_model._meta.parents.items(): parent_list = parent_model._meta.get_parent_list() yield { "model": parent_model, "field": parent_link, "reverse": False, "select_fields": [ select_index for select_index in klass_info["select_fields"] # Selected columns from a model or its parents. if ( self.select[select_index][0].target.model == parent_model or self.select[select_index][0].target.model in parent_list ) ], } def _get_first_selected_col_from_model(klass_info): """ Find the first selected column from a model. If it doesn't exist, don't lock a model. select_fields is filled recursively, so it also contains fields from the parent models. 
""" concrete_model = klass_info["model"]._meta.concrete_model for select_index in klass_info["select_fields"]: if self.select[select_index][0].target.model == concrete_model: return self.select[select_index][0] def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield "self" else: field = klass_info["field"] if klass_info["reverse"]: field = field.remote_field path = parent_path + [field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in _get_parent_klass_info(klass_info) ) queue.extend( (path, klass_info) for klass_info in klass_info.get("related_klass_infos", []) ) if not self.klass_info: return [] result = [] invalid_names = [] for name in self.query.select_for_update_of: klass_info = self.klass_info if name == "self": col = _get_first_selected_col_from_model(klass_info) else: for part in name.split(LOOKUP_SEP): klass_infos = ( *klass_info.get("related_klass_infos", []), *_get_parent_klass_info(klass_info), ) for related_klass_info in klass_infos: field = related_klass_info["field"] if related_klass_info["reverse"]: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue col = _get_first_selected_col_from_model(klass_info) if col is not None: if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( "Invalid field name(s) given in select_for_update(of=(...)): %s. " "Only relational fields followed in the query are allowed. " "Choices are: %s." % ( ", ".join(invalid_names), ", ".join(_get_field_choices()), ) ) return result def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def results_iter( self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE, ): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql( MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size ) fields = [s[0] for s in self.select[0 : self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ return bool(self.execute_sql(SINGLE)) def execute_sql( self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE ): """ Run the query against the database and return the result(s). 
        The return value is a single data item if result_type is SINGLE, or an
        iterator over the results if result_type is MULTI.

        result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
        is needed, as the filters describe an empty set. In that case, None is
        returned, to avoid any unnecessary database interaction.
        """
        result_type = result_type or NO_RESULTS
        try:
            sql, params = self.as_sql()
            if not sql:
                raise EmptyResultSet
        except EmptyResultSet:
            if result_type == MULTI:
                return iter([])
            else:
                return
        if chunked_fetch:
            cursor = self.connection.chunked_cursor()
        else:
            cursor = self.connection.cursor()
        try:
            cursor.execute(sql, params)
        except Exception:
            # Might fail for server-side cursors (e.g. connection closed)
            cursor.close()
            raise

        if result_type == CURSOR:
            # Give the caller the cursor to process and close.
            return cursor
        if result_type == SINGLE:
            try:
                val = cursor.fetchone()
                if val:
                    return val[0 : self.col_count]
                return val
            finally:
                # done with the cursor
                cursor.close()
        if result_type == NO_RESULTS:
            cursor.close()
            return

        result = cursor_iter(
            cursor,
            self.connection.features.empty_fetchmany_value,
            self.col_count if self.has_extra_select else None,
            chunk_size,
        )
        if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
            # If we are using non-chunked reads, we return the same data
            # structure as normally, but ensure it is all read into memory
            # before going any further. Use chunked_fetch if requested,
            # unless the database doesn't support it.
            return list(result)
        return result

    def as_subquery_condition(self, alias, columns, compiler):
        qn = compiler.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name

        for index, select_col in enumerate(self.query.select):
            lhs_sql, lhs_params = self.compile(select_col)
            rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
            self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND)

        sql, params = self.as_sql()
        return "EXISTS (%s)" % sql, params

    def explain_query(self):
        result = list(self.execute_sql())
        # Some backends return 1 item tuples with strings, and others return
        # tuples with integers and strings. Flatten them out into strings.
        format_ = self.query.explain_info.format
        output_formatter = json.dumps if format_ and format_.lower() == "json" else str
        for row in result[0]:
            if not isinstance(row, str):
                yield " ".join(output_formatter(c) for c in row)
            else:
                yield row


class SQLInsertCompiler(SQLCompiler):
    returning_fields = None
    returning_params = ()

    def field_as_sql(self, field, val):
        """
        Take a field and a value intended to be saved on that field, and
        return placeholder SQL and accompanying params. Check for raw values,
        expressions, and fields with get_placeholder() defined in that order.

        When field is None, consider the value raw and use it as the
        placeholder, with no corresponding parameters returned.
        """
        if field is None:
            # A field value of None means the value is raw.
            sql, params = val, []
        elif hasattr(val, "as_sql"):
            # This is an expression, let's compile it.
            sql, params = self.compile(val)
        elif hasattr(field, "get_placeholder"):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
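            # (A geometry field, for instance, may wrap the plain %s
            # placeholder in a database function; the details are
            # backend-specific.)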
sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = "%s", [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, "resolve_expression"): value = value.resolve_expression( self.query, allow_joins=False, for_save=True ) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' "can only be used to update, not to insert." % (value, field) ) if value.contains_aggregate: raise FieldError( "Aggregate functions are not allowed in this query " "(%s=%r)." % (field.name, value) ) if value.contains_over_clause: raise FieldError( "Window expressions are not allowed in this query (%s=%r)." % (field.name, value) ) else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. """ if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). 
qn = self.connection.ops.quote_name opts = self.query.get_meta() insert_statement = self.connection.ops.insert_statement( on_conflict=self.query.on_conflict, ) result = ["%s %s" % (insert_statement, qn(opts.db_table))] fields = self.query.fields or [opts.pk] result.append("(%s)" % ", ".join(qn(f.column) for f in fields)) if self.query.fields: value_rows = [ [ self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields ] for obj in self.query.objs ] else: # An empty object. value_rows = [ [self.connection.ops.pk_default_value()] for _ in self.query.objs ] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = ( not self.returning_fields and self.connection.features.has_bulk_insert ) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql( fields, self.query.on_conflict, self.query.update_fields, self.query.unique_fields, ) if ( self.returning_fields and self.connection.features.can_return_columns_from_insert ): if self.connection.features.can_return_rows_from_bulk_insert: result.append( self.connection.ops.bulk_insert_sql(fields, placeholder_rows) ) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) # Skip empty r_sql to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. r_sql, self.returning_params = self.connection.ops.return_insert_columns( self.returning_fields ) if r_sql: result.append(r_sql) params += [self.returning_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, returning_fields=None): assert not ( returning_fields and len(self.query.objs) != 1 and not self.connection.features.can_return_rows_from_bulk_insert ) opts = self.query.get_meta() self.returning_fields = returning_fields with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not self.returning_fields: return [] if ( self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1 ): rows = self.connection.ops.fetch_returned_insert_rows(cursor) elif self.connection.features.can_return_columns_from_insert: assert len(self.query.objs) == 1 rows = [ self.connection.ops.fetch_returned_insert_columns( cursor, self.returning_params, ) ] else: rows = [ ( self.connection.ops.last_insert_id( cursor, opts.db_table, opts.pk.column, ), ) ] cols = [field.get_col(opts.db_table) for field in self.returning_fields] converters = self.get_converters(cols) if converters: rows = list(self.apply_converters(rows, converters)) return rows class SQLDeleteCompiler(SQLCompiler): @cached_property def single_alias(self): # Ensure base table is in aliases. 
self.query.get_initial_alias() return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1 @classmethod def _expr_refs_base_model(cls, expr, base_model): if isinstance(expr, Query): return expr.model == base_model if not hasattr(expr, "get_source_expressions"): return False return any( cls._expr_refs_base_model(source_expr, base_model) for source_expr in expr.get_source_expressions() ) @cached_property def contains_self_reference_subquery(self): return any( self._expr_refs_base_model(expr, self.query.model) for expr in chain( self.query.annotations.values(), self.query.where.children ) ) def _as_sql(self, query): result = ["DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)] where, params = self.compile(query.where) if where: result.append("WHERE %s" % where) return " ".join(result), tuple(params) def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ if self.single_alias and not self.contains_self_reference_subquery: return self._as_sql(self.query) innerq = self.query.clone() innerq.__class__ = Query innerq.clear_select_clause() pk = self.query.model._meta.pk innerq.select = [pk.get_col(self.query.get_initial_alias())] outerq = Query(self.query.model) if not self.connection.features.update_can_self_select: # Force the materialization of the inner query to allow reference # to the target table on MySQL. sql, params = innerq.get_compiler(connection=self.connection).as_sql() innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params) outerq.add_filter("pk__in", innerq) return self._as_sql(outerq) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return "", () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, "resolve_expression"): val = val.resolve_expression( self.query, allow_joins=False, for_save=True ) if val.contains_aggregate: raise FieldError( "Aggregate functions are not allowed in this query " "(%s=%r)." % (field.name, val) ) if val.contains_over_clause: raise FieldError( "Window expressions are not allowed in this query " "(%s=%r)." % (field.name, val) ) elif hasattr(val, "prepare_database_save"): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. if hasattr(field, "get_placeholder"): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = "%s" name = field.column if hasattr(val, "as_sql"): sql, params = self.compile(val) values.append("%s = %s" % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append("%s = %s" % (qn(name), placeholder)) update_params.append(val) else: values.append("%s = NULL" % qn(name)) table = self.query.base_table result = [ "UPDATE %s SET" % qn(table), ", ".join(values), ] where, params = self.compile(self.query.where) if where: result.append("WHERE %s" % where) return " ".join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. 
The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(force=True) query.extra = {} query.select = [] meta = query.get_meta() fields = [meta.pk.name] related_ids_index = [] for related in self.query.related_updates: if all( path.join_field.primary_key for path in meta.get_path_to_parent(related) ): # If a primary key chain exists to the targeted related update, # then the meta.pk value can be used for it. related_ids_index.append((related, 0)) else: # This branch will only be reached when updating a field of an # ancestor that is not part of the primary key chain of a MTI # tree. related_ids_index.append((related, len(fields))) fields.append(related._meta.pk.name) query.add_fields(fields) super().pre_sql_setup() must_pre_select = ( count > 1 and not self.connection.features.update_can_self_select ) # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.clear_where() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] related_ids = collections.defaultdict(list) for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) for parent, index in related_ids_index: related_ids[parent].extend(r[index] for r in rows) self.query.add_filter("pk__in", idents) self.query.related_ids = related_ids else: # The fast path. Filters and updates in one query. self.query.add_filter("pk__in", query) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. 
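
        The aggregates are evaluated over the inner query, giving SQL of the
        general shape "SELECT <aggregates> FROM (<inner query>) subquery"
        (shape shown for illustration).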
""" sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation) ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ", ".join(sql) params = tuple(params) inner_query_sql, inner_query_params = self.query.inner_query.get_compiler( self.using, elide_empty=self.elide_empty, ).as_sql(with_col_aliases=True) sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql) params += inner_query_params return sql, params def cursor_iter(cursor, sentinel, col_count, itersize): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. """ try: for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): yield rows if col_count is None else [r[:col_count] for r in rows] finally: cursor.close()
from collections import namedtuple import cx_Oracle from django.db import models from django.db.backends.base.introspection import BaseDatabaseIntrospection from django.db.backends.base.introspection import FieldInfo as BaseFieldInfo from django.db.backends.base.introspection import TableInfo from django.utils.functional import cached_property FieldInfo = namedtuple("FieldInfo", BaseFieldInfo._fields + ("is_autofield", "is_json")) class DatabaseIntrospection(BaseDatabaseIntrospection): cache_bust_counter = 1 # Maps type objects to Django Field types. @cached_property def data_types_reverse(self): if self.connection.cx_oracle_version < (8,): return { cx_Oracle.BLOB: "BinaryField", cx_Oracle.CLOB: "TextField", cx_Oracle.DATETIME: "DateField", cx_Oracle.FIXED_CHAR: "CharField", cx_Oracle.FIXED_NCHAR: "CharField", cx_Oracle.INTERVAL: "DurationField", cx_Oracle.NATIVE_FLOAT: "FloatField", cx_Oracle.NCHAR: "CharField", cx_Oracle.NCLOB: "TextField", cx_Oracle.NUMBER: "DecimalField", cx_Oracle.STRING: "CharField", cx_Oracle.TIMESTAMP: "DateTimeField", } else: return { cx_Oracle.DB_TYPE_DATE: "DateField", cx_Oracle.DB_TYPE_BINARY_DOUBLE: "FloatField", cx_Oracle.DB_TYPE_BLOB: "BinaryField", cx_Oracle.DB_TYPE_CHAR: "CharField", cx_Oracle.DB_TYPE_CLOB: "TextField", cx_Oracle.DB_TYPE_INTERVAL_DS: "DurationField", cx_Oracle.DB_TYPE_NCHAR: "CharField", cx_Oracle.DB_TYPE_NCLOB: "TextField", cx_Oracle.DB_TYPE_NVARCHAR: "CharField", cx_Oracle.DB_TYPE_NUMBER: "DecimalField", cx_Oracle.DB_TYPE_TIMESTAMP: "DateTimeField", cx_Oracle.DB_TYPE_VARCHAR: "CharField", } def get_field_type(self, data_type, description): if data_type == cx_Oracle.NUMBER: precision, scale = description[4:6] if scale == 0: if precision > 11: return ( "BigAutoField" if description.is_autofield else "BigIntegerField" ) elif 1 < precision < 6 and description.is_autofield: return "SmallAutoField" elif precision == 1: return "BooleanField" elif description.is_autofield: return "AutoField" else: return "IntegerField" elif scale == -127: return "FloatField" elif data_type == cx_Oracle.NCLOB and description.is_json: return "JSONField" return super().get_field_type(data_type, description) def get_table_list(self, cursor): """Return a list of table and view names in the current database.""" cursor.execute( """ SELECT table_name, 't' FROM user_tables WHERE NOT EXISTS ( SELECT 1 FROM user_mviews WHERE user_mviews.mview_name = user_tables.table_name ) UNION ALL SELECT view_name, 'v' FROM user_views UNION ALL SELECT mview_name, 'v' FROM user_mviews """ ) return [ TableInfo(self.identifier_converter(row[0]), row[1]) for row in cursor.fetchall() ] def get_table_description(self, cursor, table_name): """ Return a description of the table with the DB-API cursor.description interface. 
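
        Each FieldInfo also carries the Oracle-specific is_autofield and
        is_json flags computed from the system catalogs below.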
""" # user_tab_columns gives data default for columns cursor.execute( """ SELECT user_tab_cols.column_name, user_tab_cols.data_default, CASE WHEN user_tab_cols.collation = user_tables.default_collation THEN NULL ELSE user_tab_cols.collation END collation, CASE WHEN user_tab_cols.char_used IS NULL THEN user_tab_cols.data_length ELSE user_tab_cols.char_length END as internal_size, CASE WHEN user_tab_cols.identity_column = 'YES' THEN 1 ELSE 0 END as is_autofield, CASE WHEN EXISTS ( SELECT 1 FROM user_json_columns WHERE user_json_columns.table_name = user_tab_cols.table_name AND user_json_columns.column_name = user_tab_cols.column_name ) THEN 1 ELSE 0 END as is_json FROM user_tab_cols LEFT OUTER JOIN user_tables ON user_tables.table_name = user_tab_cols.table_name WHERE user_tab_cols.table_name = UPPER(%s) """, [table_name], ) field_map = { column: ( internal_size, default if default != "NULL" else None, collation, is_autofield, is_json, ) for ( column, default, collation, internal_size, is_autofield, is_json, ) in cursor.fetchall() } self.cache_bust_counter += 1 cursor.execute( "SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format( self.connection.ops.quote_name(table_name), self.cache_bust_counter ) ) description = [] for desc in cursor.description: name = desc[0] internal_size, default, collation, is_autofield, is_json = field_map[name] name %= {} # cx_Oracle, for some reason, doubles percent signs. description.append( FieldInfo( self.identifier_converter(name), *desc[1:3], internal_size, desc[4] or 0, desc[5] or 0, *desc[6:], default, collation, is_autofield, is_json, ) ) return description def identifier_converter(self, name): """Identifier comparison is case insensitive under Oracle.""" return name.lower() def get_sequences(self, cursor, table_name, table_fields=()): cursor.execute( """ SELECT user_tab_identity_cols.sequence_name, user_tab_identity_cols.column_name FROM user_tab_identity_cols, user_constraints, user_cons_columns cols WHERE user_constraints.constraint_name = cols.constraint_name AND user_constraints.table_name = user_tab_identity_cols.table_name AND cols.column_name = user_tab_identity_cols.column_name AND user_constraints.constraint_type = 'P' AND user_tab_identity_cols.table_name = UPPER(%s) """, [table_name], ) # Oracle allows only one identity column per table. row = cursor.fetchone() if row: return [ { "name": self.identifier_converter(row[0]), "table": self.identifier_converter(table_name), "column": self.identifier_converter(row[1]), } ] # To keep backward compatibility for AutoFields that aren't Oracle # identity columns. for f in table_fields: if isinstance(f, models.AutoField): return [{"table": table_name, "column": f.column}] return [] def get_relations(self, cursor, table_name): """ Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all foreign keys in the given table. 
""" table_name = table_name.upper() cursor.execute( """ SELECT ca.column_name, cb.table_name, cb.column_name FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb WHERE user_constraints.table_name = %s AND user_constraints.constraint_name = ca.constraint_name AND user_constraints.r_constraint_name = cb.constraint_name AND ca.position = cb.position""", [table_name], ) return { self.identifier_converter(field_name): ( self.identifier_converter(rel_field_name), self.identifier_converter(rel_table_name), ) for field_name, rel_table_name, rel_field_name in cursor.fetchall() } def get_primary_key_columns(self, cursor, table_name): cursor.execute( """ SELECT cols.column_name FROM user_constraints, user_cons_columns cols WHERE user_constraints.constraint_name = cols.constraint_name AND user_constraints.constraint_type = 'P' AND user_constraints.table_name = UPPER(%s) ORDER BY cols.position """, [table_name], ) return [self.identifier_converter(row[0]) for row in cursor.fetchall()] def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. """ constraints = {} # Loop over the constraints, getting PKs, uniques, and checks cursor.execute( """ SELECT user_constraints.constraint_name, LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.position), CASE user_constraints.constraint_type WHEN 'P' THEN 1 ELSE 0 END AS is_primary_key, CASE WHEN user_constraints.constraint_type IN ('P', 'U') THEN 1 ELSE 0 END AS is_unique, CASE user_constraints.constraint_type WHEN 'C' THEN 1 ELSE 0 END AS is_check_constraint FROM user_constraints LEFT OUTER JOIN user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name WHERE user_constraints.constraint_type = ANY('P', 'U', 'C') AND user_constraints.table_name = UPPER(%s) GROUP BY user_constraints.constraint_name, user_constraints.constraint_type """, [table_name], ) for constraint, columns, pk, unique, check in cursor.fetchall(): constraint = self.identifier_converter(constraint) constraints[constraint] = { "columns": columns.split(","), "primary_key": pk, "unique": unique, "foreign_key": None, "check": check, "index": unique, # All uniques come with an index } # Foreign key constraints cursor.execute( """ SELECT cons.constraint_name, LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.position), LOWER(rcols.table_name), LOWER(rcols.column_name) FROM user_constraints cons INNER JOIN user_cons_columns rcols ON rcols.constraint_name = cons.r_constraint_name AND rcols.position = 1 LEFT OUTER JOIN user_cons_columns cols ON cons.constraint_name = cols.constraint_name WHERE cons.constraint_type = 'R' AND cons.table_name = UPPER(%s) GROUP BY cons.constraint_name, rcols.table_name, rcols.column_name """, [table_name], ) for constraint, columns, other_table, other_column in cursor.fetchall(): constraint = self.identifier_converter(constraint) constraints[constraint] = { "primary_key": False, "unique": False, "foreign_key": (other_table, other_column), "check": False, "index": False, "columns": columns.split(","), } # Now get indexes cursor.execute( """ SELECT ind.index_name, LOWER(ind.index_type), LOWER(ind.uniqueness), LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.column_position), LISTAGG(cols.descend, ',') WITHIN GROUP (ORDER BY cols.column_position) FROM user_ind_columns cols, user_indexes ind WHERE cols.table_name = UPPER(%s) AND NOT EXISTS ( SELECT 1 FROM user_constraints cons WHERE ind.index_name = 
cons.index_name ) AND cols.index_name = ind.index_name GROUP BY ind.index_name, ind.index_type, ind.uniqueness """, [table_name], ) for constraint, type_, unique, columns, orders in cursor.fetchall(): constraint = self.identifier_converter(constraint) constraints[constraint] = { "primary_key": False, "unique": unique == "unique", "foreign_key": None, "check": False, "index": True, "type": "idx" if type_ == "normal" else type_, "columns": columns.split(","), "orders": orders.split(","), } return constraints
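

# A hedged illustration of the structure returned by get_constraints() above
# (table, constraint, and column names here are hypothetical):
#
#     {
#         "sys_c007051": {
#             "columns": ["id"],
#             "primary_key": 1,
#             "unique": 1,
#             "foreign_key": None,
#             "check": 0,
#             "index": 1,
#         },
#         "book_author_id_fk": {
#             "columns": ["author_id"],
#             "primary_key": False,
#             "unique": False,
#             "foreign_key": ("author", "id"),
#             "check": False,
#             "index": False,
#         },
#     }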
""" Oracle database backend for Django. Requires cx_Oracle: https://oracle.github.io/python-cx_Oracle/ """ import datetime import decimal import os import platform from contextlib import contextmanager from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import IntegrityError from django.db.backends.base.base import BaseDatabaseWrapper from django.utils.asyncio import async_unsafe from django.utils.encoding import force_bytes, force_str from django.utils.functional import cached_property def _setup_environment(environ): # Cygwin requires some special voodoo to set the environment variables # properly so that Oracle will see them. if platform.system().upper().startswith("CYGWIN"): try: import ctypes except ImportError as e: raise ImproperlyConfigured( "Error loading ctypes: %s; " "the Oracle backend requires ctypes to " "operate correctly under Cygwin." % e ) kernel32 = ctypes.CDLL("kernel32") for name, value in environ: kernel32.SetEnvironmentVariableA(name, value) else: os.environ.update(environ) _setup_environment( [ # Oracle takes client-side character set encoding from the environment. ("NLS_LANG", ".AL32UTF8"), # This prevents Unicode from getting mangled by getting encoded into the # potentially non-Unicode database character set. ("ORA_NCHAR_LITERAL_REPLACE", "TRUE"), ] ) try: import cx_Oracle as Database except ImportError as e: raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e) # Some of these import cx_Oracle, so import them after checking if it's installed. from .client import DatabaseClient # NOQA from .creation import DatabaseCreation # NOQA from .features import DatabaseFeatures # NOQA from .introspection import DatabaseIntrospection # NOQA from .operations import DatabaseOperations # NOQA from .schema import DatabaseSchemaEditor # NOQA from .utils import Oracle_datetime, dsn # NOQA from .validation import DatabaseValidation # NOQA @contextmanager def wrap_oracle_errors(): try: yield except Database.DatabaseError as e: # cx_Oracle raises a cx_Oracle.DatabaseError exception with the # following attributes and values: # code = 2091 # message = 'ORA-02091: transaction rolled back # 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS # _C00102056) violated - parent key not found' # or: # 'ORA-00001: unique constraint (DJANGOTEST.DEFERRABLE_ # PINK_CONSTRAINT) violated # Convert that case to Django's IntegrityError exception. x = e.args[0] if ( hasattr(x, "code") and hasattr(x, "message") and x.code == 2091 and ("ORA-02291" in x.message or "ORA-00001" in x.message) ): raise IntegrityError(*tuple(e.args)) raise class _UninitializedOperatorsDescriptor: def __get__(self, instance, cls=None): # If connection.operators is looked up before a connection has been # created, transparently initialize connection.operators to avert an # AttributeError. if instance is None: raise AttributeError("operators not available as class attribute") # Creating a cursor will initialize the operators. instance.cursor().close() return instance.__dict__["operators"] class DatabaseWrapper(BaseDatabaseWrapper): vendor = "oracle" display_name = "Oracle" # This dictionary maps Field objects to their associated Oracle column # types, as strings. Column-type strings can contain format strings; they'll # be interpolated against the values of Field.__dict__ before being output. # If a column type is set to None, it won't be included in the output. 
    #
    # Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed).
    data_types = {
        "AutoField": "NUMBER(11) GENERATED BY DEFAULT ON NULL AS IDENTITY",
        "BigAutoField": "NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY",
        "BinaryField": "BLOB",
        "BooleanField": "NUMBER(1)",
        "CharField": "NVARCHAR2(%(max_length)s)",
        "DateField": "DATE",
        "DateTimeField": "TIMESTAMP",
        "DecimalField": "NUMBER(%(max_digits)s, %(decimal_places)s)",
        "DurationField": "INTERVAL DAY(9) TO SECOND(6)",
        "FileField": "NVARCHAR2(%(max_length)s)",
        "FilePathField": "NVARCHAR2(%(max_length)s)",
        "FloatField": "DOUBLE PRECISION",
        "IntegerField": "NUMBER(11)",
        "JSONField": "NCLOB",
        "BigIntegerField": "NUMBER(19)",
        "IPAddressField": "VARCHAR2(15)",
        "GenericIPAddressField": "VARCHAR2(39)",
        "OneToOneField": "NUMBER(11)",
        "PositiveBigIntegerField": "NUMBER(19)",
        "PositiveIntegerField": "NUMBER(11)",
        "PositiveSmallIntegerField": "NUMBER(11)",
        "SlugField": "NVARCHAR2(%(max_length)s)",
        "SmallAutoField": "NUMBER(5) GENERATED BY DEFAULT ON NULL AS IDENTITY",
        "SmallIntegerField": "NUMBER(11)",
        "TextField": "NCLOB",
        "TimeField": "TIMESTAMP",
        "URLField": "VARCHAR2(%(max_length)s)",
        "UUIDField": "VARCHAR2(32)",
    }
    data_type_check_constraints = {
        "BooleanField": "%(qn_column)s IN (0,1)",
        "JSONField": "%(qn_column)s IS JSON",
        "PositiveBigIntegerField": "%(qn_column)s >= 0",
        "PositiveIntegerField": "%(qn_column)s >= 0",
        "PositiveSmallIntegerField": "%(qn_column)s >= 0",
    }

    # Oracle doesn't support a database index on these columns.
    _limited_data_types = ("clob", "nclob", "blob")

    operators = _UninitializedOperatorsDescriptor()

    _standard_operators = {
        "exact": "= %s",
        "iexact": "= UPPER(%s)",
        "contains": (
            "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
        ),
        "icontains": (
            "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) "
            "ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
        ),
        "gt": "> %s",
        "gte": ">= %s",
        "lt": "< %s",
        "lte": "<= %s",
        "startswith": (
            "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
        ),
        "endswith": (
            "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
        ),
        "istartswith": (
            "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) "
            "ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
        ),
        "iendswith": (
            "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) "
            "ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
        ),
    }

    _likec_operators = {
        **_standard_operators,
        "contains": "LIKEC %s ESCAPE '\\'",
        "icontains": "LIKEC UPPER(%s) ESCAPE '\\'",
        "startswith": "LIKEC %s ESCAPE '\\'",
        "endswith": "LIKEC %s ESCAPE '\\'",
        "istartswith": "LIKEC UPPER(%s) ESCAPE '\\'",
        "iendswith": "LIKEC UPPER(%s) ESCAPE '\\'",
    }

    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an
    # expression or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, %, _)
    # should be escaped on the database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a
    # wildcard for the LIKE operator.
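    #
    # For example (illustrative), after the dict comprehension below,
    # _standard_pattern_ops["contains"] ends up as:
    #     LIKE TRANSLATE( '%%' || {} || '%%' USING NCHAR_CS)
    #         ESCAPE TRANSLATE('\' USING NCHAR_CS)
    # str.format() later fills {} with the right-hand side expression, and the
    # doubled %% collapses to a literal % during format-style parameter
    # interpolation in the cursor.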
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" _pattern_ops = { "contains": "'%%' || {} || '%%'", "icontains": "'%%' || UPPER({}) || '%%'", "startswith": "{} || '%%'", "istartswith": "UPPER({}) || '%%'", "endswith": "'%%' || {}", "iendswith": "'%%' || UPPER({})", } _standard_pattern_ops = { k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)" " ESCAPE TRANSLATE('\\' USING NCHAR_CS)" for k, v in _pattern_ops.items() } _likec_pattern_ops = { k: "LIKEC " + v + " ESCAPE '\\'" for k, v in _pattern_ops.items() } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations validation_class = DatabaseValidation def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) use_returning_into = self.settings_dict["OPTIONS"].get( "use_returning_into", True ) self.features.can_return_columns_from_insert = use_returning_into def get_database_version(self): return self.oracle_version def get_connection_params(self): conn_params = self.settings_dict["OPTIONS"].copy() if "use_returning_into" in conn_params: del conn_params["use_returning_into"] return conn_params @async_unsafe def get_new_connection(self, conn_params): return Database.connect( user=self.settings_dict["USER"], password=self.settings_dict["PASSWORD"], dsn=dsn(self.settings_dict), **conn_params, ) def init_connection_state(self): super().init_connection_state() cursor = self.create_cursor() # Set the territory first. The territory overrides NLS_DATE_FORMAT # and NLS_TIMESTAMP_FORMAT to the territory default. When all of # these are set in single statement it isn't clear what is supposed # to happen. cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'") # Set Oracle date to ANSI date format. This only needs to execute # once when we create a new connection. We also set the Territory # to 'AMERICA' which forces Sunday to evaluate to a '1' in # TO_CHAR(). cursor.execute( "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'" " NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" + (" TIME_ZONE = 'UTC'" if settings.USE_TZ else "") ) cursor.close() if "operators" not in self.__dict__: # Ticket #14149: Check whether our LIKE implementation will # work for this connection or we need to fall back on LIKEC. # This check is performed only once per DatabaseWrapper # instance per thread, since subsequent connections will use # the same settings. cursor = self.create_cursor() try: cursor.execute( "SELECT 1 FROM DUAL WHERE DUMMY %s" % self._standard_operators["contains"], ["X"], ) except Database.DatabaseError: self.operators = self._likec_operators self.pattern_ops = self._likec_pattern_ops else: self.operators = self._standard_operators self.pattern_ops = self._standard_pattern_ops cursor.close() self.connection.stmtcachesize = 20 # Ensure all changes are preserved even when AUTOCOMMIT is False. if not self.get_autocommit(): self.commit() @async_unsafe def create_cursor(self, name=None): return FormatStylePlaceholderCursor(self.connection) def _commit(self): if self.connection is not None: with wrap_oracle_errors(): return self.connection.commit() # Oracle doesn't support releasing savepoints. But we fake them when query # logging is enabled to keep query counts consistent with other backends. 
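    # For example (assuming a savepoint id "s1"), _savepoint_commit() below
    # appends an entry like
    #     {"sql": '-- RELEASE SAVEPOINT "S1" (faked)', "time": "0.000"}
    # to self.queries_log instead of issuing any SQL.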
def _savepoint_commit(self, sid): if self.queries_logged: self.queries_log.append( { "sql": "-- RELEASE SAVEPOINT %s (faked)" % self.ops.quote_name(sid), "time": "0.000", } ) def _set_autocommit(self, autocommit): with self.wrap_database_errors: self.connection.autocommit = autocommit def check_constraints(self, table_names=None): """ Check constraints by setting them to immediate. Return them to deferred afterward. """ with self.cursor() as cursor: cursor.execute("SET CONSTRAINTS ALL IMMEDIATE") cursor.execute("SET CONSTRAINTS ALL DEFERRED") def is_usable(self): try: self.connection.ping() except Database.Error: return False else: return True @cached_property def cx_oracle_version(self): return tuple(int(x) for x in Database.version.split(".")) @cached_property def oracle_version(self): with self.temporary_connection(): return tuple(int(x) for x in self.connection.version.split(".")) class OracleParam: """ Wrapper object for formatting parameters for Oracle. If the string representation of the value is large enough (greater than 4000 characters) the input size needs to be set as CLOB. Alternatively, if the parameter has an `input_size` attribute, then the value of the `input_size` attribute will be used instead. Otherwise, no input size will be set for the parameter when executing the query. """ def __init__(self, param, cursor, strings_only=False): # With raw SQL queries, datetimes can reach this function # without being converted by DateTimeField.get_db_prep_value. if settings.USE_TZ and ( isinstance(param, datetime.datetime) and not isinstance(param, Oracle_datetime) ): param = Oracle_datetime.from_datetime(param) string_size = 0 # Oracle doesn't recognize True and False correctly. if param is True: param = 1 elif param is False: param = 0 if hasattr(param, "bind_parameter"): self.force_bytes = param.bind_parameter(cursor) elif isinstance(param, (Database.Binary, datetime.timedelta)): self.force_bytes = param else: # To transmit to the database, we need Unicode if supported # To get size right, we must consider bytes. self.force_bytes = force_str(param, cursor.charset, strings_only) if isinstance(self.force_bytes, str): # We could optimize by only converting up to 4000 bytes here string_size = len(force_bytes(param, cursor.charset, strings_only)) if hasattr(param, "input_size"): # If parameter has `input_size` attribute, use that. self.input_size = param.input_size elif string_size > 4000: # Mark any string param greater than 4000 characters as a CLOB. self.input_size = Database.CLOB elif isinstance(param, datetime.datetime): self.input_size = Database.TIMESTAMP else: self.input_size = None class VariableWrapper: """ An adapter class for cursor variables that prevents the wrapped object from being converted into a string when used to instantiate an OracleParam. This can be used generally for any other object that should be passed into Cursor.execute as-is. """ def __init__(self, var): self.var = var def bind_parameter(self, cursor): return self.var def __getattr__(self, key): return getattr(self.var, key) def __setattr__(self, key, value): if key == "var": self.__dict__[key] = value else: setattr(self.var, key, value) class FormatStylePlaceholderCursor: """ Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". 
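
    For example (illustrative), execute("SELECT 1 FROM dual WHERE dummy = %s",
    ["X"]) rewrites the query to "SELECT 1 FROM dual WHERE dummy = :arg0"
    in _fix_for_params() before handing it to cx_Oracle.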
""" charset = "utf-8" def __init__(self, connection): self.cursor = connection.cursor() self.cursor.outputtypehandler = self._output_type_handler @staticmethod def _output_number_converter(value): return decimal.Decimal(value) if "." in value else int(value) @staticmethod def _get_decimal_converter(precision, scale): if scale == 0: return int context = decimal.Context(prec=precision) quantize_value = decimal.Decimal(1).scaleb(-scale) return lambda v: decimal.Decimal(v).quantize(quantize_value, context=context) @staticmethod def _output_type_handler(cursor, name, defaultType, length, precision, scale): """ Called for each db column fetched from cursors. Return numbers as the appropriate Python type. """ if defaultType == Database.NUMBER: if scale == -127: if precision == 0: # NUMBER column: decimal-precision floating point. # This will normally be an integer from a sequence, # but it could be a decimal value. outconverter = FormatStylePlaceholderCursor._output_number_converter else: # FLOAT column: binary-precision floating point. # This comes from FloatField columns. outconverter = float elif precision > 0: # NUMBER(p,s) column: decimal-precision fixed point. # This comes from IntegerField and DecimalField columns. outconverter = FormatStylePlaceholderCursor._get_decimal_converter( precision, scale ) else: # No type information. This normally comes from a # mathematical expression in the SELECT list. Guess int # or Decimal based on whether it has a decimal point. outconverter = FormatStylePlaceholderCursor._output_number_converter return cursor.var( Database.STRING, size=255, arraysize=cursor.arraysize, outconverter=outconverter, ) def _format_params(self, params): try: return {k: OracleParam(v, self, True) for k, v in params.items()} except AttributeError: return tuple(OracleParam(p, self, True) for p in params) def _guess_input_sizes(self, params_list): # Try dict handling; if that fails, treat as sequence if hasattr(params_list[0], "keys"): sizes = {} for params in params_list: for k, value in params.items(): if value.input_size: sizes[k] = value.input_size if sizes: self.setinputsizes(**sizes) else: # It's not a list of dicts; it's a list of sequences sizes = [None] * len(params_list[0]) for params in params_list: for i, value in enumerate(params): if value.input_size: sizes[i] = value.input_size if sizes: self.setinputsizes(*sizes) def _param_generator(self, params): # Try dict handling; if that fails, treat as sequence if hasattr(params, "items"): return {k: v.force_bytes for k, v in params.items()} else: return [p.force_bytes for p in params] def _fix_for_params(self, query, params, unify_by_values=False): # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it # it does want a trailing ';' but not a trailing '/'. However, these # characters must be included in the original query in case the query # is being passed to SQL*Plus. if query.endswith(";") or query.endswith("/"): query = query[:-1] if params is None: params = [] elif hasattr(params, "keys"): # Handle params as dict args = {k: ":%s" % k for k in params} query %= args elif unify_by_values and params: # Handle params as a dict with unified query parameters by their # values. It can be used only in single query execute() because # executemany() shares the formatted query with each of the params # list. e.g. 
for input params = [0.75, 2, 0.75, 'sth', 0.75] # params_dict = {0.75: ':arg0', 2: ':arg1', 'sth': ':arg2'} # args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0'] # params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'} params_dict = { param: ":arg%d" % i for i, param in enumerate(dict.fromkeys(params)) } args = [params_dict[param] for param in params] params = {value: key for key, value in params_dict.items()} query %= tuple(args) else: # Handle params as sequence args = [(":arg%d" % i) for i in range(len(params))] query %= tuple(args) return query, self._format_params(params) def execute(self, query, params=None): query, params = self._fix_for_params(query, params, unify_by_values=True) self._guess_input_sizes([params]) with wrap_oracle_errors(): return self.cursor.execute(query, self._param_generator(params)) def executemany(self, query, params=None): if not params: # No params given, nothing to do return None # uniform treatment for sequences and iterables params_iter = iter(params) query, firstparams = self._fix_for_params(query, next(params_iter)) # we build a list of formatted params; as we're going to traverse it # more than once, we can't make it lazy by using a generator formatted = [firstparams] + [self._format_params(p) for p in params_iter] self._guess_input_sizes(formatted) with wrap_oracle_errors(): return self.cursor.executemany( query, [self._param_generator(p) for p in formatted] ) def close(self): try: self.cursor.close() except Database.InterfaceError: # already closed pass def var(self, *args): return VariableWrapper(self.cursor.var(*args)) def arrayvar(self, *args): return VariableWrapper(self.cursor.arrayvar(*args)) def __getattr__(self, attr): return getattr(self.cursor, attr) def __iter__(self): return iter(self.cursor)
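

# A minimal sketch of the error translation performed by wrap_oracle_errors()
# above (table and column names here are hypothetical; not runnable on its
# own):
#
#     with wrap_oracle_errors():
#         cursor.execute(
#             "INSERT INTO child (parent_id) VALUES (%s)", [missing_id]
#         )
#
# If Oracle reports ORA-02091 wrapping ORA-02291 ("parent key not found") or
# ORA-00001 (unique constraint violated), the context manager re-raises it as
# django.db.IntegrityError; any other DatabaseError propagates unchanged.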
import logging import operator from datetime import datetime from django.conf import settings from django.db.backends.ddl_references import ( Columns, Expressions, ForeignKeyName, IndexName, Statement, Table, ) from django.db.backends.utils import names_digest, split_identifier from django.db.models import Deferrable, Index from django.db.models.sql import Query from django.db.transaction import TransactionManagementError, atomic from django.utils import timezone logger = logging.getLogger("django.db.backends.schema") def _is_relevant_relation(relation, altered_field): """ When altering the given field, must constraints on its model from the given relation be temporarily dropped? """ field = relation.field if field.many_to_many: # M2M reverse field return False if altered_field.primary_key and field.to_fields == [None]: # Foreign key constraint on the primary key, which is being altered. return True # Is the constraint targeting the field being altered? return altered_field.name in field.to_fields def _all_related_fields(model): # Related fields must be returned in a deterministic order. return sorted( model._meta._get_fields( forward=False, reverse=True, include_hidden=True, include_parents=False, ), key=operator.attrgetter("name"), ) def _related_non_m2m_objects(old_field, new_field): # Filter out m2m objects from reverse relations. # Return (old_relation, new_relation) tuples. related_fields = zip( ( obj for obj in _all_related_fields(old_field.model) if _is_relevant_relation(obj, old_field) ), ( obj for obj in _all_related_fields(new_field.model) if _is_relevant_relation(obj, new_field) ), ) for old_rel, new_rel in related_fields: yield old_rel, new_rel yield from _related_non_m2m_objects( old_rel.remote_field, new_rel.remote_field, ) class BaseDatabaseSchemaEditor: """ This class and its subclasses are responsible for emitting schema-changing statements to the databases - model creation/removal/alteration, field renaming, index fiddling, and so on. 
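
    Instances are normally obtained as a context manager via
    connection.schema_editor(); __enter__() and __exit__() below run any
    deferred SQL on the way out and, when the backend supports transactional
    DDL, wrap the whole block in atomic().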
""" # Overrideable SQL templates sql_create_table = "CREATE TABLE %(table)s (%(definition)s)" sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s" sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s" sql_delete_table = "DROP TABLE %(table)s CASCADE" sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s" sql_alter_column = "ALTER TABLE %(table)s %(changes)s" sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s" sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL" sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL" sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s" sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT" sql_alter_column_no_default_null = sql_alter_column_no_default sql_alter_column_collate = "ALTER COLUMN %(column)s TYPE %(type)s%(collation)s" sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE" sql_rename_column = ( "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s" ) sql_update_with_default = ( "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL" ) sql_unique_constraint = "UNIQUE (%(columns)s)%(deferrable)s" sql_check_constraint = "CHECK (%(check)s)" sql_delete_constraint = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_constraint = "CONSTRAINT %(name)s %(constraint)s" sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)" sql_delete_check = sql_delete_constraint sql_create_unique = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s " "UNIQUE (%(columns)s)%(deferrable)s" ) sql_delete_unique = sql_delete_constraint sql_create_fk = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) " "REFERENCES %(to_table)s (%(to_column)s)%(deferrable)s" ) sql_create_inline_fk = None sql_create_column_inline_fk = None sql_delete_fk = sql_delete_constraint sql_create_index = ( "CREATE INDEX %(name)s ON %(table)s " "(%(columns)s)%(include)s%(extra)s%(condition)s" ) sql_create_unique_index = ( "CREATE UNIQUE INDEX %(name)s ON %(table)s " "(%(columns)s)%(include)s%(condition)s" ) sql_rename_index = "ALTER INDEX %(old_name)s RENAME TO %(new_name)s" sql_delete_index = "DROP INDEX %(name)s" sql_create_pk = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)" ) sql_delete_pk = sql_delete_constraint sql_delete_procedure = "DROP PROCEDURE %(procedure)s" def __init__(self, connection, collect_sql=False, atomic=True): self.connection = connection self.collect_sql = collect_sql if self.collect_sql: self.collected_sql = [] self.atomic_migration = self.connection.features.can_rollback_ddl and atomic # State-managing methods def __enter__(self): self.deferred_sql = [] if self.atomic_migration: self.atomic = atomic(self.connection.alias) self.atomic.__enter__() return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: for sql in self.deferred_sql: self.execute(sql) if self.atomic_migration: self.atomic.__exit__(exc_type, exc_value, traceback) # Core utility functions def execute(self, sql, params=()): """Execute the given SQL statement, with optional parameters.""" # Don't perform the transactional DDL check if SQL is being collected # as it's not going to be executed anyway. 
if ( not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl ): raise TransactionManagementError( "Executing DDL statements while in a transaction on databases " "that can't perform a rollback is prohibited." ) # Account for non-string statement objects. sql = str(sql) # Log the command we're running, then run it logger.debug( "%s; (params %r)", sql, params, extra={"params": params, "sql": sql} ) if self.collect_sql: ending = "" if sql.rstrip().endswith(";") else ";" if params is not None: self.collected_sql.append( (sql % tuple(map(self.quote_value, params))) + ending ) else: self.collected_sql.append(sql + ending) else: with self.connection.cursor() as cursor: cursor.execute(sql, params) def quote_name(self, name): return self.connection.ops.quote_name(name) def table_sql(self, model): """Take a model and return its table definition.""" # Add any unique_togethers (always deferred, as some fields might be # created afterward, like geometry fields with some backends). for field_names in model._meta.unique_together: fields = [model._meta.get_field(field) for field in field_names] self.deferred_sql.append(self._create_unique_sql(model, fields)) # Create column SQL, add FK deferreds if needed. column_sqls = [] params = [] for field in model._meta.local_fields: # SQL. definition, extra_params = self.column_sql(model, field) if definition is None: continue # Check constraints can go on the column SQL here. db_params = field.db_parameters(connection=self.connection) if db_params["check"]: definition += " " + self.sql_check_constraint % db_params # Autoincrement SQL (for backends with inline variant). col_type_suffix = field.db_type_suffix(connection=self.connection) if col_type_suffix: definition += " %s" % col_type_suffix params.extend(extra_params) # FK. if field.remote_field and field.db_constraint: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field( field.remote_field.field_name ).column if self.sql_create_inline_fk: definition += " " + self.sql_create_inline_fk % { "to_table": self.quote_name(to_table), "to_column": self.quote_name(to_column), } elif self.connection.features.supports_foreign_keys: self.deferred_sql.append( self._create_fk_sql( model, field, "_fk_%(to_table)s_%(to_column)s" ) ) # Add the SQL to our big list. column_sqls.append( "%s %s" % ( self.quote_name(field.column), definition, ) ) # Autoincrement SQL (for backends with post table definition # variant). if field.get_internal_type() in ( "AutoField", "BigAutoField", "SmallAutoField", ): autoinc_sql = self.connection.ops.autoinc_sql( model._meta.db_table, field.column ) if autoinc_sql: self.deferred_sql.extend(autoinc_sql) constraints = [ constraint.constraint_sql(model, self) for constraint in model._meta.constraints ] sql = self.sql_create_table % { "table": self.quote_name(model._meta.db_table), "definition": ", ".join( str(constraint) for constraint in (*column_sqls, *constraints) if constraint ), } if model._meta.db_tablespace: tablespace_sql = self.connection.ops.tablespace_sql( model._meta.db_tablespace ) if tablespace_sql: sql += " " + tablespace_sql return sql, params # Field <-> database mapping functions def _iter_column_sql( self, column_db_type, params, model, field, field_db_params, include_default ): yield column_db_type if collation := field_db_params.get("collation"): yield self._collate_sql(collation) # Work out nullability. null = field.null # Include a default value, if requested. 
include_default = ( include_default and not self.skip_default(field) and # Don't include a default value if it's a nullable field and the # default cannot be dropped in the ALTER COLUMN statement (e.g. # MySQL longtext and longblob). not (null and self.skip_default_on_alter(field)) ) if include_default: default_value = self.effective_default(field) if default_value is not None: column_default = "DEFAULT " + self._column_default_sql(field) if self.connection.features.requires_literal_defaults: # Some databases can't take defaults as a parameter (Oracle). # If this is the case, the individual schema backend should # implement prepare_default(). yield column_default % self.prepare_default(default_value) else: yield column_default params.append(default_value) # Oracle treats the empty string ('') as null, so coerce the null # option whenever '' is a possible value. if ( field.empty_strings_allowed and not field.primary_key and self.connection.features.interprets_empty_strings_as_nulls ): null = True if not null: yield "NOT NULL" elif not self.connection.features.implied_column_null: yield "NULL" if field.primary_key: yield "PRIMARY KEY" elif field.unique: yield "UNIQUE" # Optionally add the tablespace if it's an implicitly indexed column. tablespace = field.db_tablespace or model._meta.db_tablespace if ( tablespace and self.connection.features.supports_tablespaces and field.unique ): yield self.connection.ops.tablespace_sql(tablespace, inline=True) def column_sql(self, model, field, include_default=False): """ Return the column definition for a field. The field must already have had set_attributes_from_name() called. """ # Get the column's type and use that as the basis of the SQL. field_db_params = field.db_parameters(connection=self.connection) column_db_type = field_db_params["type"] # Check for fields that aren't actually columns (e.g. M2M). if column_db_type is None: return None, None params = [] return ( " ".join( # This appends to the params being returned. self._iter_column_sql( column_db_type, params, model, field, field_db_params, include_default, ) ), params, ) def skip_default(self, field): """ Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob). """ return False def skip_default_on_alter(self, field): """ Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob) in the ALTER COLUMN statement. """ return False def prepare_default(self, value): """ Only used for backends which have requires_literal_defaults feature """ raise NotImplementedError( "subclasses of BaseDatabaseSchemaEditor for backends which have " "requires_literal_defaults must provide a prepare_default() method" ) def _column_default_sql(self, field): """ Return the SQL to use in a DEFAULT clause. The resulting string should contain a '%s' placeholder for a default value. """ return "%s" @staticmethod def _effective_default(field): # This method allows testing its logic without a connection. 
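        # For example, a non-null CharField(blank=True) with no explicit
        # default falls back to "" (b"" for BinaryField), and auto_now /
        # auto_now_add fields fall back to the current timestamp, truncated
        # to a date or time for DateField/TimeField.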
if field.has_default(): default = field.get_default() elif not field.null and field.blank and field.empty_strings_allowed: if field.get_internal_type() == "BinaryField": default = b"" else: default = "" elif getattr(field, "auto_now", False) or getattr(field, "auto_now_add", False): internal_type = field.get_internal_type() if internal_type == "DateTimeField": default = timezone.now() else: default = datetime.now() if internal_type == "DateField": default = default.date() elif internal_type == "TimeField": default = default.time() else: default = None return default def effective_default(self, field): """Return a field's effective database default value.""" return field.get_db_prep_save(self._effective_default(field), self.connection) def quote_value(self, value): """ Return a quoted version of the value so it's safe to use in an SQL string. This is not safe against injection from user code; it is intended only for use in making SQL scripts or preparing default values for particularly tricky backends (defaults are not user-defined, though, so this is safe). """ raise NotImplementedError() # Actions def create_model(self, model): """ Create a table and any accompanying indexes or unique constraints for the given `model`. """ sql, params = self.table_sql(model) # Prevent using [] as params, in the case a literal '%' is used in the # definition. self.execute(sql, params or None) # Add any field index and index_together's (deferred as SQLite # _remake_table needs it). self.deferred_sql.extend(self._model_indexes_sql(model)) # Make M2M tables for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.create_model(field.remote_field.through) def delete_model(self, model): """Delete a model from the database.""" # Handle auto-created intermediary models for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.delete_model(field.remote_field.through) # Delete the table self.execute( self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), } ) # Remove all deferred statements referencing the deleted table. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_table( model._meta.db_table ): self.deferred_sql.remove(sql) def add_index(self, model, index): """Add an index on a model.""" if ( index.contains_expressions and not self.connection.features.supports_expression_indexes ): return None # Index.create_sql returns interpolated SQL which makes params=None a # necessity to avoid escaping attempts on execution. self.execute(index.create_sql(model, self), params=None) def remove_index(self, model, index): """Remove an index from a model.""" if ( index.contains_expressions and not self.connection.features.supports_expression_indexes ): return None self.execute(index.remove_sql(model, self)) def rename_index(self, model, old_index, new_index): if self.connection.features.can_rename_index: self.execute( self._rename_index_sql(model, old_index.name, new_index.name), params=None, ) else: self.remove_index(model, old_index) self.add_index(model, new_index) def add_constraint(self, model, constraint): """Add a constraint to a model.""" sql = constraint.create_sql(model, self) if sql: # Constraint.create_sql returns interpolated SQL which makes # params=None a necessity to avoid escaping attempts on execution. 
self.execute(sql, params=None) def remove_constraint(self, model, constraint): """Remove a constraint from a model.""" sql = constraint.remove_sql(model, self) if sql: self.execute(sql) def alter_unique_together(self, model, old_unique_together, new_unique_together): """ Deal with a model changing its unique_together. The input unique_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. """ olds = {tuple(fields) for fields in old_unique_together} news = {tuple(fields) for fields in new_unique_together} # Deleted uniques for fields in olds.difference(news): self._delete_composed_index( model, fields, {"unique": True, "primary_key": False}, self.sql_delete_unique, ) # Created uniques for field_names in news.difference(olds): fields = [model._meta.get_field(field) for field in field_names] self.execute(self._create_unique_sql(model, fields)) def alter_index_together(self, model, old_index_together, new_index_together): """ Deal with a model changing its index_together. The input index_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. """ olds = {tuple(fields) for fields in old_index_together} news = {tuple(fields) for fields in new_index_together} # Deleted indexes for fields in olds.difference(news): self._delete_composed_index( model, fields, {"index": True, "unique": False}, self.sql_delete_index, ) # Created indexes for field_names in news.difference(olds): fields = [model._meta.get_field(field) for field in field_names] self.execute(self._create_index_sql(model, fields=fields, suffix="_idx")) def _delete_composed_index(self, model, fields, constraint_kwargs, sql): meta_constraint_names = { constraint.name for constraint in model._meta.constraints } meta_index_names = {constraint.name for constraint in model._meta.indexes} columns = [model._meta.get_field(field).column for field in fields] constraint_names = self._constraint_names( model, columns, exclude=meta_constraint_names | meta_index_names, **constraint_kwargs, ) if ( constraint_kwargs.get("unique") is True and constraint_names and self.connection.features.allows_multiple_constraints_on_same_fields ): # Constraint matching the unique_together name. default_name = str( self._unique_constraint_name(model._meta.db_table, columns, quote=False) ) if default_name in constraint_names: constraint_names = [default_name] if len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of constraints for %s(%s)" % ( len(constraint_names), model._meta.db_table, ", ".join(columns), ) ) self.execute(self._delete_constraint_sql(sql, model, constraint_names[0])) def alter_db_table(self, model, old_db_table, new_db_table): """Rename the table a model points to.""" if old_db_table == new_db_table or ( self.connection.features.ignores_table_name_case and old_db_table.lower() == new_db_table.lower() ): return self.execute( self.sql_rename_table % { "old_table": self.quote_name(old_db_table), "new_table": self.quote_name(new_db_table), } ) # Rename all references to the old table name. for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_table_references(old_db_table, new_db_table) def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace): """Move a model's table between tablespaces.""" self.execute( self.sql_retablespace_table % { "table": self.quote_name(model._meta.db_table), "old_tablespace": self.quote_name(old_db_tablespace), "new_tablespace": self.quote_name(new_db_tablespace), } ) def add_field(self, model, field): """ Create a field on a model. 
Usually involves adding a column, but may involve adding a table instead (for M2M fields). """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.create_model(field.remote_field.through) # Get the column's definition definition, params = self.column_sql(model, field, include_default=True) # It might not actually have a column behind it if definition is None: return if col_type_suffix := field.db_type_suffix(connection=self.connection): definition += f" {col_type_suffix}" # Check constraints can go on the column SQL here db_params = field.db_parameters(connection=self.connection) if db_params["check"]: definition += " " + self.sql_check_constraint % db_params if ( field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint ): constraint_suffix = "_fk_%(to_table)s_%(to_column)s" # Add FK constraint inline, if supported. if self.sql_create_column_inline_fk: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field( field.remote_field.field_name ).column namespace, _ = split_identifier(model._meta.db_table) definition += " " + self.sql_create_column_inline_fk % { "name": self._fk_constraint_name(model, field, constraint_suffix), "namespace": "%s." % self.quote_name(namespace) if namespace else "", "column": self.quote_name(field.column), "to_table": self.quote_name(to_table), "to_column": self.quote_name(to_column), "deferrable": self.connection.ops.deferrable_sql(), } # Otherwise, add FK constraints later. else: self.deferred_sql.append( self._create_fk_sql(model, field, constraint_suffix) ) # Build the SQL and run it sql = self.sql_create_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), "definition": definition, } self.execute(sql, params) # Drop the default if we need to # (Django usually does not use in-database defaults) if ( not self.skip_default_on_alter(field) and self.effective_default(field) is not None ): changes_sql, params = self._alter_column_default_sql( model, None, field, drop=True ) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Add an index, if required self.deferred_sql.extend(self._field_indexes_sql(model, field)) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def remove_field(self, model, field): """ Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table. """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.delete_model(field.remote_field.through) # It might not actually have a column behind it if field.db_parameters(connection=self.connection)["type"] is None: return # Drop any FK constraints, MySQL requires explicit deletion if field.remote_field: fk_names = self._constraint_names(model, [field.column], foreign_key=True) for fk_name in fk_names: self.execute(self._delete_fk_sql(model, fk_name)) # Delete the column sql = self.sql_delete_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), } self.execute(sql) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() # Remove all deferred statements referencing the deleted column. 
for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_column( model._meta.db_table, field.column ): self.deferred_sql.remove(sql) def alter_field(self, model, old_field, new_field, strict=False): """ Allow a field's type, uniqueness, nullability, default, column, constraints, etc. to be modified. `old_field` is required to compute the necessary changes. If `strict` is True, raise errors if the old column does not match `old_field` precisely. """ if not self._field_should_be_altered(old_field, new_field): return # Ensure this field is even column-based old_db_params = old_field.db_parameters(connection=self.connection) old_type = old_db_params["type"] new_db_params = new_field.db_parameters(connection=self.connection) new_type = new_db_params["type"] if (old_type is None and old_field.remote_field is None) or ( new_type is None and new_field.remote_field is None ): raise ValueError( "Cannot alter field %s into %s - they do not properly define " "db_type (are you using a badly-written custom field?)" % (old_field, new_field), ) elif ( old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and old_field.remote_field.through._meta.auto_created and new_field.remote_field.through._meta.auto_created ) ): return self._alter_many_to_many(model, old_field, new_field, strict) elif ( old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and not old_field.remote_field.through._meta.auto_created and not new_field.remote_field.through._meta.auto_created ) ): # Both sides have through models; this is a no-op. return elif old_type is None or new_type is None: raise ValueError( "Cannot alter field %s into %s - they are not compatible types " "(you cannot alter to or from M2M fields, or add or remove " "through= on M2M fields)" % (old_field, new_field) ) self._alter_field( model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict, ) def _alter_field( self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False, ): """Perform a "physical" (non-ManyToMany) field update.""" # Drop any FK constraints, we'll remake them later fks_dropped = set() if ( self.connection.features.supports_foreign_keys and old_field.remote_field and old_field.db_constraint ): fk_names = self._constraint_names( model, [old_field.column], foreign_key=True ) if strict and len(fk_names) != 1: raise ValueError( "Found wrong number (%s) of foreign key constraints for %s.%s" % ( len(fk_names), model._meta.db_table, old_field.column, ) ) for fk_name in fk_names: fks_dropped.add((old_field.column,)) self.execute(self._delete_fk_sql(model, fk_name)) # Has unique been removed? 
if old_field.unique and ( not new_field.unique or self._field_became_primary_key(old_field, new_field) ): # Find the unique constraint for this field meta_constraint_names = { constraint.name for constraint in model._meta.constraints } constraint_names = self._constraint_names( model, [old_field.column], unique=True, primary_key=False, exclude=meta_constraint_names, ) if strict and len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of unique constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, ) ) for constraint_name in constraint_names: self.execute(self._delete_unique_sql(model, constraint_name)) # Drop incoming FK constraints if the field is a primary key or unique, # which might be a to_field target, and things are going to change. old_collation = old_db_params.get("collation") new_collation = new_db_params.get("collation") drop_foreign_keys = ( self.connection.features.supports_foreign_keys and ( (old_field.primary_key and new_field.primary_key) or (old_field.unique and new_field.unique) ) and ((old_type != new_type) or (old_collation != new_collation)) ) if drop_foreign_keys: # '_meta.related_field' also contains M2M reverse fields, these # will be filtered out for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field): rel_fk_names = self._constraint_names( new_rel.related_model, [new_rel.field.column], foreign_key=True ) for fk_name in rel_fk_names: self.execute(self._delete_fk_sql(new_rel.related_model, fk_name)) # Removed an index? (no strict check, as multiple indexes are possible) # Remove indexes if db_index switched to False or a unique constraint # will now be used in lieu of an index. The following lines from the # truth table show all True cases; the rest are False: # # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique # ------------------------------------------------------------------------------ # True | False | False | False # True | False | False | True # True | False | True | True if ( old_field.db_index and not old_field.unique and (not new_field.db_index or new_field.unique) ): # Find the index for this field meta_index_names = {index.name for index in model._meta.indexes} # Retrieve only BTREE indexes since this is what's created with # db_index=True. index_names = self._constraint_names( model, [old_field.column], index=True, type_=Index.suffix, exclude=meta_index_names, ) for index_name in index_names: # The only way to check if an index was created with # db_index=True or with Index(['field'], name='foo') # is to look at its name (refs #28053). self.execute(self._delete_index_sql(model, index_name)) # Change check constraints? if old_db_params["check"] != new_db_params["check"] and old_db_params["check"]: meta_constraint_names = { constraint.name for constraint in model._meta.constraints } constraint_names = self._constraint_names( model, [old_field.column], check=True, exclude=meta_constraint_names, ) if strict and len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of check constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, ) ) for constraint_name in constraint_names: self.execute(self._delete_check_sql(model, constraint_name)) # Have they renamed the column? if old_field.column != new_field.column: self.execute( self._rename_field_sql( model._meta.db_table, old_field, new_field, new_type ) ) # Rename all references to the renamed column. 
for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_column_references( model._meta.db_table, old_field.column, new_field.column ) # Next, start accumulating actions to do actions = [] null_actions = [] post_actions = [] # Type suffix change? (e.g. auto increment). old_type_suffix = old_field.db_type_suffix(connection=self.connection) new_type_suffix = new_field.db_type_suffix(connection=self.connection) # Collation change? if old_collation != new_collation: # Collation change handles also a type change. fragment = self._alter_column_collation_sql( model, new_field, new_type, new_collation, old_field ) actions.append(fragment) # Type change? elif (old_type, old_type_suffix) != (new_type, new_type_suffix): fragment, other_actions = self._alter_column_type_sql( model, old_field, new_field, new_type ) actions.append(fragment) post_actions.extend(other_actions) # When changing a column NULL constraint to NOT NULL with a given # default value, we need to perform 4 steps: # 1. Add a default for new incoming writes # 2. Update existing NULL rows with new default # 3. Replace NULL constraint with NOT NULL # 4. Drop the default again. # Default change? needs_database_default = False if old_field.null and not new_field.null: old_default = self.effective_default(old_field) new_default = self.effective_default(new_field) if ( not self.skip_default_on_alter(new_field) and old_default != new_default and new_default is not None ): needs_database_default = True actions.append( self._alter_column_default_sql(model, old_field, new_field) ) # Nullability change? if old_field.null != new_field.null: fragment = self._alter_column_null_sql(model, old_field, new_field) if fragment: null_actions.append(fragment) # Only if we have a default and there is a change from NULL to NOT NULL four_way_default_alteration = new_field.has_default() and ( old_field.null and not new_field.null ) if actions or null_actions: if not four_way_default_alteration: # If we don't have to do a 4-way default alteration we can # directly run a (NOT) NULL alteration actions += null_actions # Combine actions together if we can (e.g. postgres) if self.connection.features.supports_combined_alters and actions: sql, params = tuple(zip(*actions)) actions = [(", ".join(sql), sum(params, []))] # Apply those actions for sql, params in actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if four_way_default_alteration: # Update existing rows with default value self.execute( self.sql_update_with_default % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(new_field.column), "default": "%s", }, [new_default], ) # Since we didn't run a NOT NULL change before we need to do it # now for sql, params in null_actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if post_actions: for sql, params in post_actions: self.execute(sql, params) # If primary_key changed to False, delete the primary key constraint. if old_field.primary_key and not new_field.primary_key: self._delete_primary_key(model, strict) # Added a unique? if self._unique_should_be_added(old_field, new_field): self.execute(self._create_unique_sql(model, [new_field])) # Added an index? Add an index if db_index switched to True or a unique # constraint will no longer be used in lieu of an index. 
The following # lines from the truth table show all True cases; the rest are False: # # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique # ------------------------------------------------------------------------------ # False | False | True | False # False | True | True | False # True | True | True | False if ( (not old_field.db_index or old_field.unique) and new_field.db_index and not new_field.unique ): self.execute(self._create_index_sql(model, fields=[new_field])) # Type alteration on primary key? Then we need to alter the column # referring to us. rels_to_update = [] if drop_foreign_keys: rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Changed to become primary key? if self._field_became_primary_key(old_field, new_field): # Make the new one self.execute(self._create_primary_key_sql(model, new_field)) # Update all referencing columns rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Handle our type alters on the other end of rels from the PK stuff above for old_rel, new_rel in rels_to_update: rel_db_params = new_rel.field.db_parameters(connection=self.connection) rel_type = rel_db_params["type"] rel_collation = rel_db_params.get("collation") old_rel_db_params = old_rel.field.db_parameters(connection=self.connection) old_rel_collation = old_rel_db_params.get("collation") if old_rel_collation != rel_collation: # Collation change handles also a type change. fragment = self._alter_column_collation_sql( new_rel.related_model, new_rel.field, rel_type, rel_collation, old_rel.field, ) other_actions = [] else: fragment, other_actions = self._alter_column_type_sql( new_rel.related_model, old_rel.field, new_rel.field, rel_type ) self.execute( self.sql_alter_column % { "table": self.quote_name(new_rel.related_model._meta.db_table), "changes": fragment[0], }, fragment[1], ) for sql, params in other_actions: self.execute(sql, params) # Does it have a foreign key? if ( self.connection.features.supports_foreign_keys and new_field.remote_field and ( fks_dropped or not old_field.remote_field or not old_field.db_constraint ) and new_field.db_constraint ): self.execute( self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s") ) # Rebuild FKs that pointed to us if we previously had to drop them if drop_foreign_keys: for _, rel in rels_to_update: if rel.field.db_constraint: self.execute( self._create_fk_sql(rel.related_model, rel.field, "_fk") ) # Does it have check constraints we need to add? if old_db_params["check"] != new_db_params["check"] and new_db_params["check"]: constraint_name = self._create_index_name( model._meta.db_table, [new_field.column], suffix="_check" ) self.execute( self._create_check_sql(model, constraint_name, new_db_params["check"]) ) # Drop the default if we need to # (Django usually does not use in-database defaults) if needs_database_default: changes_sql, params = self._alter_column_default_sql( model, old_field, new_field, drop=True ) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def _alter_column_null_sql(self, model, old_field, new_field): """ Hook to specialize column null alteration. Return a (sql, params) fragment to set a column to null or non-null as required by new_field, or None if no changes are required. 
""" if ( self.connection.features.interprets_empty_strings_as_nulls and new_field.empty_strings_allowed ): # The field is nullable in the database anyway, leave it alone. return else: new_db_params = new_field.db_parameters(connection=self.connection) sql = ( self.sql_alter_column_null if new_field.null else self.sql_alter_column_not_null ) return ( sql % { "column": self.quote_name(new_field.column), "type": new_db_params["type"], }, [], ) def _alter_column_default_sql(self, model, old_field, new_field, drop=False): """ Hook to specialize column default alteration. Return a (sql, params) fragment to add or drop (depending on the drop argument) a default to new_field's column. """ new_default = self.effective_default(new_field) default = self._column_default_sql(new_field) params = [new_default] if drop: params = [] elif self.connection.features.requires_literal_defaults: # Some databases (Oracle) can't take defaults as a parameter # If this is the case, the SchemaEditor for that database should # implement prepare_default(). default = self.prepare_default(new_default) params = [] new_db_params = new_field.db_parameters(connection=self.connection) if drop: if new_field.null: sql = self.sql_alter_column_no_default_null else: sql = self.sql_alter_column_no_default else: sql = self.sql_alter_column_default return ( sql % { "column": self.quote_name(new_field.column), "type": new_db_params["type"], "default": default, }, params, ) def _alter_column_type_sql(self, model, old_field, new_field, new_type): """ Hook to specialize column type alteration for different backends, for cases when a creation type is different to an alteration type (e.g. SERIAL in PostgreSQL, PostGIS fields). Return a two-tuple of: an SQL fragment of (sql, params) to insert into an ALTER TABLE statement and a list of extra (sql, params) tuples to run once the field is altered. """ return ( ( self.sql_alter_column_type % { "column": self.quote_name(new_field.column), "type": new_type, }, [], ), [], ) def _alter_column_collation_sql( self, model, new_field, new_type, new_collation, old_field ): return ( self.sql_alter_column_collate % { "column": self.quote_name(new_field.column), "type": new_type, "collation": " " + self._collate_sql(new_collation) if new_collation else "", }, [], ) def _alter_many_to_many(self, model, old_field, new_field, strict): """Alter M2Ms to repoint their to= endpoints.""" # Rename the through table if ( old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table ): self.alter_db_table( old_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table, ) # Repoint the FK to the other side self.alter_field( new_field.remote_field.through, # The field that points to the target model is needed, so we can # tell alter_field to change it - this is m2m_reverse_field_name() # (as opposed to m2m_field_name(), which points to our model). old_field.remote_field.through._meta.get_field( old_field.m2m_reverse_field_name() ), new_field.remote_field.through._meta.get_field( new_field.m2m_reverse_field_name() ), ) self.alter_field( new_field.remote_field.through, # for self-referential models we need to alter field from the other end too old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()), ) def _create_index_name(self, table_name, column_names, suffix=""): """ Generate a unique name for an index/unique constraint. 
The name is divided into 3 parts: the table name, the column names, and a unique digest and suffix. """ _, table_name = split_identifier(table_name) hash_suffix_part = "%s%s" % ( names_digest(table_name, *column_names, length=8), suffix, ) max_length = self.connection.ops.max_name_length() or 200 # If everything fits into max_length, use that name. index_name = "%s_%s_%s" % (table_name, "_".join(column_names), hash_suffix_part) if len(index_name) <= max_length: return index_name # Shorten a long suffix. if len(hash_suffix_part) > max_length / 3: hash_suffix_part = hash_suffix_part[: max_length // 3] other_length = (max_length - len(hash_suffix_part)) // 2 - 1 index_name = "%s_%s_%s" % ( table_name[:other_length], "_".join(column_names)[:other_length], hash_suffix_part, ) # Prepend D if needed to prevent the name from starting with an # underscore or a number (not permitted on Oracle). if index_name[0] == "_" or index_name[0].isdigit(): index_name = "D%s" % index_name[:-1] return index_name def _get_index_tablespace_sql(self, model, fields, db_tablespace=None): if db_tablespace is None: if len(fields) == 1 and fields[0].db_tablespace: db_tablespace = fields[0].db_tablespace elif settings.DEFAULT_INDEX_TABLESPACE: db_tablespace = settings.DEFAULT_INDEX_TABLESPACE elif model._meta.db_tablespace: db_tablespace = model._meta.db_tablespace if db_tablespace is not None: return " " + self.connection.ops.tablespace_sql(db_tablespace) return "" def _index_condition_sql(self, condition): if condition: return " WHERE " + condition return "" def _index_include_sql(self, model, columns): if not columns or not self.connection.features.supports_covering_indexes: return "" return Statement( " INCLUDE (%(columns)s)", columns=Columns(model._meta.db_table, columns, self.quote_name), ) def _create_index_sql( self, model, *, fields=None, name=None, suffix="", using="", db_tablespace=None, col_suffixes=(), sql=None, opclasses=(), condition=None, include=None, expressions=None, ): """ Return the SQL statement to create the index for one or several fields or expressions. `sql` can be specified if the syntax differs from the standard (GIS indexes, ...). 
""" fields = fields or [] expressions = expressions or [] compiler = Query(model, alias_cols=False).get_compiler( connection=self.connection, ) tablespace_sql = self._get_index_tablespace_sql( model, fields, db_tablespace=db_tablespace ) columns = [field.column for field in fields] sql_create_index = sql or self.sql_create_index table = model._meta.db_table def create_index_name(*args, **kwargs): nonlocal name if name is None: name = self._create_index_name(*args, **kwargs) return self.quote_name(name) return Statement( sql_create_index, table=Table(table, self.quote_name), name=IndexName(table, columns, suffix, create_index_name), using=using, columns=( self._index_columns(table, columns, col_suffixes, opclasses) if columns else Expressions(table, expressions, compiler, self.quote_value) ), extra=tablespace_sql, condition=self._index_condition_sql(condition), include=self._index_include_sql(model, include), ) def _delete_index_sql(self, model, name, sql=None): return Statement( sql or self.sql_delete_index, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), ) def _rename_index_sql(self, model, old_name, new_name): return Statement( self.sql_rename_index, table=Table(model._meta.db_table, self.quote_name), old_name=self.quote_name(old_name), new_name=self.quote_name(new_name), ) def _index_columns(self, table, columns, col_suffixes, opclasses): return Columns(table, columns, self.quote_name, col_suffixes=col_suffixes) def _model_indexes_sql(self, model): """ Return a list of all index SQL statements (field indexes, index_together, Meta.indexes) for the specified model. """ if not model._meta.managed or model._meta.proxy or model._meta.swapped: return [] output = [] for field in model._meta.local_fields: output.extend(self._field_indexes_sql(model, field)) for field_names in model._meta.index_together: fields = [model._meta.get_field(field) for field in field_names] output.append(self._create_index_sql(model, fields=fields, suffix="_idx")) for index in model._meta.indexes: if ( not index.contains_expressions or self.connection.features.supports_expression_indexes ): output.append(index.create_sql(model, self)) return output def _field_indexes_sql(self, model, field): """ Return a list of all index SQL statements for the specified field. 
""" output = [] if self._field_should_be_indexed(model, field): output.append(self._create_index_sql(model, fields=[field])) return output def _field_should_be_altered(self, old_field, new_field): _, old_path, old_args, old_kwargs = old_field.deconstruct() _, new_path, new_args, new_kwargs = new_field.deconstruct() # Don't alter when: # - changing only a field name # - changing an attribute that doesn't affect the schema # - adding only a db_column and the column name is not changed for attr in old_field.non_db_attrs: old_kwargs.pop(attr, None) for attr in new_field.non_db_attrs: new_kwargs.pop(attr, None) return self.quote_name(old_field.column) != self.quote_name( new_field.column ) or (old_path, old_args, old_kwargs) != (new_path, new_args, new_kwargs) def _field_should_be_indexed(self, model, field): return field.db_index and not field.unique def _field_became_primary_key(self, old_field, new_field): return not old_field.primary_key and new_field.primary_key def _unique_should_be_added(self, old_field, new_field): return ( not new_field.primary_key and new_field.unique and (not old_field.unique or old_field.primary_key) ) def _rename_field_sql(self, table, old_field, new_field, new_type): return self.sql_rename_column % { "table": self.quote_name(table), "old_column": self.quote_name(old_field.column), "new_column": self.quote_name(new_field.column), "type": new_type, } def _create_fk_sql(self, model, field, suffix): table = Table(model._meta.db_table, self.quote_name) name = self._fk_constraint_name(model, field, suffix) column = Columns(model._meta.db_table, [field.column], self.quote_name) to_table = Table(field.target_field.model._meta.db_table, self.quote_name) to_column = Columns( field.target_field.model._meta.db_table, [field.target_field.column], self.quote_name, ) deferrable = self.connection.ops.deferrable_sql() return Statement( self.sql_create_fk, table=table, name=name, column=column, to_table=to_table, to_column=to_column, deferrable=deferrable, ) def _fk_constraint_name(self, model, field, suffix): def create_fk_name(*args, **kwargs): return self.quote_name(self._create_index_name(*args, **kwargs)) return ForeignKeyName( model._meta.db_table, [field.column], split_identifier(field.target_field.model._meta.db_table)[1], [field.target_field.column], suffix, create_fk_name, ) def _delete_fk_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_fk, model, name) def _deferrable_constraint_sql(self, deferrable): if deferrable is None: return "" if deferrable == Deferrable.DEFERRED: return " DEFERRABLE INITIALLY DEFERRED" if deferrable == Deferrable.IMMEDIATE: return " DEFERRABLE INITIALLY IMMEDIATE" def _unique_sql( self, model, fields, name, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, ): if ( deferrable and not self.connection.features.supports_deferrable_unique_constraints ): return None if condition or include or opclasses or expressions: # Databases support conditional, covering, and functional unique # constraints via a unique index. 
sql = self._create_unique_sql( model, fields, name=name, condition=condition, include=include, opclasses=opclasses, expressions=expressions, ) if sql: self.deferred_sql.append(sql) return None constraint = self.sql_unique_constraint % { "columns": ", ".join([self.quote_name(field.column) for field in fields]), "deferrable": self._deferrable_constraint_sql(deferrable), } return self.sql_constraint % { "name": self.quote_name(name), "constraint": constraint, } def _create_unique_sql( self, model, fields, name=None, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, ): if ( ( deferrable and not self.connection.features.supports_deferrable_unique_constraints ) or (condition and not self.connection.features.supports_partial_indexes) or (include and not self.connection.features.supports_covering_indexes) or ( expressions and not self.connection.features.supports_expression_indexes ) ): return None compiler = Query(model, alias_cols=False).get_compiler( connection=self.connection ) table = model._meta.db_table columns = [field.column for field in fields] if name is None: name = self._unique_constraint_name(table, columns, quote=True) else: name = self.quote_name(name) if condition or include or opclasses or expressions: sql = self.sql_create_unique_index else: sql = self.sql_create_unique if columns: columns = self._index_columns( table, columns, col_suffixes=(), opclasses=opclasses ) else: columns = Expressions(table, expressions, compiler, self.quote_value) return Statement( sql, table=Table(table, self.quote_name), name=name, columns=columns, condition=self._index_condition_sql(condition), deferrable=self._deferrable_constraint_sql(deferrable), include=self._index_include_sql(model, include), ) def _unique_constraint_name(self, table, columns, quote=True): if quote: def create_unique_name(*args, **kwargs): return self.quote_name(self._create_index_name(*args, **kwargs)) else: create_unique_name = self._create_index_name return IndexName(table, columns, "_uniq", create_unique_name) def _delete_unique_sql( self, model, name, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, ): if ( ( deferrable and not self.connection.features.supports_deferrable_unique_constraints ) or (condition and not self.connection.features.supports_partial_indexes) or (include and not self.connection.features.supports_covering_indexes) or ( expressions and not self.connection.features.supports_expression_indexes ) ): return None if condition or include or opclasses or expressions: sql = self.sql_delete_index else: sql = self.sql_delete_unique return self._delete_constraint_sql(sql, model, name) def _check_sql(self, name, check): return self.sql_constraint % { "name": self.quote_name(name), "constraint": self.sql_check_constraint % {"check": check}, } def _create_check_sql(self, model, name, check): return Statement( self.sql_create_check, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), check=check, ) def _delete_check_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_check, model, name) def _delete_constraint_sql(self, template, model, name): return Statement( template, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), ) def _constraint_names( self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None, type_=None, exclude=None, ): """Return all constraint names matching the columns and conditions.""" if column_names is not None: 
column_names = [ self.connection.introspection.identifier_converter(name) for name in column_names ] with self.connection.cursor() as cursor: constraints = self.connection.introspection.get_constraints( cursor, model._meta.db_table ) result = [] for name, infodict in constraints.items(): if column_names is None or column_names == infodict["columns"]: if unique is not None and infodict["unique"] != unique: continue if primary_key is not None and infodict["primary_key"] != primary_key: continue if index is not None and infodict["index"] != index: continue if check is not None and infodict["check"] != check: continue if foreign_key is not None and not infodict["foreign_key"]: continue if type_ is not None and infodict["type"] != type_: continue if not exclude or name not in exclude: result.append(name) return result def _delete_primary_key(self, model, strict=False): constraint_names = self._constraint_names(model, primary_key=True) if strict and len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of PK constraints for %s" % ( len(constraint_names), model._meta.db_table, ) ) for constraint_name in constraint_names: self.execute(self._delete_primary_key_sql(model, constraint_name)) def _create_primary_key_sql(self, model, field): return Statement( self.sql_create_pk, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name( self._create_index_name( model._meta.db_table, [field.column], suffix="_pk" ) ), columns=Columns(model._meta.db_table, [field.column], self.quote_name), ) def _delete_primary_key_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_pk, model, name) def _collate_sql(self, collation): return "COLLATE " + self.quote_name(collation) def remove_procedure(self, procedure_name, param_types=()): sql = self.sql_delete_procedure % { "procedure": self.quote_name(procedure_name), "param_types": ",".join(param_types), } self.execute(sql)
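
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the editor above): the name-truncation
# algorithm of _create_index_name(), reproduced standalone. It assumes a
# 30-character identifier cap (Oracle-style) and substitutes a plain SHA-1
# digest for Django's names_digest() helper; demo_index_name is an invented
# name used only in this sketch.
from hashlib import sha1


def demo_index_name(table_name, column_names, suffix="", max_length=30):
    hash_part = sha1("_".join((table_name, *column_names)).encode()).hexdigest()[:8]
    hash_suffix_part = hash_part + suffix
    index_name = "%s_%s_%s" % (table_name, "_".join(column_names), hash_suffix_part)
    # If everything fits into max_length, use that name.
    if len(index_name) <= max_length:
        return index_name
    # Shorten an overlong suffix, then split the remaining budget between
    # the table name and the joined column names.
    if len(hash_suffix_part) > max_length / 3:
        hash_suffix_part = hash_suffix_part[: max_length // 3]
    other_length = (max_length - len(hash_suffix_part)) // 2 - 1
    index_name = "%s_%s_%s" % (
        table_name[:other_length],
        "_".join(column_names)[:other_length],
        hash_suffix_part,
    )
    # Oracle forbids names starting with "_" or a digit.
    if index_name[0] == "_" or index_name[0].isdigit():
        index_name = "D%s" % index_name[:-1]
    return index_name


if __name__ == "__main__":
    # Always 30 characters or fewer, and deterministic for a given input.
    print(demo_index_name("myapp_verylongtablename", ["first_col", "second_col"]))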
c248e19b34e3f29d14bcad9733d098edb921133e0c6cd4a7f55155a721fa6446
import operator from django.db import transaction from django.db.backends.base.features import BaseDatabaseFeatures from django.db.utils import OperationalError from django.utils.functional import cached_property from .base import Database class DatabaseFeatures(BaseDatabaseFeatures): minimum_database_version = (3, 9) test_db_allows_multiple_connections = False supports_unspecified_pk = True supports_timezones = False max_query_params = 999 supports_transactions = True atomic_transactions = False can_rollback_ddl = True can_create_inline_fk = False supports_paramstyle_pyformat = False requires_literal_defaults = True can_clone_databases = True supports_temporal_subtraction = True ignores_table_name_case = True supports_cast_with_precision = False time_cast_precision = 3 can_release_savepoints = True has_case_insensitive_like = True # Is "ALTER TABLE ... RENAME COLUMN" supported? can_alter_table_rename_column = Database.sqlite_version_info >= (3, 25, 0) # Is "ALTER TABLE ... DROP COLUMN" supported? can_alter_table_drop_column = Database.sqlite_version_info >= (3, 35, 5) supports_parentheses_in_compound = False # Deferred constraint checks can be emulated on SQLite < 3.20 but not in a # reasonably performant way. supports_pragma_foreign_key_check = Database.sqlite_version_info >= (3, 20, 0) can_defer_constraint_checks = supports_pragma_foreign_key_check supports_functions_in_partial_indexes = Database.sqlite_version_info >= (3, 15, 0) supports_over_clause = Database.sqlite_version_info >= (3, 25, 0) supports_frame_range_fixed_distance = Database.sqlite_version_info >= (3, 28, 0) supports_aggregate_filter_clause = Database.sqlite_version_info >= (3, 30, 1) supports_order_by_nulls_modifier = Database.sqlite_version_info >= (3, 30, 0) # NULLS LAST/FIRST emulation on < 3.30 requires subquery wrapping. requires_compound_order_by_subquery = Database.sqlite_version_info < (3, 30) order_by_nulls_first = True supports_json_field_contains = False supports_update_conflicts = Database.sqlite_version_info >= (3, 24, 0) supports_update_conflicts_with_target = supports_update_conflicts test_collations = { "ci": "nocase", "cs": "binary", "non_default": "nocase", } django_test_expected_failures = { # The django_format_dtdelta() function doesn't properly handle mixed # Date/DateTime fields and timedeltas. "expressions.tests.FTimeDeltaTests.test_mixed_comparisons1", } create_test_table_with_composite_primary_key = """ CREATE TABLE test_table_composite_pk ( column_1 INTEGER NOT NULL, column_2 INTEGER NOT NULL, PRIMARY KEY(column_1, column_2) ) """ @cached_property def django_test_skips(self): skips = { "SQLite stores values rounded to 15 significant digits.": { "model_fields.test_decimalfield.DecimalFieldTests." "test_fetch_from_db_without_float_rounding", }, "SQLite naively remakes the table on field alteration.": { "schema.tests.SchemaTests.test_unique_no_unnecessary_fk_drops", "schema.tests.SchemaTests.test_unique_and_reverse_m2m", "schema.tests.SchemaTests." "test_alter_field_default_doesnt_perform_queries", "schema.tests.SchemaTests." "test_rename_column_renames_deferred_sql_references", }, "SQLite doesn't support negative precision for ROUND().": { "db_functions.math.test_round.RoundTests." "test_null_with_negative_precision", "db_functions.math.test_round.RoundTests." "test_decimal_with_negative_precision", "db_functions.math.test_round.RoundTests." "test_float_with_negative_precision", "db_functions.math.test_round.RoundTests." 
"test_integer_with_negative_precision", }, } if Database.sqlite_version_info < (3, 27): skips.update( { "Nondeterministic failure on SQLite < 3.27.": { "expressions_window.tests.WindowFunctionTests." "test_subquery_row_range_rank", }, } ) if self.connection.is_in_memory_db(): skips.update( { "the sqlite backend's close() method is a no-op when using an " "in-memory database": { "servers.test_liveserverthread.LiveServerThreadTest." "test_closes_connections", "servers.tests.LiveServerTestCloseConnectionTest." "test_closes_connections", }, } ) return skips @cached_property def supports_atomic_references_rename(self): return Database.sqlite_version_info >= (3, 26, 0) @cached_property def introspected_field_types(self): return { **super().introspected_field_types, "BigAutoField": "AutoField", "DurationField": "BigIntegerField", "GenericIPAddressField": "CharField", "SmallAutoField": "AutoField", } @cached_property def supports_json_field(self): with self.connection.cursor() as cursor: try: with transaction.atomic(self.connection.alias): cursor.execute('SELECT JSON(\'{"a": "b"}\')') except OperationalError: return False return True can_introspect_json_field = property(operator.attrgetter("supports_json_field")) has_json_object_function = property(operator.attrgetter("supports_json_field")) @cached_property def can_return_columns_from_insert(self): return Database.sqlite_version_info >= (3, 35) can_return_rows_from_bulk_insert = property( operator.attrgetter("can_return_columns_from_insert") )
bb44172979655dc093f30c0bbf5632b9c61a844f34e565cecfa71cc9eeea874b
""" SQLite backend for the sqlite3 module in the standard library. """ import datetime import decimal import warnings from itertools import chain from sqlite3 import dbapi2 as Database from django.core.exceptions import ImproperlyConfigured from django.db import IntegrityError from django.db.backends.base.base import BaseDatabaseWrapper from django.utils.asyncio import async_unsafe from django.utils.dateparse import parse_date, parse_datetime, parse_time from django.utils.regex_helper import _lazy_re_compile from ._functions import register as register_functions from .client import DatabaseClient from .creation import DatabaseCreation from .features import DatabaseFeatures from .introspection import DatabaseIntrospection from .operations import DatabaseOperations from .schema import DatabaseSchemaEditor def decoder(conv_func): """ Convert bytestrings from Python's sqlite3 interface to a regular string. """ return lambda s: conv_func(s.decode()) def adapt_date(val): return val.isoformat() def adapt_datetime(val): return val.isoformat(" ") Database.register_converter("bool", b"1".__eq__) Database.register_converter("date", decoder(parse_date)) Database.register_converter("time", decoder(parse_time)) Database.register_converter("datetime", decoder(parse_datetime)) Database.register_converter("timestamp", decoder(parse_datetime)) Database.register_adapter(decimal.Decimal, str) Database.register_adapter(datetime.date, adapt_date) Database.register_adapter(datetime.datetime, adapt_datetime) class DatabaseWrapper(BaseDatabaseWrapper): vendor = "sqlite" display_name = "SQLite" # SQLite doesn't actually support most of these types, but it "does the right # thing" given more verbose field definitions, so leave them as is so that # schema inspection is more useful. data_types = { "AutoField": "integer", "BigAutoField": "integer", "BinaryField": "BLOB", "BooleanField": "bool", "CharField": "varchar(%(max_length)s)", "DateField": "date", "DateTimeField": "datetime", "DecimalField": "decimal", "DurationField": "bigint", "FileField": "varchar(%(max_length)s)", "FilePathField": "varchar(%(max_length)s)", "FloatField": "real", "IntegerField": "integer", "BigIntegerField": "bigint", "IPAddressField": "char(15)", "GenericIPAddressField": "char(39)", "JSONField": "text", "OneToOneField": "integer", "PositiveBigIntegerField": "bigint unsigned", "PositiveIntegerField": "integer unsigned", "PositiveSmallIntegerField": "smallint unsigned", "SlugField": "varchar(%(max_length)s)", "SmallAutoField": "integer", "SmallIntegerField": "smallint", "TextField": "text", "TimeField": "time", "UUIDField": "char(32)", } data_type_check_constraints = { "PositiveBigIntegerField": '"%(column)s" >= 0', "JSONField": '(JSON_VALID("%(column)s") OR "%(column)s" IS NULL)', "PositiveIntegerField": '"%(column)s" >= 0', "PositiveSmallIntegerField": '"%(column)s" >= 0', } data_types_suffix = { "AutoField": "AUTOINCREMENT", "BigAutoField": "AUTOINCREMENT", "SmallAutoField": "AUTOINCREMENT", } # SQLite requires LIKE statements to include an ESCAPE clause if the value # being escaped has a percent or underscore in it. # See https://www.sqlite.org/lang_expr.html for an explanation. 
operators = { "exact": "= %s", "iexact": "LIKE %s ESCAPE '\\'", "contains": "LIKE %s ESCAPE '\\'", "icontains": "LIKE %s ESCAPE '\\'", "regex": "REGEXP %s", "iregex": "REGEXP '(?i)' || %s", "gt": "> %s", "gte": ">= %s", "lt": "< %s", "lte": "<= %s", "startswith": "LIKE %s ESCAPE '\\'", "endswith": "LIKE %s ESCAPE '\\'", "istartswith": "LIKE %s ESCAPE '\\'", "iendswith": "LIKE %s ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { "contains": r"LIKE '%%' || {} || '%%' ESCAPE '\'", "icontains": r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'", "startswith": r"LIKE {} || '%%' ESCAPE '\'", "istartswith": r"LIKE UPPER({}) || '%%' ESCAPE '\'", "endswith": r"LIKE '%%' || {} ESCAPE '\'", "iendswith": r"LIKE '%%' || UPPER({}) ESCAPE '\'", } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations def get_connection_params(self): settings_dict = self.settings_dict if not settings_dict["NAME"]: raise ImproperlyConfigured( "settings.DATABASES is improperly configured. " "Please supply the NAME value." ) kwargs = { "database": settings_dict["NAME"], "detect_types": Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES, **settings_dict["OPTIONS"], } # Always allow the underlying SQLite connection to be shareable # between multiple threads. The safe-guarding will be handled at a # higher level by the `BaseDatabaseWrapper.allow_thread_sharing` # property. This is necessary as the shareability is disabled by # default in sqlite3 and it cannot be changed once a connection is # opened. if "check_same_thread" in kwargs and kwargs["check_same_thread"]: warnings.warn( "The `check_same_thread` option was provided and set to " "True. It will be overridden with False. Use the " "`DatabaseWrapper.allow_thread_sharing` property instead " "for controlling thread shareability.", RuntimeWarning, ) kwargs.update({"check_same_thread": False, "uri": True}) return kwargs def get_database_version(self): return self.Database.sqlite_version_info @async_unsafe def get_new_connection(self, conn_params): conn = Database.connect(**conn_params) register_functions(conn) conn.execute("PRAGMA foreign_keys = ON") # The macOS bundled SQLite defaults legacy_alter_table ON, which # prevents atomic table renames (feature supports_atomic_references_rename) conn.execute("PRAGMA legacy_alter_table = OFF") return conn def create_cursor(self, name=None): return self.connection.cursor(factory=SQLiteCursorWrapper) @async_unsafe def close(self): self.validate_thread_sharing() # If database is in memory, closing the connection destroys the # database. To prevent accidental data loss, ignore close requests on # an in-memory db. if not self.is_in_memory_db(): BaseDatabaseWrapper.close(self) def _savepoint_allowed(self): # When 'isolation_level' is not None, sqlite3 commits before each # savepoint; it's a bug. 
When it is None, savepoints don't make sense # because autocommit is enabled. The only exception is inside 'atomic' # blocks. To work around that bug, on SQLite, 'atomic' starts a # transaction explicitly rather than simply disable autocommit. return self.in_atomic_block def _set_autocommit(self, autocommit): if autocommit: level = None else: # sqlite3's internal default is ''. It's different from None. # See Modules/_sqlite/connection.c. level = "" # 'isolation_level' is a misleading API. # SQLite always runs at the SERIALIZABLE isolation level. with self.wrap_database_errors: self.connection.isolation_level = level def disable_constraint_checking(self): with self.cursor() as cursor: cursor.execute("PRAGMA foreign_keys = OFF") # Foreign key constraints cannot be turned off while in a multi- # statement transaction. Fetch the current state of the pragma # to determine if constraints are effectively disabled. enabled = cursor.execute("PRAGMA foreign_keys").fetchone()[0] return not bool(enabled) def enable_constraint_checking(self): with self.cursor() as cursor: cursor.execute("PRAGMA foreign_keys = ON") def check_constraints(self, table_names=None): """ Check each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. """ if self.features.supports_pragma_foreign_key_check: with self.cursor() as cursor: if table_names is None: violations = cursor.execute("PRAGMA foreign_key_check").fetchall() else: violations = chain.from_iterable( cursor.execute( "PRAGMA foreign_key_check(%s)" % self.ops.quote_name(table_name) ).fetchall() for table_name in table_names ) # See https://www.sqlite.org/pragma.html#pragma_foreign_key_check for ( table_name, rowid, referenced_table_name, foreign_key_index, ) in violations: foreign_key = cursor.execute( "PRAGMA foreign_key_list(%s)" % self.ops.quote_name(table_name) ).fetchall()[foreign_key_index] column_name, referenced_column_name = foreign_key[3:5] primary_key_column_name = self.introspection.get_primary_key_column( cursor, table_name ) primary_key_value, bad_value = cursor.execute( "SELECT %s, %s FROM %s WHERE rowid = %%s" % ( self.ops.quote_name(primary_key_column_name), self.ops.quote_name(column_name), self.ops.quote_name(table_name), ), (rowid,), ).fetchone() raise IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." 
% ( table_name, primary_key_value, table_name, column_name, bad_value, referenced_table_name, referenced_column_name, ) ) else: with self.cursor() as cursor: if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column( cursor, table_name ) if not primary_key_column_name: continue relations = self.introspection.get_relations(cursor, table_name) for column_name, ( referenced_column_name, referenced_table_name, ) in relations.items(): cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." % ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): return True def _start_transaction_under_autocommit(self): """ Start a transaction explicitly in autocommit mode. Staying in autocommit mode works around a bug of sqlite3 that breaks savepoints when autocommit is disabled. """ self.cursor().execute("BEGIN") def is_in_memory_db(self): return self.creation.is_in_memory_db(self.settings_dict["NAME"]) FORMAT_QMARK_REGEX = _lazy_re_compile(r"(?<!%)%s") class SQLiteCursorWrapper(Database.Cursor): """ Django uses "format" style placeholders, but sqlite3 uses "qmark" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". """ def execute(self, query, params=None): if params is None: return Database.Cursor.execute(self, query) query = self.convert_query(query) return Database.Cursor.execute(self, query, params) def executemany(self, query, param_list): query = self.convert_query(query) return Database.Cursor.executemany(self, query, param_list) def convert_query(self, query): return FORMAT_QMARK_REGEX.sub("?", query).replace("%%", "%")
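
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the wrapper above): the placeholder
# rewrite done by SQLiteCursorWrapper.convert_query(), checked standalone.
# An unescaped "%s" becomes the qmark-style "?", while an escaped "%%"
# collapses to a literal "%".
if __name__ == "__main__":

    def _convert_query_demo(query):
        return FORMAT_QMARK_REGEX.sub("?", query).replace("%%", "%")

    assert _convert_query_demo("SELECT %s, '%%s'") == "SELECT ?, '%s'"
    assert (
        _convert_query_demo("UPDATE t SET a = %s WHERE b LIKE '10%%'")
        == "UPDATE t SET a = ? WHERE b LIKE '10%'"
    )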
7035bb4a44750fd3bbac1cc758df22002bd1360b3dcb5aba99cb61eef4c3a0a7
import datetime
import decimal
import uuid
from functools import lru_cache
from itertools import chain

from django.conf import settings
from django.core.exceptions import FieldError
from django.db import DatabaseError, NotSupportedError, models
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models.constants import OnConflict
from django.db.models.expressions import Col
from django.utils import timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import cached_property


class DatabaseOperations(BaseDatabaseOperations):
    cast_char_field_without_max_length = "text"
    cast_data_types = {
        "DateField": "TEXT",
        "DateTimeField": "TEXT",
    }
    explain_prefix = "EXPLAIN QUERY PLAN"
    # List of datatypes that cannot be extracted with JSON_EXTRACT() on
    # SQLite. Use JSON_TYPE() instead.
    jsonfield_datatype_values = frozenset(["null", "false", "true"])

    def bulk_batch_size(self, fields, objs):
        """
        SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
        999 variables per query.

        If there's only a single field to insert, the limit is 500
        (SQLITE_MAX_COMPOUND_SELECT).
        """
        if len(fields) == 1:
            return 500
        elif len(fields) > 1:
            return self.connection.features.max_query_params // len(fields)
        else:
            return len(objs)

    def check_expression_support(self, expression):
        bad_fields = (models.DateField, models.DateTimeField, models.TimeField)
        bad_aggregates = (models.Sum, models.Avg, models.Variance, models.StdDev)
        if isinstance(expression, bad_aggregates):
            for expr in expression.get_source_expressions():
                try:
                    output_field = expr.output_field
                except (AttributeError, FieldError):
                    # Not every subexpression has an output_field which is fine
                    # to ignore.
                    pass
                else:
                    if isinstance(output_field, bad_fields):
                        raise NotSupportedError(
                            "You cannot use Sum, Avg, StdDev, and Variance "
                            "aggregations on date/time fields in sqlite3 "
                            "since date/time is saved as text."
                        )
        if (
            isinstance(expression, models.Aggregate)
            and expression.distinct
            and len(expression.source_expressions) > 1
        ):
            raise NotSupportedError(
                "SQLite doesn't support DISTINCT on aggregate functions "
                "accepting multiple arguments."
            )

    def date_extract_sql(self, lookup_type, sql, params):
        """
        Support EXTRACT with a user-defined function django_date_extract()
        that's registered in connect(). Use single quotes because this is a
        string and could otherwise cause a collision with a field name.
        """
        return f"django_date_extract(%s, {sql})", (lookup_type.lower(), *params)

    def fetch_returned_insert_rows(self, cursor):
        """
        Given a cursor object that has just performed an INSERT...RETURNING
        statement into a table, return the list of returned data.
""" return cursor.fetchall() def format_for_duration_arithmetic(self, sql): """Do nothing since formatting is handled in the custom function.""" return sql def date_trunc_sql(self, lookup_type, sql, params, tzname=None): return f"django_date_trunc(%s, {sql}, %s, %s)", ( lookup_type.lower(), *params, *self._convert_tznames_to_sql(tzname), ) def time_trunc_sql(self, lookup_type, sql, params, tzname=None): return f"django_time_trunc(%s, {sql}, %s, %s)", ( lookup_type.lower(), *params, *self._convert_tznames_to_sql(tzname), ) def _convert_tznames_to_sql(self, tzname): if tzname and settings.USE_TZ: return tzname, self.connection.timezone_name return None, None def datetime_cast_date_sql(self, sql, params, tzname): return f"django_datetime_cast_date({sql}, %s, %s)", ( *params, *self._convert_tznames_to_sql(tzname), ) def datetime_cast_time_sql(self, sql, params, tzname): return f"django_datetime_cast_time({sql}, %s, %s)", ( *params, *self._convert_tznames_to_sql(tzname), ) def datetime_extract_sql(self, lookup_type, sql, params, tzname): return f"django_datetime_extract(%s, {sql}, %s, %s)", ( lookup_type.lower(), *params, *self._convert_tznames_to_sql(tzname), ) def datetime_trunc_sql(self, lookup_type, sql, params, tzname): return f"django_datetime_trunc(%s, {sql}, %s, %s)", ( lookup_type.lower(), *params, *self._convert_tznames_to_sql(tzname), ) def time_extract_sql(self, lookup_type, sql, params): return f"django_time_extract(%s, {sql})", (lookup_type.lower(), *params) def pk_default_value(self): return "NULL" def _quote_params_for_last_executed_query(self, params): """ Only for last_executed_query! Don't use this to execute SQL queries! """ # This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the # number of parameters, default = 999) and SQLITE_MAX_COLUMN (the # number of return values, default = 2000). Since Python's sqlite3 # module doesn't expose the get_limit() C API, assume the default # limits are in effect and split the work in batches if needed. BATCH_SIZE = 999 if len(params) > BATCH_SIZE: results = () for index in range(0, len(params), BATCH_SIZE): chunk = params[index : index + BATCH_SIZE] results += self._quote_params_for_last_executed_query(chunk) return results sql = "SELECT " + ", ".join(["QUOTE(?)"] * len(params)) # Bypass Django's wrappers and use the underlying sqlite3 connection # to avoid logging this query - it would trigger infinite recursion. cursor = self.connection.connection.cursor() # Native sqlite3 cursors cannot be used as context managers. try: return cursor.execute(sql, params).fetchone() finally: cursor.close() def last_executed_query(self, cursor, sql, params): # Python substitutes parameters in Modules/_sqlite/cursor.c with: # bind_parameters(state, self->statement, parameters); # Unfortunately there is no way to reach self->statement from Python, # so we quote and substitute parameters manually. if params: if isinstance(params, (list, tuple)): params = self._quote_params_for_last_executed_query(params) else: values = tuple(params.values()) values = self._quote_params_for_last_executed_query(values) params = dict(zip(params, values)) return sql % params # For consistency with SQLiteCursorWrapper.execute(), just return sql # when there are no parameters. See #13648 and #17158. else: return sql def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. 
return '"%s"' % name def no_limit_value(self): return -1 def __references_graph(self, table_name): query = """ WITH tables AS ( SELECT %s name UNION SELECT sqlite_master.name FROM sqlite_master JOIN tables ON (sql REGEXP %s || tables.name || %s) ) SELECT name FROM tables; """ params = ( table_name, r'(?i)\s+references\s+("|\')?', r'("|\')?\s*\(', ) with self.connection.cursor() as cursor: results = cursor.execute(query, params) return [row[0] for row in results.fetchall()] @cached_property def _references_graph(self): # 512 is large enough to fit the ~330 tables (as of this writing) in # Django's test suite. return lru_cache(maxsize=512)(self.__references_graph) def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): if tables and allow_cascade: # Simulate TRUNCATE CASCADE by recursively collecting the tables # referencing the tables to be flushed. tables = set( chain.from_iterable(self._references_graph(table) for table in tables) ) sql = [ "%s %s %s;" % ( style.SQL_KEYWORD("DELETE"), style.SQL_KEYWORD("FROM"), style.SQL_FIELD(self.quote_name(table)), ) for table in tables ] if reset_sequences: sequences = [{"table": table} for table in tables] sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql def sequence_reset_by_name_sql(self, style, sequences): if not sequences: return [] return [ "%s %s %s %s = 0 %s %s %s (%s);" % ( style.SQL_KEYWORD("UPDATE"), style.SQL_TABLE(self.quote_name("sqlite_sequence")), style.SQL_KEYWORD("SET"), style.SQL_FIELD(self.quote_name("seq")), style.SQL_KEYWORD("WHERE"), style.SQL_FIELD(self.quote_name("name")), style.SQL_KEYWORD("IN"), ", ".join( ["'%s'" % sequence_info["table"] for sequence_info in sequences] ), ), ] def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, "resolve_expression"): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError( "SQLite backend does not support timezone-aware datetimes when " "USE_TZ is False." ) return str(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. 
if hasattr(value, "resolve_expression"): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): raise ValueError("SQLite backend does not support timezone-aware times.") return str(value) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == "DateTimeField": converters.append(self.convert_datetimefield_value) elif internal_type == "DateField": converters.append(self.convert_datefield_value) elif internal_type == "TimeField": converters.append(self.convert_timefield_value) elif internal_type == "DecimalField": converters.append(self.get_decimalfield_converter(expression)) elif internal_type == "UUIDField": converters.append(self.convert_uuidfield_value) elif internal_type == "BooleanField": converters.append(self.convert_booleanfield_value) return converters def convert_datetimefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.datetime): value = parse_datetime(value) if settings.USE_TZ and not timezone.is_aware(value): value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.date): value = parse_date(value) return value def convert_timefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.time): value = parse_time(value) return value def get_decimalfield_converter(self, expression): # SQLite stores only 15 significant digits. Digits coming from # float inaccuracy must be removed. create_decimal = decimal.Context(prec=15).create_decimal_from_float if isinstance(expression, Col): quantize_value = decimal.Decimal(1).scaleb( -expression.output_field.decimal_places ) def converter(value, expression, connection): if value is not None: return create_decimal(value).quantize( quantize_value, context=expression.output_field.context ) else: def converter(value, expression, connection): if value is not None: return create_decimal(value) return converter def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def convert_booleanfield_value(self, value, expression, connection): return bool(value) if value in (1, 0) else value def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join(f"({sql})" for sql in placeholder_rows_sql) return f"VALUES {values_sql}" def combine_expression(self, connector, sub_expressions): # SQLite doesn't have a ^ operator, so use the user-defined POWER # function that's registered in connect(). if connector == "^": return "POWER(%s)" % ",".join(sub_expressions) elif connector == "#": return "BITXOR(%s)" % ",".join(sub_expressions) return super().combine_expression(connector, sub_expressions) def combine_duration_expression(self, connector, sub_expressions): if connector not in ["+", "-", "*", "/"]: raise DatabaseError("Invalid connector for timedelta: %s." 
% connector) fn_params = ["'%s'" % connector] + sub_expressions if len(fn_params) > 3: raise ValueError("Too many params for timedelta operations.") return "django_format_dtdelta(%s)" % ", ".join(fn_params) def integer_field_range(self, internal_type): # SQLite doesn't enforce any integer constraints return (None, None) def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) if internal_type == "TimeField": return "django_time_diff(%s, %s)" % (lhs_sql, rhs_sql), params return "django_timestamp_diff(%s, %s)" % (lhs_sql, rhs_sql), params def insert_statement(self, on_conflict=None): if on_conflict == OnConflict.IGNORE: return "INSERT OR IGNORE INTO" return super().insert_statement(on_conflict=on_conflict) def return_insert_columns(self, fields): # SQLite < 3.35 doesn't support an INSERT...RETURNING statement. if not fields: return "", () columns = [ "%s.%s" % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), ) for field in fields ] return "RETURNING %s" % ", ".join(columns), () def on_conflict_suffix_sql(self, fields, on_conflict, update_fields, unique_fields): if ( on_conflict == OnConflict.UPDATE and self.connection.features.supports_update_conflicts_with_target ): return "ON CONFLICT(%s) DO UPDATE SET %s" % ( ", ".join(map(self.quote_name, unique_fields)), ", ".join( [ f"{field} = EXCLUDED.{field}" for field in map(self.quote_name, update_fields) ] ), ) return super().on_conflict_suffix_sql( fields, on_conflict, update_fields, unique_fields, )
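
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the operations class above): the batching
# arithmetic behind bulk_batch_size(). With the assumed default limit of 999
# bindable parameters per statement, a bulk INSERT over N columns can carry
# at most 999 // N rows; the single-field path is capped at 500 by
# SQLITE_MAX_COMPOUND_SELECT. The names below are local to this sketch.
if __name__ == "__main__":
    MAX_QUERY_PARAMS = 999  # SQLITE_LIMIT_VARIABLE_NUMBER compile-time default
    MAX_COMPOUND_SELECT = 500  # SQLITE_MAX_COMPOUND_SELECT compile-time default

    def demo_bulk_batch_size(num_fields):
        if num_fields == 1:
            return MAX_COMPOUND_SELECT
        return MAX_QUERY_PARAMS // num_fields

    for n in (1, 2, 5, 10):
        print("%2d field(s) -> %3d rows per INSERT" % (n, demo_bulk_batch_size(n)))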
f20e0d557e5a6012ce3fced49d3c4732235c28b0b9f15b89b320f3bacef59035
import copy from decimal import Decimal from django.apps.registry import Apps from django.db import NotSupportedError from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.backends.ddl_references import Statement from django.db.backends.utils import strip_quotes from django.db.models import UniqueConstraint from django.db.transaction import atomic class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_delete_table = "DROP TABLE %(table)s" sql_create_fk = None sql_create_inline_fk = ( "REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED" ) sql_create_column_inline_fk = sql_create_inline_fk sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s" sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)" sql_delete_unique = "DROP INDEX %(name)s" def __enter__(self): # Some SQLite schema alterations need foreign key constraints to be # disabled. Enforce it here for the duration of the schema edition. if not self.connection.disable_constraint_checking(): raise NotSupportedError( "SQLite schema editor cannot be used while foreign key " "constraint checks are enabled. Make sure to disable them " "before entering a transaction.atomic() context because " "SQLite does not support disabling them in the middle of " "a multi-statement transaction." ) return super().__enter__() def __exit__(self, exc_type, exc_value, traceback): self.connection.check_constraints() super().__exit__(exc_type, exc_value, traceback) self.connection.enable_constraint_checking() def quote_value(self, value): # The backend "mostly works" without this function and there are use # cases for compiling Python without the sqlite3 libraries (e.g. # security hardening). try: import sqlite3 value = sqlite3.adapt(value) except ImportError: pass except sqlite3.ProgrammingError: pass # Manual emulation of SQLite parameter quoting if isinstance(value, bool): return str(int(value)) elif isinstance(value, (Decimal, float, int)): return str(value) elif isinstance(value, str): return "'%s'" % value.replace("'", "''") elif value is None: return "NULL" elif isinstance(value, (bytes, bytearray, memoryview)): # Bytes are only allowed for BLOB fields, encoded as string # literals containing hexadecimal data and preceded by a single "X" # character. return "X'%s'" % value.hex() else: raise ValueError( "Cannot quote parameter value %r of type %s" % (value, type(value)) ) def prepare_default(self, value): return self.quote_value(value) def _is_referenced_by_fk_constraint( self, table_name, column_name=None, ignore_self=False ): """ Return whether or not the provided table name is referenced by another one. If `column_name` is specified, only references pointing to that column are considered. If `ignore_self` is True, self-referential constraints are ignored. 
""" with self.connection.cursor() as cursor: for other_table in self.connection.introspection.get_table_list(cursor): if ignore_self and other_table.name == table_name: continue relations = self.connection.introspection.get_relations( cursor, other_table.name ) for constraint_column, constraint_table in relations.values(): if constraint_table == table_name and ( column_name is None or constraint_column == column_name ): return True return False def alter_db_table( self, model, old_db_table, new_db_table, disable_constraints=True ): if ( not self.connection.features.supports_atomic_references_rename and disable_constraints and self._is_referenced_by_fk_constraint(old_db_table) ): if self.connection.in_atomic_block: raise NotSupportedError( ( "Renaming the %r table while in a transaction is not " "supported on SQLite < 3.26 because it would break referential " "integrity. Try adding `atomic = False` to the Migration class." ) % old_db_table ) self.connection.enable_constraint_checking() super().alter_db_table(model, old_db_table, new_db_table) self.connection.disable_constraint_checking() else: super().alter_db_table(model, old_db_table, new_db_table) def alter_field(self, model, old_field, new_field, strict=False): if not self._field_should_be_altered(old_field, new_field): return old_field_name = old_field.name table_name = model._meta.db_table _, old_column_name = old_field.get_attname_column() if ( new_field.name != old_field_name and not self.connection.features.supports_atomic_references_rename and self._is_referenced_by_fk_constraint( table_name, old_column_name, ignore_self=True ) ): if self.connection.in_atomic_block: raise NotSupportedError( ( "Renaming the %r.%r column while in a transaction is not " "supported on SQLite < 3.26 because it would break referential " "integrity. Try adding `atomic = False` to the Migration class." ) % (model._meta.db_table, old_field_name) ) with atomic(self.connection.alias): super().alter_field(model, old_field, new_field, strict=strict) # Follow SQLite's documented procedure for performing changes # that don't affect the on-disk content. # https://sqlite.org/lang_altertable.html#otheralter with self.connection.cursor() as cursor: schema_version = cursor.execute("PRAGMA schema_version").fetchone()[ 0 ] cursor.execute("PRAGMA writable_schema = 1") references_template = ' REFERENCES "%s" ("%%s") ' % table_name new_column_name = new_field.get_attname_column()[1] search = references_template % old_column_name replacement = references_template % new_column_name cursor.execute( "UPDATE sqlite_master SET sql = replace(sql, %s, %s)", (search, replacement), ) cursor.execute("PRAGMA schema_version = %d" % (schema_version + 1)) cursor.execute("PRAGMA writable_schema = 0") # The integrity check will raise an exception and rollback # the transaction if the sqlite_master updates corrupt the # database. cursor.execute("PRAGMA integrity_check") # Perform a VACUUM to refresh the database representation from # the sqlite_master table. with self.connection.cursor() as cursor: cursor.execute("VACUUM") else: super().alter_field(model, old_field, new_field, strict=strict) def _remake_table( self, model, create_field=None, delete_field=None, alter_field=None ): """ Shortcut to transform a model from old_model into new_model This follows the correct procedure to perform non-rename or column addition operations based on SQLite's documentation https://www.sqlite.org/lang_altertable.html#caution The essential steps are: 1. 
Create a table with the updated definition called "new__app_model" 2. Copy the data from the existing "app_model" table to the new table 3. Drop the "app_model" table 4. Rename the "new__app_model" table to "app_model" 5. Restore any index of the previous "app_model" table. """ # Self-referential fields must be recreated rather than copied from # the old model to ensure their remote_field.field_name doesn't refer # to an altered field. def is_self_referential(f): return f.is_relation and f.remote_field.model is model # Work out the new fields dict / mapping body = { f.name: f.clone() if is_self_referential(f) else f for f in model._meta.local_concrete_fields } # Since mapping might mix column names and default values, # its values must be already quoted. mapping = { f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields } # This maps field names (not columns) for things like unique_together rename_mapping = {} # If any of the new or altered fields is introducing a new PK, # remove the old one restore_pk_field = None if getattr(create_field, "primary_key", False) or ( alter_field and getattr(alter_field[1], "primary_key", False) ): for name, field in list(body.items()): if field.primary_key and not ( # Do not remove the old primary key when an altered field # that introduces a primary key is the same field. alter_field and name == alter_field[1].name ): field.primary_key = False restore_pk_field = field if field.auto_created: del body[name] del mapping[field.column] # Add in any created fields if create_field: body[create_field.name] = create_field # Choose a default and insert it into the copy map if not create_field.many_to_many and create_field.concrete: mapping[create_field.column] = self.prepare_default( self.effective_default(create_field), ) # Add in any altered fields if alter_field: old_field, new_field = alter_field body.pop(old_field.name, None) mapping.pop(old_field.column, None) body[new_field.name] = new_field if old_field.null and not new_field.null: case_sql = "coalesce(%(col)s, %(default)s)" % { "col": self.quote_name(old_field.column), "default": self.prepare_default(self.effective_default(new_field)), } mapping[new_field.column] = case_sql else: mapping[new_field.column] = self.quote_name(old_field.column) rename_mapping[old_field.name] = new_field.name # Remove any deleted fields if delete_field: del body[delete_field.name] del mapping[delete_field.column] # Remove any implicit M2M tables if ( delete_field.many_to_many and delete_field.remote_field.through._meta.auto_created ): return self.delete_model(delete_field.remote_field.through) # Work inside a new app registry apps = Apps() # Work out the new value of unique_together, taking renames into # account unique_together = [ [rename_mapping.get(n, n) for n in unique] for unique in model._meta.unique_together ] # Work out the new value for index_together, taking renames into # account index_together = [ [rename_mapping.get(n, n) for n in index] for index in model._meta.index_together ] indexes = model._meta.indexes if delete_field: indexes = [ index for index in indexes if delete_field.name not in index.fields ] constraints = list(model._meta.constraints) # Provide isolated instances of the fields to the new model body so # that the existing model's internals aren't interfered with when # the dummy model is constructed. body_copy = copy.deepcopy(body) # Construct a new model with the new fields to allow self referential # primary key to resolve to. 
This model won't ever be materialized as a
        # table and solely exists for foreign key reference resolution purposes.
        # This wouldn't be required if the schema editor was operating on model
        # states instead of rendered models.
        meta_contents = {
            "app_label": model._meta.app_label,
            "db_table": model._meta.db_table,
            "unique_together": unique_together,
            "index_together": index_together,
            "indexes": indexes,
            "constraints": constraints,
            "apps": apps,
        }
        meta = type("Meta", (), meta_contents)
        body_copy["Meta"] = meta
        body_copy["__module__"] = model.__module__
        type(model._meta.object_name, model.__bases__, body_copy)

        # Construct a model with a renamed table name.
        body_copy = copy.deepcopy(body)
        meta_contents = {
            "app_label": model._meta.app_label,
            "db_table": "new__%s" % strip_quotes(model._meta.db_table),
            "unique_together": unique_together,
            "index_together": index_together,
            "indexes": indexes,
            "constraints": constraints,
            "apps": apps,
        }
        meta = type("Meta", (), meta_contents)
        body_copy["Meta"] = meta
        body_copy["__module__"] = model.__module__
        new_model = type("New%s" % model._meta.object_name, model.__bases__, body_copy)

        # Create a new table with the updated schema.
        self.create_model(new_model)

        # Copy data from the old table into the new table
        self.execute(
            "INSERT INTO %s (%s) SELECT %s FROM %s"
            % (
                self.quote_name(new_model._meta.db_table),
                ", ".join(self.quote_name(x) for x in mapping),
                ", ".join(mapping.values()),
                self.quote_name(model._meta.db_table),
            )
        )

        # Delete the old table to make way for the new
        self.delete_model(model, handle_autom2m=False)

        # Rename the new table to take the place of the old
        self.alter_db_table(
            new_model,
            new_model._meta.db_table,
            model._meta.db_table,
            disable_constraints=False,
        )

        # Run deferred SQL on correct table
        for sql in self.deferred_sql:
            self.execute(sql)
        self.deferred_sql = []
        # Fix any PK-removed field
        if restore_pk_field:
            restore_pk_field.primary_key = True

    def delete_model(self, model, handle_autom2m=True):
        if handle_autom2m:
            super().delete_model(model)
        else:
            # Delete the table (and only that)
            self.execute(
                self.sql_delete_table
                % {
                    "table": self.quote_name(model._meta.db_table),
                }
            )
            # Remove all deferred statements referencing the deleted table.
            for sql in list(self.deferred_sql):
                if isinstance(sql, Statement) and sql.references_table(
                    model._meta.db_table
                ):
                    self.deferred_sql.remove(sql)

    def add_field(self, model, field):
        """Create a field on a model."""
        # Special-case implicit M2M tables.
        if field.many_to_many and field.remote_field.through._meta.auto_created:
            self.create_model(field.remote_field.through)
        elif (
            # Primary keys and unique fields are not supported in ALTER TABLE
            # ADD COLUMN.
            field.primary_key
            or field.unique
            or
            # Fields with default values cannot be handled by ALTER TABLE ADD
            # COLUMN statement because DROP DEFAULT is not supported in
            # ALTER TABLE.
            not field.null
            or self.effective_default(field) is not None
        ):
            self._remake_table(model, create_field=field)
        else:
            super().add_field(model, field)

    def remove_field(self, model, field):
        """
        Remove a field from a model. Usually involves deleting a column,
        but for M2Ms may involve deleting a table.
""" # M2M fields are a special case if field.many_to_many: # For implicit M2M tables, delete the auto-created table if field.remote_field.through._meta.auto_created: self.delete_model(field.remote_field.through) # For explicit "through" M2M fields, do nothing elif ( self.connection.features.can_alter_table_drop_column # Primary keys, unique fields, indexed fields, and foreign keys are # not supported in ALTER TABLE DROP COLUMN. and not field.primary_key and not field.unique and not field.db_index and not (field.remote_field and field.db_constraint) ): super().remove_field(model, field) # For everything else, remake. else: # It might not actually have a column behind it if field.db_parameters(connection=self.connection)["type"] is None: return self._remake_table(model, delete_field=field) def _alter_field( self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False, ): """Perform a "physical" (non-ManyToMany) field update.""" # Use "ALTER TABLE ... RENAME COLUMN" if only the column name # changed and there aren't any constraints. if ( self.connection.features.can_alter_table_rename_column and old_field.column != new_field.column and self.column_sql(model, old_field) == self.column_sql(model, new_field) and not ( old_field.remote_field and old_field.db_constraint or new_field.remote_field and new_field.db_constraint ) ): return self.execute( self._rename_field_sql( model._meta.db_table, old_field, new_field, new_type ) ) # Alter by remaking table self._remake_table(model, alter_field=(old_field, new_field)) # Rebuild tables with FKs pointing to this field. old_collation = old_db_params.get("collation") new_collation = new_db_params.get("collation") if new_field.unique and ( old_type != new_type or old_collation != new_collation ): related_models = set() opts = new_field.model._meta for remote_field in opts.related_objects: # Ignore self-relationship since the table was already rebuilt. if remote_field.related_model == model: continue if not remote_field.many_to_many: if remote_field.field_name == new_field.name: related_models.add(remote_field.related_model) elif new_field.primary_key and remote_field.through._meta.auto_created: related_models.add(remote_field.through) if new_field.primary_key: for many_to_many in opts.many_to_many: # Ignore self-relationship since the table was already rebuilt. if many_to_many.related_model == model: continue if many_to_many.remote_field.through._meta.auto_created: related_models.add(many_to_many.remote_field.through) for related_model in related_models: self._remake_table(related_model) def _alter_many_to_many(self, model, old_field, new_field, strict): """Alter M2Ms to repoint their to= endpoints.""" if ( old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table ): # The field name didn't change, but some options did, so we have to # propagate this altering. self._remake_table( old_field.remote_field.through, alter_field=( # The field that points to the target model is needed, so # we can tell alter_field to change it - this is # m2m_reverse_field_name() (as opposed to m2m_field_name(), # which points to our model). 
old_field.remote_field.through._meta.get_field( old_field.m2m_reverse_field_name() ), new_field.remote_field.through._meta.get_field( new_field.m2m_reverse_field_name() ), ), ) return # Make a new through table self.create_model(new_field.remote_field.through) # Copy the data across self.execute( "INSERT INTO %s (%s) SELECT %s FROM %s" % ( self.quote_name(new_field.remote_field.through._meta.db_table), ", ".join( [ "id", new_field.m2m_column_name(), new_field.m2m_reverse_name(), ] ), ", ".join( [ "id", old_field.m2m_column_name(), old_field.m2m_reverse_name(), ] ), self.quote_name(old_field.remote_field.through._meta.db_table), ) ) # Delete the old through table self.delete_model(old_field.remote_field.through) def add_constraint(self, model, constraint): if isinstance(constraint, UniqueConstraint) and ( constraint.condition or constraint.contains_expressions or constraint.include or constraint.deferrable ): super().add_constraint(model, constraint) else: self._remake_table(model) def remove_constraint(self, model, constraint): if isinstance(constraint, UniqueConstraint) and ( constraint.condition or constraint.contains_expressions or constraint.include or constraint.deferrable ): super().remove_constraint(model, constraint) else: self._remake_table(model) def _collate_sql(self, collation): return "COLLATE " + collation
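# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Django): the copy-and-rename strategy that
# DatabaseSchemaEditor._remake_table() above applies. SQLite's limited ALTER
# TABLE support means most alterations are performed by creating a new table,
# copying rows across with INSERT ... SELECT, dropping the old table, and
# renaming the new one into place. The table and column names below are made
# up for the example; the real schema editor also disables foreign key
# enforcement around these steps.
import sqlite3


def remake_table_sketch():
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE author (id INTEGER PRIMARY KEY, name TEXT)")
    conn.execute("INSERT INTO author (name) VALUES ('Alice'), ('Bob')")
    # 1. Create a new table with the updated schema (here: NOT NULL added).
    conn.execute(
        "CREATE TABLE new__author "
        "(id INTEGER PRIMARY KEY, name TEXT NOT NULL DEFAULT '')"
    )
    # 2. Copy data from the old table into the new table.
    conn.execute("INSERT INTO new__author (id, name) SELECT id, name FROM author")
    # 3. Delete the old table to make way for the new.
    conn.execute("DROP TABLE author")
    # 4. Rename the new table into place.
    conn.execute("ALTER TABLE new__author RENAME TO author")
    return conn.execute("SELECT id, name FROM author").fetchall()


# remake_table_sketch() -> [(1, 'Alice'), (2, 'Bob')]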
""" Implementations of SQL functions for SQLite. """ import functools import random import statistics from datetime import timedelta from hashlib import sha1, sha224, sha256, sha384, sha512 from math import ( acos, asin, atan, atan2, ceil, cos, degrees, exp, floor, fmod, log, pi, radians, sin, sqrt, tan, ) from re import search as re_search from django.db.backends.base.base import timezone_constructor from django.db.backends.utils import ( split_tzname_delta, typecast_time, typecast_timestamp, ) from django.utils import timezone from django.utils.crypto import md5 from django.utils.duration import duration_microseconds def register(connection): create_deterministic_function = functools.partial( connection.create_function, deterministic=True, ) create_deterministic_function("django_date_extract", 2, _sqlite_datetime_extract) create_deterministic_function("django_date_trunc", 4, _sqlite_date_trunc) create_deterministic_function( "django_datetime_cast_date", 3, _sqlite_datetime_cast_date ) create_deterministic_function( "django_datetime_cast_time", 3, _sqlite_datetime_cast_time ) create_deterministic_function( "django_datetime_extract", 4, _sqlite_datetime_extract ) create_deterministic_function("django_datetime_trunc", 4, _sqlite_datetime_trunc) create_deterministic_function("django_time_extract", 2, _sqlite_time_extract) create_deterministic_function("django_time_trunc", 4, _sqlite_time_trunc) create_deterministic_function("django_time_diff", 2, _sqlite_time_diff) create_deterministic_function("django_timestamp_diff", 2, _sqlite_timestamp_diff) create_deterministic_function("django_format_dtdelta", 3, _sqlite_format_dtdelta) create_deterministic_function("regexp", 2, _sqlite_regexp) create_deterministic_function("BITXOR", 2, _sqlite_bitxor) create_deterministic_function("COT", 1, _sqlite_cot) create_deterministic_function("LPAD", 3, _sqlite_lpad) create_deterministic_function("MD5", 1, _sqlite_md5) create_deterministic_function("REPEAT", 2, _sqlite_repeat) create_deterministic_function("REVERSE", 1, _sqlite_reverse) create_deterministic_function("RPAD", 3, _sqlite_rpad) create_deterministic_function("SHA1", 1, _sqlite_sha1) create_deterministic_function("SHA224", 1, _sqlite_sha224) create_deterministic_function("SHA256", 1, _sqlite_sha256) create_deterministic_function("SHA384", 1, _sqlite_sha384) create_deterministic_function("SHA512", 1, _sqlite_sha512) create_deterministic_function("SIGN", 1, _sqlite_sign) # Don't use the built-in RANDOM() function because it returns a value # in the range [-1 * 2^63, 2^63 - 1] instead of [0, 1). connection.create_function("RAND", 0, random.random) connection.create_aggregate("STDDEV_POP", 1, StdDevPop) connection.create_aggregate("STDDEV_SAMP", 1, StdDevSamp) connection.create_aggregate("VAR_POP", 1, VarPop) connection.create_aggregate("VAR_SAMP", 1, VarSamp) # Some math functions are enabled by default in SQLite 3.35+. 
sql = "select sqlite_compileoption_used('ENABLE_MATH_FUNCTIONS')" if not connection.execute(sql).fetchone()[0]: create_deterministic_function("ACOS", 1, _sqlite_acos) create_deterministic_function("ASIN", 1, _sqlite_asin) create_deterministic_function("ATAN", 1, _sqlite_atan) create_deterministic_function("ATAN2", 2, _sqlite_atan2) create_deterministic_function("CEILING", 1, _sqlite_ceiling) create_deterministic_function("COS", 1, _sqlite_cos) create_deterministic_function("DEGREES", 1, _sqlite_degrees) create_deterministic_function("EXP", 1, _sqlite_exp) create_deterministic_function("FLOOR", 1, _sqlite_floor) create_deterministic_function("LN", 1, _sqlite_ln) create_deterministic_function("LOG", 2, _sqlite_log) create_deterministic_function("MOD", 2, _sqlite_mod) create_deterministic_function("PI", 0, _sqlite_pi) create_deterministic_function("POWER", 2, _sqlite_power) create_deterministic_function("RADIANS", 1, _sqlite_radians) create_deterministic_function("SIN", 1, _sqlite_sin) create_deterministic_function("SQRT", 1, _sqlite_sqrt) create_deterministic_function("TAN", 1, _sqlite_tan) def _sqlite_datetime_parse(dt, tzname=None, conn_tzname=None): if dt is None: return None try: dt = typecast_timestamp(dt) except (TypeError, ValueError): return None if conn_tzname: dt = dt.replace(tzinfo=timezone_constructor(conn_tzname)) if tzname is not None and tzname != conn_tzname: tzname, sign, offset = split_tzname_delta(tzname) if offset: hours, minutes = offset.split(":") offset_delta = timedelta(hours=int(hours), minutes=int(minutes)) dt += offset_delta if sign == "+" else -offset_delta dt = timezone.localtime(dt, timezone_constructor(tzname)) return dt def _sqlite_date_trunc(lookup_type, dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None if lookup_type == "year": return f"{dt.year:04d}-01-01" elif lookup_type == "quarter": month_in_quarter = dt.month - (dt.month - 1) % 3 return f"{dt.year:04d}-{month_in_quarter:02d}-01" elif lookup_type == "month": return f"{dt.year:04d}-{dt.month:02d}-01" elif lookup_type == "week": dt -= timedelta(days=dt.weekday()) return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d}" elif lookup_type == "day": return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d}" raise ValueError(f"Unsupported lookup type: {lookup_type!r}") def _sqlite_time_trunc(lookup_type, dt, tzname, conn_tzname): if dt is None: return None dt_parsed = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt_parsed is None: try: dt = typecast_time(dt) except (ValueError, TypeError): return None else: dt = dt_parsed if lookup_type == "hour": return f"{dt.hour:02d}:00:00" elif lookup_type == "minute": return f"{dt.hour:02d}:{dt.minute:02d}:00" elif lookup_type == "second": return f"{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}" raise ValueError(f"Unsupported lookup type: {lookup_type!r}") def _sqlite_datetime_cast_date(dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None return dt.date().isoformat() def _sqlite_datetime_cast_time(dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None return dt.time().isoformat() def _sqlite_datetime_extract(lookup_type, dt, tzname=None, conn_tzname=None): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None if lookup_type == "week_day": return (dt.isoweekday() % 7) + 1 elif lookup_type == "iso_week_day": return dt.isoweekday() elif lookup_type == "week": return dt.isocalendar()[1] elif 
lookup_type == "quarter": return ceil(dt.month / 3) elif lookup_type == "iso_year": return dt.isocalendar()[0] else: return getattr(dt, lookup_type) def _sqlite_datetime_trunc(lookup_type, dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None if lookup_type == "year": return f"{dt.year:04d}-01-01 00:00:00" elif lookup_type == "quarter": month_in_quarter = dt.month - (dt.month - 1) % 3 return f"{dt.year:04d}-{month_in_quarter:02d}-01 00:00:00" elif lookup_type == "month": return f"{dt.year:04d}-{dt.month:02d}-01 00:00:00" elif lookup_type == "week": dt -= timedelta(days=dt.weekday()) return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} 00:00:00" elif lookup_type == "day": return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} 00:00:00" elif lookup_type == "hour": return f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} {dt.hour:02d}:00:00" elif lookup_type == "minute": return ( f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} " f"{dt.hour:02d}:{dt.minute:02d}:00" ) elif lookup_type == "second": return ( f"{dt.year:04d}-{dt.month:02d}-{dt.day:02d} " f"{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}" ) raise ValueError(f"Unsupported lookup type: {lookup_type!r}") def _sqlite_time_extract(lookup_type, dt): if dt is None: return None try: dt = typecast_time(dt) except (ValueError, TypeError): return None return getattr(dt, lookup_type) def _sqlite_prepare_dtdelta_param(conn, param): if conn in ["+", "-"]: if isinstance(param, int): return timedelta(0, 0, param) else: return typecast_timestamp(param) return param def _sqlite_format_dtdelta(connector, lhs, rhs): """ LHS and RHS can be either: - An integer number of microseconds - A string representing a datetime - A scalar value, e.g. float """ if connector is None or lhs is None or rhs is None: return None connector = connector.strip() try: real_lhs = _sqlite_prepare_dtdelta_param(connector, lhs) real_rhs = _sqlite_prepare_dtdelta_param(connector, rhs) except (ValueError, TypeError): return None if connector == "+": # typecast_timestamp() returns a date or a datetime without timezone. 
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]" out = str(real_lhs + real_rhs) elif connector == "-": out = str(real_lhs - real_rhs) elif connector == "*": out = real_lhs * real_rhs else: out = real_lhs / real_rhs return out def _sqlite_time_diff(lhs, rhs): if lhs is None or rhs is None: return None left = typecast_time(lhs) right = typecast_time(rhs) return ( (left.hour * 60 * 60 * 1000000) + (left.minute * 60 * 1000000) + (left.second * 1000000) + (left.microsecond) - (right.hour * 60 * 60 * 1000000) - (right.minute * 60 * 1000000) - (right.second * 1000000) - (right.microsecond) ) def _sqlite_timestamp_diff(lhs, rhs): if lhs is None or rhs is None: return None left = typecast_timestamp(lhs) right = typecast_timestamp(rhs) return duration_microseconds(left - right) def _sqlite_regexp(pattern, string): if pattern is None or string is None: return None if not isinstance(string, str): string = str(string) return bool(re_search(pattern, string)) def _sqlite_acos(x): if x is None: return None return acos(x) def _sqlite_asin(x): if x is None: return None return asin(x) def _sqlite_atan(x): if x is None: return None return atan(x) def _sqlite_atan2(y, x): if y is None or x is None: return None return atan2(y, x) def _sqlite_bitxor(x, y): if x is None or y is None: return None return x ^ y def _sqlite_ceiling(x): if x is None: return None return ceil(x) def _sqlite_cos(x): if x is None: return None return cos(x) def _sqlite_cot(x): if x is None: return None return 1 / tan(x) def _sqlite_degrees(x): if x is None: return None return degrees(x) def _sqlite_exp(x): if x is None: return None return exp(x) def _sqlite_floor(x): if x is None: return None return floor(x) def _sqlite_ln(x): if x is None: return None return log(x) def _sqlite_log(base, x): if base is None or x is None: return None # Arguments reversed to match SQL standard. 
return log(x, base) def _sqlite_lpad(text, length, fill_text): if text is None or length is None or fill_text is None: return None delta = length - len(text) if delta <= 0: return text[:length] return (fill_text * length)[:delta] + text def _sqlite_md5(text): if text is None: return None return md5(text.encode()).hexdigest() def _sqlite_mod(x, y): if x is None or y is None: return None return fmod(x, y) def _sqlite_pi(): return pi def _sqlite_power(x, y): if x is None or y is None: return None return x**y def _sqlite_radians(x): if x is None: return None return radians(x) def _sqlite_repeat(text, count): if text is None or count is None: return None return text * count def _sqlite_reverse(text): if text is None: return None return text[::-1] def _sqlite_rpad(text, length, fill_text): if text is None or length is None or fill_text is None: return None return (text + fill_text * length)[:length] def _sqlite_sha1(text): if text is None: return None return sha1(text.encode()).hexdigest() def _sqlite_sha224(text): if text is None: return None return sha224(text.encode()).hexdigest() def _sqlite_sha256(text): if text is None: return None return sha256(text.encode()).hexdigest() def _sqlite_sha384(text): if text is None: return None return sha384(text.encode()).hexdigest() def _sqlite_sha512(text): if text is None: return None return sha512(text.encode()).hexdigest() def _sqlite_sign(x): if x is None: return None return (x > 0) - (x < 0) def _sqlite_sin(x): if x is None: return None return sin(x) def _sqlite_sqrt(x): if x is None: return None return sqrt(x) def _sqlite_tan(x): if x is None: return None return tan(x) class ListAggregate(list): step = list.append class StdDevPop(ListAggregate): finalize = statistics.pstdev class StdDevSamp(ListAggregate): finalize = statistics.stdev class VarPop(ListAggregate): finalize = statistics.pvariance class VarSamp(ListAggregate): finalize = statistics.variance
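# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Django): how the register() helper above
# exposes Python callables to SQLite. A deterministic scalar function can be
# installed on any sqlite3 connection and then called from SQL. The _reverse()
# helper mirrors _sqlite_reverse(); the in-memory connection is a throwaway
# created just for the demonstration.
import sqlite3


def _reverse(text):
    if text is None:
        return None
    return text[::-1]


def register_udf_sketch():
    conn = sqlite3.connect(":memory:")
    # deterministic=True promises SQLite the function always returns the same
    # result for the same arguments, which permits its use in more contexts
    # (requires SQLite 3.8.3+; older versions raise NotSupportedError).
    conn.create_function("REVERSE", 1, _reverse, deterministic=True)
    return conn.execute("SELECT REVERSE('django')").fetchone()[0]


# register_udf_sketch() -> 'ognajd'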
import unicodedata from django import forms from django.contrib.auth import authenticate, get_user_model, password_validation from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX, identify_hasher from django.contrib.auth.models import User from django.contrib.auth.tokens import default_token_generator from django.contrib.sites.shortcuts import get_current_site from django.core.exceptions import ValidationError from django.core.mail import EmailMultiAlternatives from django.template import loader from django.utils.encoding import force_bytes from django.utils.http import urlsafe_base64_encode from django.utils.text import capfirst from django.utils.translation import gettext from django.utils.translation import gettext_lazy as _ UserModel = get_user_model() def _unicode_ci_compare(s1, s2): """ Perform case-insensitive comparison of two identifiers, using the recommended algorithm from Unicode Technical Report 36, section 2.11.2(B)(2). """ return ( unicodedata.normalize("NFKC", s1).casefold() == unicodedata.normalize("NFKC", s2).casefold() ) class ReadOnlyPasswordHashWidget(forms.Widget): template_name = "auth/widgets/read_only_password_hash.html" read_only = True def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) summary = [] if not value or value.startswith(UNUSABLE_PASSWORD_PREFIX): summary.append({"label": gettext("No password set.")}) else: try: hasher = identify_hasher(value) except ValueError: summary.append( { "label": gettext( "Invalid password format or unknown hashing algorithm." ) } ) else: for key, value_ in hasher.safe_summary(value).items(): summary.append({"label": gettext(key), "value": value_}) context["summary"] = summary return context def id_for_label(self, id_): return None class ReadOnlyPasswordHashField(forms.Field): widget = ReadOnlyPasswordHashWidget def __init__(self, *args, **kwargs): kwargs.setdefault("required", False) kwargs.setdefault("disabled", True) super().__init__(*args, **kwargs) class UsernameField(forms.CharField): def to_python(self, value): return unicodedata.normalize("NFKC", super().to_python(value)) def widget_attrs(self, widget): return { **super().widget_attrs(widget), "autocapitalize": "none", "autocomplete": "username", } class UserCreationForm(forms.ModelForm): """ A form that creates a user, with no privileges, from the given username and password. 
""" error_messages = { "password_mismatch": _("The two password fields didn’t match."), } password1 = forms.CharField( label=_("Password"), strip=False, widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}), help_text=password_validation.password_validators_help_text_html(), ) password2 = forms.CharField( label=_("Password confirmation"), widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}), strip=False, help_text=_("Enter the same password as before, for verification."), ) class Meta: model = User fields = ("username",) field_classes = {"username": UsernameField} def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if self._meta.model.USERNAME_FIELD in self.fields: self.fields[self._meta.model.USERNAME_FIELD].widget.attrs[ "autofocus" ] = True def clean_password2(self): password1 = self.cleaned_data.get("password1") password2 = self.cleaned_data.get("password2") if password1 and password2 and password1 != password2: raise ValidationError( self.error_messages["password_mismatch"], code="password_mismatch", ) return password2 def _post_clean(self): super()._post_clean() # Validate the password after self.instance is updated with form data # by super(). password = self.cleaned_data.get("password2") if password: try: password_validation.validate_password(password, self.instance) except ValidationError as error: self.add_error("password2", error) def save(self, commit=True): user = super().save(commit=False) user.set_password(self.cleaned_data["password1"]) if commit: user.save() return user class UserChangeForm(forms.ModelForm): password = ReadOnlyPasswordHashField( label=_("Password"), help_text=_( "Raw passwords are not stored, so there is no way to see this " "user’s password, but you can change the password using " '<a href="{}">this form</a>.' ), ) class Meta: model = User fields = "__all__" field_classes = {"username": UsernameField} def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) password = self.fields.get("password") if password: password.help_text = password.help_text.format( f"../../{self.instance.pk}/password/" ) user_permissions = self.fields.get("user_permissions") if user_permissions: user_permissions.queryset = user_permissions.queryset.select_related( "content_type" ) class AuthenticationForm(forms.Form): """ Base class for authenticating users. Extend this to get a form that accepts username/password logins. """ username = UsernameField(widget=forms.TextInput(attrs={"autofocus": True})) password = forms.CharField( label=_("Password"), strip=False, widget=forms.PasswordInput(attrs={"autocomplete": "current-password"}), ) error_messages = { "invalid_login": _( "Please enter a correct %(username)s and password. Note that both " "fields may be case-sensitive." ), "inactive": _("This account is inactive."), } def __init__(self, request=None, *args, **kwargs): """ The 'request' parameter is set for custom auth use by subclasses. The form data comes in via the standard 'data' kwarg. """ self.request = request self.user_cache = None super().__init__(*args, **kwargs) # Set the max length and label for the "username" field. 
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD) username_max_length = self.username_field.max_length or 254 self.fields["username"].max_length = username_max_length self.fields["username"].widget.attrs["maxlength"] = username_max_length if self.fields["username"].label is None: self.fields["username"].label = capfirst(self.username_field.verbose_name) def clean(self): username = self.cleaned_data.get("username") password = self.cleaned_data.get("password") if username is not None and password: self.user_cache = authenticate( self.request, username=username, password=password ) if self.user_cache is None: raise self.get_invalid_login_error() else: self.confirm_login_allowed(self.user_cache) return self.cleaned_data def confirm_login_allowed(self, user): """ Controls whether the given User may log in. This is a policy setting, independent of end-user authentication. This default behavior is to allow login by active users, and reject login by inactive users. If the given user cannot log in, this method should raise a ``ValidationError``. If the given user may log in, this method should return None. """ if not user.is_active: raise ValidationError( self.error_messages["inactive"], code="inactive", ) def get_user(self): return self.user_cache def get_invalid_login_error(self): return ValidationError( self.error_messages["invalid_login"], code="invalid_login", params={"username": self.username_field.verbose_name}, ) class PasswordResetForm(forms.Form): email = forms.EmailField( label=_("Email"), max_length=254, widget=forms.EmailInput(attrs={"autocomplete": "email"}), ) def send_mail( self, subject_template_name, email_template_name, context, from_email, to_email, html_email_template_name=None, ): """ Send a django.core.mail.EmailMultiAlternatives to `to_email`. """ subject = loader.render_to_string(subject_template_name, context) # Email subject *must not* contain newlines subject = "".join(subject.splitlines()) body = loader.render_to_string(email_template_name, context) email_message = EmailMultiAlternatives(subject, body, from_email, [to_email]) if html_email_template_name is not None: html_email = loader.render_to_string(html_email_template_name, context) email_message.attach_alternative(html_email, "text/html") email_message.send() def get_users(self, email): """Given an email, return matching user(s) who should receive a reset. This allows subclasses to more easily customize the default policies that prevent inactive users and users with unusable passwords from resetting their password. """ email_field_name = UserModel.get_email_field_name() active_users = UserModel._default_manager.filter( **{ "%s__iexact" % email_field_name: email, "is_active": True, } ) return ( u for u in active_users if u.has_usable_password() and _unicode_ci_compare(email, getattr(u, email_field_name)) ) def save( self, domain_override=None, subject_template_name="registration/password_reset_subject.txt", email_template_name="registration/password_reset_email.html", use_https=False, token_generator=default_token_generator, from_email=None, request=None, html_email_template_name=None, extra_email_context=None, ): """ Generate a one-use only link for resetting password and send it to the user. 
""" email = self.cleaned_data["email"] if not domain_override: current_site = get_current_site(request) site_name = current_site.name domain = current_site.domain else: site_name = domain = domain_override email_field_name = UserModel.get_email_field_name() for user in self.get_users(email): user_email = getattr(user, email_field_name) context = { "email": user_email, "domain": domain, "site_name": site_name, "uid": urlsafe_base64_encode(force_bytes(user.pk)), "user": user, "token": token_generator.make_token(user), "protocol": "https" if use_https else "http", **(extra_email_context or {}), } self.send_mail( subject_template_name, email_template_name, context, from_email, user_email, html_email_template_name=html_email_template_name, ) class SetPasswordForm(forms.Form): """ A form that lets a user change set their password without entering the old password """ error_messages = { "password_mismatch": _("The two password fields didn’t match."), } new_password1 = forms.CharField( label=_("New password"), widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}), strip=False, help_text=password_validation.password_validators_help_text_html(), ) new_password2 = forms.CharField( label=_("New password confirmation"), strip=False, widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}), ) def __init__(self, user, *args, **kwargs): self.user = user super().__init__(*args, **kwargs) def clean_new_password2(self): password1 = self.cleaned_data.get("new_password1") password2 = self.cleaned_data.get("new_password2") if password1 and password2 and password1 != password2: raise ValidationError( self.error_messages["password_mismatch"], code="password_mismatch", ) password_validation.validate_password(password2, self.user) return password2 def save(self, commit=True): password = self.cleaned_data["new_password1"] self.user.set_password(password) if commit: self.user.save() return self.user class PasswordChangeForm(SetPasswordForm): """ A form that lets a user change their password by entering their old password. """ error_messages = { **SetPasswordForm.error_messages, "password_incorrect": _( "Your old password was entered incorrectly. Please enter it again." ), } old_password = forms.CharField( label=_("Old password"), strip=False, widget=forms.PasswordInput( attrs={"autocomplete": "current-password", "autofocus": True} ), ) field_order = ["old_password", "new_password1", "new_password2"] def clean_old_password(self): """ Validate that the old_password field is correct. """ old_password = self.cleaned_data["old_password"] if not self.user.check_password(old_password): raise ValidationError( self.error_messages["password_incorrect"], code="password_incorrect", ) return old_password class AdminPasswordChangeForm(forms.Form): """ A form used to change the password of a user in the admin interface. 
""" error_messages = { "password_mismatch": _("The two password fields didn’t match."), } required_css_class = "required" password1 = forms.CharField( label=_("Password"), widget=forms.PasswordInput( attrs={"autocomplete": "new-password", "autofocus": True} ), strip=False, help_text=password_validation.password_validators_help_text_html(), ) password2 = forms.CharField( label=_("Password (again)"), widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}), strip=False, help_text=_("Enter the same password as before, for verification."), ) def __init__(self, user, *args, **kwargs): self.user = user super().__init__(*args, **kwargs) def clean_password2(self): password1 = self.cleaned_data.get("password1") password2 = self.cleaned_data.get("password2") if password1 and password2 and password1 != password2: raise ValidationError( self.error_messages["password_mismatch"], code="password_mismatch", ) password_validation.validate_password(password2, self.user) return password2 def save(self, commit=True): """Save the new password.""" password = self.cleaned_data["password1"] self.user.set_password(password) if commit: self.user.save() return self.user @property def changed_data(self): data = super().changed_data for name in self.fields: if name not in data: return [] return ["password"]
import json from django import forms from django.contrib.admin.utils import ( display_for_field, flatten_fieldsets, help_text_for_field, label_for_field, lookup_field, quote, ) from django.core.exceptions import ObjectDoesNotExist from django.db.models.fields.related import ( ForeignObjectRel, ManyToManyRel, OneToOneField, ) from django.forms.utils import flatatt from django.template.defaultfilters import capfirst, linebreaksbr from django.urls import NoReverseMatch, reverse from django.utils.html import conditional_escape, format_html from django.utils.safestring import mark_safe from django.utils.translation import gettext from django.utils.translation import gettext_lazy as _ ACTION_CHECKBOX_NAME = "_selected_action" class ActionForm(forms.Form): action = forms.ChoiceField(label=_("Action:")) select_across = forms.BooleanField( label="", required=False, initial=0, widget=forms.HiddenInput({"class": "select-across"}), ) checkbox = forms.CheckboxInput({"class": "action-select"}, lambda value: False) class AdminForm: def __init__( self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None, ): self.form, self.fieldsets = form, fieldsets self.prepopulated_fields = [ {"field": form[field_name], "dependencies": [form[f] for f in dependencies]} for field_name, dependencies in prepopulated_fields.items() ] self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __repr__(self): return ( f"<{self.__class__.__qualname__}: " f"form={self.form.__class__.__qualname__} " f"fieldsets={self.fieldsets!r}>" ) def __iter__(self): for name, options in self.fieldsets: yield Fieldset( self.form, name, readonly_fields=self.readonly_fields, model_admin=self.model_admin, **options, ) @property def errors(self): return self.form.errors @property def non_field_errors(self): return self.form.non_field_errors @property def fields(self): return self.form.fields @property def is_bound(self): return self.form.is_bound @property def media(self): media = self.form.media for fs in self: media += fs.media return media class Fieldset: def __init__( self, form, name=None, readonly_fields=(), fields=(), classes=(), description=None, model_admin=None, ): self.form = form self.name, self.fields = name, fields self.classes = " ".join(classes) self.description = description self.model_admin = model_admin self.readonly_fields = readonly_fields @property def media(self): if "collapse" in self.classes: return forms.Media(js=["admin/js/collapse.js"]) return forms.Media() def __iter__(self): for field in self.fields: yield Fieldline( self.form, field, self.readonly_fields, model_admin=self.model_admin ) class Fieldline: def __init__(self, form, field, readonly_fields=None, model_admin=None): self.form = form # A django.forms.Form instance if not hasattr(field, "__iter__") or isinstance(field, str): self.fields = [field] else: self.fields = field self.has_visible_field = not all( field in self.form.fields and self.form.fields[field].widget.is_hidden for field in self.fields ) self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for i, field in enumerate(self.fields): if field in self.readonly_fields: yield AdminReadonlyField( self.form, field, is_first=(i == 0), model_admin=self.model_admin ) else: yield AdminField(self.form, field, is_first=(i == 0)) def errors(self): return mark_safe( "\n".join( self.form[f].errors.as_ul() for f in self.fields if f not in 
self.readonly_fields ).strip("\n") ) class AdminField: def __init__(self, form, field, is_first): self.field = form[field] # A django.forms.BoundField instance self.is_first = is_first # Whether this field is first on the line self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput) self.is_readonly = False def label_tag(self): classes = [] contents = conditional_escape(self.field.label) if self.is_checkbox: classes.append("vCheckboxLabel") if self.field.field.required: classes.append("required") if not self.is_first: classes.append("inline") attrs = {"class": " ".join(classes)} if classes else {} # checkboxes should not have a label suffix as the checkbox appears # to the left of the label. return self.field.label_tag( contents=mark_safe(contents), attrs=attrs, label_suffix="" if self.is_checkbox else None, ) def errors(self): return mark_safe(self.field.errors.as_ul()) class AdminReadonlyField: def __init__(self, form, field, is_first, model_admin=None): # Make self.field look a little bit like a field. This means that # {{ field.name }} must be a useful class name to identify the field. # For convenience, store other field-related data here too. if callable(field): class_name = field.__name__ if field.__name__ != "<lambda>" else "" else: class_name = field if form._meta.labels and class_name in form._meta.labels: label = form._meta.labels[class_name] else: label = label_for_field(field, form._meta.model, model_admin, form=form) if form._meta.help_texts and class_name in form._meta.help_texts: help_text = form._meta.help_texts[class_name] else: help_text = help_text_for_field(class_name, form._meta.model) if field in form.fields: is_hidden = form.fields[field].widget.is_hidden else: is_hidden = False self.field = { "name": class_name, "label": label, "help_text": help_text, "field": field, "is_hidden": is_hidden, } self.form = form self.model_admin = model_admin self.is_first = is_first self.is_checkbox = False self.is_readonly = True self.empty_value_display = model_admin.get_empty_value_display() def label_tag(self): attrs = {} if not self.is_first: attrs["class"] = "inline" label = self.field["label"] return format_html( "<label{}>{}{}</label>", flatatt(attrs), capfirst(label), self.form.label_suffix, ) def get_admin_url(self, remote_field, remote_obj): url_name = "admin:%s_%s_change" % ( remote_field.model._meta.app_label, remote_field.model._meta.model_name, ) try: url = reverse( url_name, args=[quote(remote_obj.pk)], current_app=self.model_admin.admin_site.name, ) return format_html('<a href="{}">{}</a>', url, remote_obj) except NoReverseMatch: return str(remote_obj) def contents(self): from django.contrib.admin.templatetags.admin_list import _boolean_icon field, obj, model_admin = ( self.field["field"], self.form.instance, self.model_admin, ) try: f, attr, value = lookup_field(field, obj, model_admin) except (AttributeError, ValueError, ObjectDoesNotExist): result_repr = self.empty_value_display else: if field in self.form.fields: widget = self.form[field].field.widget # This isn't elegant but suffices for contrib.auth's # ReadOnlyPasswordHashWidget. 
if getattr(widget, "read_only", False): return widget.render(field, value) if f is None: if getattr(attr, "boolean", False): result_repr = _boolean_icon(value) else: if hasattr(value, "__html__"): result_repr = value else: result_repr = linebreaksbr(value) else: if isinstance(f.remote_field, ManyToManyRel) and value is not None: result_repr = ", ".join(map(str, value.all())) elif ( isinstance(f.remote_field, (ForeignObjectRel, OneToOneField)) and value is not None ): result_repr = self.get_admin_url(f.remote_field, value) else: result_repr = display_for_field(value, f, self.empty_value_display) result_repr = linebreaksbr(result_repr) return conditional_escape(result_repr) class InlineAdminFormSet: """ A wrapper around an inline formset for use in the admin system. """ def __init__( self, inline, formset, fieldsets, prepopulated_fields=None, readonly_fields=None, model_admin=None, has_add_permission=True, has_change_permission=True, has_delete_permission=True, has_view_permission=True, ): self.opts = inline self.formset = formset self.fieldsets = fieldsets self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields if prepopulated_fields is None: prepopulated_fields = {} self.prepopulated_fields = prepopulated_fields self.classes = " ".join(inline.classes) if inline.classes else "" self.has_add_permission = has_add_permission self.has_change_permission = has_change_permission self.has_delete_permission = has_delete_permission self.has_view_permission = has_view_permission def __iter__(self): if self.has_change_permission: readonly_fields_for_editing = self.readonly_fields else: readonly_fields_for_editing = self.readonly_fields + flatten_fieldsets( self.fieldsets ) for form, original in zip( self.formset.initial_forms, self.formset.get_queryset() ): view_on_site_url = self.opts.get_view_on_site_url(original) yield InlineAdminForm( self.formset, form, self.fieldsets, self.prepopulated_fields, original, readonly_fields_for_editing, model_admin=self.opts, view_on_site_url=view_on_site_url, ) for form in self.formset.extra_forms: yield InlineAdminForm( self.formset, form, self.fieldsets, self.prepopulated_fields, None, self.readonly_fields, model_admin=self.opts, ) if self.has_add_permission: yield InlineAdminForm( self.formset, self.formset.empty_form, self.fieldsets, self.prepopulated_fields, None, self.readonly_fields, model_admin=self.opts, ) def fields(self): fk = getattr(self.formset, "fk", None) empty_form = self.formset.empty_form meta_labels = empty_form._meta.labels or {} meta_help_texts = empty_form._meta.help_texts or {} for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)): if fk and fk.name == field_name: continue if not self.has_change_permission or field_name in self.readonly_fields: form_field = empty_form.fields.get(field_name) widget_is_hidden = False if form_field is not None: widget_is_hidden = form_field.widget.is_hidden yield { "name": field_name, "label": meta_labels.get(field_name) or label_for_field( field_name, self.opts.model, self.opts, form=empty_form, ), "widget": {"is_hidden": widget_is_hidden}, "required": False, "help_text": meta_help_texts.get(field_name) or help_text_for_field(field_name, self.opts.model), } else: form_field = empty_form.fields[field_name] label = form_field.label if label is None: label = label_for_field( field_name, self.opts.model, self.opts, form=empty_form ) yield { "name": field_name, "label": label, "widget": form_field.widget, "required": form_field.required, 
"help_text": form_field.help_text, } def inline_formset_data(self): verbose_name = self.opts.verbose_name return json.dumps( { "name": "#%s" % self.formset.prefix, "options": { "prefix": self.formset.prefix, "addText": gettext("Add another %(verbose_name)s") % { "verbose_name": capfirst(verbose_name), }, "deleteText": gettext("Remove"), }, } ) @property def forms(self): return self.formset.forms def non_form_errors(self): return self.formset.non_form_errors() @property def is_bound(self): return self.formset.is_bound @property def total_form_count(self): return self.formset.total_form_count @property def media(self): media = self.opts.media + self.formset.media for fs in self: media += fs.media return media class InlineAdminForm(AdminForm): """ A wrapper around an inline form for use in the admin system. """ def __init__( self, formset, form, fieldsets, prepopulated_fields, original, readonly_fields=None, model_admin=None, view_on_site_url=None, ): self.formset = formset self.model_admin = model_admin self.original = original self.show_url = original and view_on_site_url is not None self.absolute_url = view_on_site_url super().__init__( form, fieldsets, prepopulated_fields, readonly_fields, model_admin ) def __iter__(self): for name, options in self.fieldsets: yield InlineFieldset( self.formset, self.form, name, self.readonly_fields, model_admin=self.model_admin, **options, ) def needs_explicit_pk_field(self): return ( # Auto fields are editable, so check for auto or non-editable pk. self.form._meta.model._meta.auto_field or not self.form._meta.model._meta.pk.editable or # Also search any parents for an auto field. (The pk info is # propagated to child models so that does not need to be checked # in parents.) any( parent._meta.auto_field or not parent._meta.model._meta.pk.editable for parent in self.form._meta.model._meta.get_parent_list() ) ) def pk_field(self): return AdminField(self.form, self.formset._pk_field.name, False) def fk_field(self): fk = getattr(self.formset, "fk", None) if fk: return AdminField(self.form, fk.name, False) else: return "" def deletion_field(self): from django.forms.formsets import DELETION_FIELD_NAME return AdminField(self.form, DELETION_FIELD_NAME, False) class InlineFieldset(Fieldset): def __init__(self, formset, *args, **kwargs): self.formset = formset super().__init__(*args, **kwargs) def __iter__(self): fk = getattr(self.formset, "fk", None) for field in self.fields: if not fk or fk.name != field: yield Fieldline( self.form, field, self.readonly_fields, model_admin=self.model_admin ) class AdminErrorList(forms.utils.ErrorList): """Store errors for the form/formsets in an add/change view.""" def __init__(self, form, inline_formsets): super().__init__() if form.is_bound: self.extend(form.errors.values()) for inline_formset in inline_formsets: self.extend(inline_formset.non_form_errors()) for errors_in_inline_form in inline_formset.errors: self.extend(errors_in_inline_form.values())
import copy import json import re from functools import partial, update_wrapper from urllib.parse import quote as urlquote from django import forms from django.conf import settings from django.contrib import messages from django.contrib.admin import helpers, widgets from django.contrib.admin.checks import ( BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks, ) from django.contrib.admin.decorators import display from django.contrib.admin.exceptions import DisallowedModelAdminToField from django.contrib.admin.templatetags.admin_urls import add_preserved_filters from django.contrib.admin.utils import ( NestedObjects, construct_change_message, flatten_fieldsets, get_deleted_objects, lookup_spawns_duplicates, model_format_dict, model_ngettext, quote, unquote, ) from django.contrib.admin.widgets import AutocompleteSelect, AutocompleteSelectMultiple from django.contrib.auth import get_permission_codename from django.core.exceptions import ( FieldDoesNotExist, FieldError, PermissionDenied, ValidationError, ) from django.core.paginator import Paginator from django.db import models, router, transaction from django.db.models.constants import LOOKUP_SEP from django.forms.formsets import DELETION_FIELD_NAME, all_valid from django.forms.models import ( BaseInlineFormSet, inlineformset_factory, modelform_defines_fields, modelform_factory, modelformset_factory, ) from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple from django.http import HttpResponseRedirect from django.http.response import HttpResponseBase from django.template.response import SimpleTemplateResponse, TemplateResponse from django.urls import reverse from django.utils.decorators import method_decorator from django.utils.html import format_html from django.utils.http import urlencode from django.utils.safestring import mark_safe from django.utils.text import ( capfirst, format_lazy, get_text_list, smart_split, unescape_string_literal, ) from django.utils.translation import gettext as _ from django.utils.translation import ngettext from django.views.decorators.csrf import csrf_protect from django.views.generic import RedirectView IS_POPUP_VAR = "_popup" TO_FIELD_VAR = "_to_field" HORIZONTAL, VERTICAL = 1, 2 def get_content_type_for_model(obj): # Since this module gets imported in the application's root package, # it cannot import models from other applications at the module level. from django.contrib.contenttypes.models import ContentType return ContentType.objects.get_for_model(obj, for_concrete_model=False) def get_ul_class(radio_style): return "radiolist" if radio_style == VERTICAL else "radiolist inline" class IncorrectLookupParameters(Exception): pass # Defaults for formfield_overrides. ModelAdmin subclasses can change this # by adding to ModelAdmin.formfield_overrides. 
FORMFIELD_FOR_DBFIELD_DEFAULTS = { models.DateTimeField: { "form_class": forms.SplitDateTimeField, "widget": widgets.AdminSplitDateTime, }, models.DateField: {"widget": widgets.AdminDateWidget}, models.TimeField: {"widget": widgets.AdminTimeWidget}, models.TextField: {"widget": widgets.AdminTextareaWidget}, models.URLField: {"widget": widgets.AdminURLFieldWidget}, models.IntegerField: {"widget": widgets.AdminIntegerFieldWidget}, models.BigIntegerField: {"widget": widgets.AdminBigIntegerFieldWidget}, models.CharField: {"widget": widgets.AdminTextInputWidget}, models.ImageField: {"widget": widgets.AdminFileWidget}, models.FileField: {"widget": widgets.AdminFileWidget}, models.EmailField: {"widget": widgets.AdminEmailInputWidget}, models.UUIDField: {"widget": widgets.AdminUUIDInputWidget}, } csrf_protect_m = method_decorator(csrf_protect) class BaseModelAdmin(metaclass=forms.MediaDefiningClass): """Functionality common to both ModelAdmin and InlineAdmin.""" autocomplete_fields = () raw_id_fields = () fields = None exclude = None fieldsets = None form = forms.ModelForm filter_vertical = () filter_horizontal = () radio_fields = {} prepopulated_fields = {} formfield_overrides = {} readonly_fields = () ordering = None sortable_by = None view_on_site = True show_full_result_count = True checks_class = BaseModelAdminChecks def check(self, **kwargs): return self.checks_class().check(self, **kwargs) def __init__(self): # Merge FORMFIELD_FOR_DBFIELD_DEFAULTS with the formfield_overrides # rather than simply overwriting. overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS) for k, v in self.formfield_overrides.items(): overrides.setdefault(k, {}).update(v) self.formfield_overrides = overrides def formfield_for_dbfield(self, db_field, request, **kwargs): """ Hook for specifying the form Field instance for a given database Field instance. If kwargs are given, they're passed to the form Field's constructor. """ # If the field specifies choices, we don't need to look for special # admin widgets - we just need to use a select widget of some kind. if db_field.choices: return self.formfield_for_choice_field(db_field, request, **kwargs) # ForeignKey or ManyToManyFields if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)): # Combine the field kwargs with any options for formfield_overrides. # Make sure the passed in **kwargs override anything in # formfield_overrides because **kwargs is more specific, and should # always win. if db_field.__class__ in self.formfield_overrides: kwargs = {**self.formfield_overrides[db_field.__class__], **kwargs} # Get the correct formfield. if isinstance(db_field, models.ForeignKey): formfield = self.formfield_for_foreignkey(db_field, request, **kwargs) elif isinstance(db_field, models.ManyToManyField): formfield = self.formfield_for_manytomany(db_field, request, **kwargs) # For non-raw_id fields, wrap the widget with a wrapper that adds # extra HTML -- the "add other" interface -- to the end of the # rendered output. formfield can be None if it came from a # OneToOneField with parent_link=True or a M2M intermediary. 
if formfield and db_field.name not in self.raw_id_fields: related_modeladmin = self.admin_site._registry.get( db_field.remote_field.model ) wrapper_kwargs = {} if related_modeladmin: wrapper_kwargs.update( can_add_related=related_modeladmin.has_add_permission(request), can_change_related=related_modeladmin.has_change_permission( request ), can_delete_related=related_modeladmin.has_delete_permission( request ), can_view_related=related_modeladmin.has_view_permission( request ), ) formfield.widget = widgets.RelatedFieldWidgetWrapper( formfield.widget, db_field.remote_field, self.admin_site, **wrapper_kwargs, ) return formfield # If we've got overrides for the formfield defined, use 'em. **kwargs # passed to formfield_for_dbfield override the defaults. for klass in db_field.__class__.mro(): if klass in self.formfield_overrides: kwargs = {**copy.deepcopy(self.formfield_overrides[klass]), **kwargs} return db_field.formfield(**kwargs) # For any other type of field, just call its formfield() method. return db_field.formfield(**kwargs) def formfield_for_choice_field(self, db_field, request, **kwargs): """ Get a form Field for a database Field that has declared choices. """ # If the field is named as a radio_field, use a RadioSelect if db_field.name in self.radio_fields: # Avoid stomping on custom widget/choices arguments. if "widget" not in kwargs: kwargs["widget"] = widgets.AdminRadioSelect( attrs={ "class": get_ul_class(self.radio_fields[db_field.name]), } ) if "choices" not in kwargs: kwargs["choices"] = db_field.get_choices( include_blank=db_field.blank, blank_choice=[("", _("None"))] ) return db_field.formfield(**kwargs) def get_field_queryset(self, db, db_field, request): """ If the ModelAdmin specifies ordering, the queryset should respect that ordering. Otherwise don't specify the queryset, let the field decide (return None in that case). """ related_admin = self.admin_site._registry.get(db_field.remote_field.model) if related_admin is not None: ordering = related_admin.get_ordering(request) if ordering is not None and ordering != (): return db_field.remote_field.model._default_manager.using(db).order_by( *ordering ) return None def formfield_for_foreignkey(self, db_field, request, **kwargs): """ Get a form Field for a ForeignKey. """ db = kwargs.get("using") if "widget" not in kwargs: if db_field.name in self.get_autocomplete_fields(request): kwargs["widget"] = AutocompleteSelect( db_field, self.admin_site, using=db ) elif db_field.name in self.raw_id_fields: kwargs["widget"] = widgets.ForeignKeyRawIdWidget( db_field.remote_field, self.admin_site, using=db ) elif db_field.name in self.radio_fields: kwargs["widget"] = widgets.AdminRadioSelect( attrs={ "class": get_ul_class(self.radio_fields[db_field.name]), } ) kwargs["empty_label"] = ( kwargs.get("empty_label", _("None")) if db_field.blank else None ) if "queryset" not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs["queryset"] = queryset return db_field.formfield(**kwargs) def formfield_for_manytomany(self, db_field, request, **kwargs): """ Get a form Field for a ManyToManyField. """ # If it uses an intermediary model that isn't auto created, don't show # a field in admin. 
if not db_field.remote_field.through._meta.auto_created: return None db = kwargs.get("using") if "widget" not in kwargs: autocomplete_fields = self.get_autocomplete_fields(request) if db_field.name in autocomplete_fields: kwargs["widget"] = AutocompleteSelectMultiple( db_field, self.admin_site, using=db, ) elif db_field.name in self.raw_id_fields: kwargs["widget"] = widgets.ManyToManyRawIdWidget( db_field.remote_field, self.admin_site, using=db, ) elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]: kwargs["widget"] = widgets.FilteredSelectMultiple( db_field.verbose_name, db_field.name in self.filter_vertical ) if "queryset" not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs["queryset"] = queryset form_field = db_field.formfield(**kwargs) if ( isinstance(form_field.widget, SelectMultiple) and form_field.widget.allow_multiple_selected and not isinstance( form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple) ) ): msg = _( "Hold down “Control”, or “Command” on a Mac, to select more than one." ) help_text = form_field.help_text form_field.help_text = ( format_lazy("{} {}", help_text, msg) if help_text else msg ) return form_field def get_autocomplete_fields(self, request): """ Return a list of ForeignKey and/or ManyToMany fields which should use an autocomplete widget. """ return self.autocomplete_fields def get_view_on_site_url(self, obj=None): if obj is None or not self.view_on_site: return None if callable(self.view_on_site): return self.view_on_site(obj) elif hasattr(obj, "get_absolute_url"): # use the ContentType lookup if view_on_site is True return reverse( "admin:view_on_site", kwargs={ "content_type_id": get_content_type_for_model(obj).pk, "object_id": obj.pk, }, current_app=self.admin_site.name, ) def get_empty_value_display(self): """ Return the empty_value_display set on ModelAdmin or AdminSite. """ try: return mark_safe(self.empty_value_display) except AttributeError: return mark_safe(self.admin_site.empty_value_display) def get_exclude(self, request, obj=None): """ Hook for specifying exclude. """ return self.exclude def get_fields(self, request, obj=None): """ Hook for specifying fields. """ if self.fields: return self.fields # _get_form_for_get_fields() is implemented in subclasses. form = self._get_form_for_get_fields(request, obj) return [*form.base_fields, *self.get_readonly_fields(request, obj)] def get_fieldsets(self, request, obj=None): """ Hook for specifying fieldsets. """ if self.fieldsets: return self.fieldsets return [(None, {"fields": self.get_fields(request, obj)})] def get_inlines(self, request, obj): """Hook for specifying custom inlines.""" return self.inlines def get_ordering(self, request): """ Hook for specifying field ordering. """ return self.ordering or () # otherwise we might try to *None, which is bad ;) def get_readonly_fields(self, request, obj=None): """ Hook for specifying custom readonly fields. """ return self.readonly_fields def get_prepopulated_fields(self, request, obj=None): """ Hook for specifying custom prepopulated fields. """ return self.prepopulated_fields def get_queryset(self, request): """ Return a QuerySet of all model instances that can be edited by the admin site. This is used by changelist_view. """ qs = self.model._default_manager.get_queryset() # TODO: this should be handled by some parameter to the ChangeList. 
ordering = self.get_ordering(request) if ordering: qs = qs.order_by(*ordering) return qs def get_sortable_by(self, request): """Hook for specifying which fields can be sorted in the changelist.""" return ( self.sortable_by if self.sortable_by is not None else self.get_list_display(request) ) def lookup_allowed(self, lookup, value): from django.contrib.admin.filters import SimpleListFilter model = self.model # Check FKey lookups that are allowed, so that popups produced by # ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to, # are allowed to work. for fk_lookup in model._meta.related_fkey_lookups: # As ``limit_choices_to`` can be a callable, invoke it here. if callable(fk_lookup): fk_lookup = fk_lookup() if (lookup, value) in widgets.url_params_from_lookup_dict( fk_lookup ).items(): return True relation_parts = [] prev_field = None for part in lookup.split(LOOKUP_SEP): try: field = model._meta.get_field(part) except FieldDoesNotExist: # Lookups on nonexistent fields are ok, since they're ignored # later. break # It is allowed to filter on values that would be found from local # model anyways. For example, if you filter on employee__department__id, # then the id value would be found already from employee__department_id. if not prev_field or ( prev_field.is_relation and field not in prev_field.path_infos[-1].target_fields ): relation_parts.append(part) if not getattr(field, "path_infos", None): # This is not a relational field, so further parts # must be transforms. break prev_field = field model = field.path_infos[-1].to_opts.model if len(relation_parts) <= 1: # Either a local field filter, or no fields at all. return True valid_lookups = {self.date_hierarchy} for filter_item in self.list_filter: if isinstance(filter_item, type) and issubclass( filter_item, SimpleListFilter ): valid_lookups.add(filter_item.parameter_name) elif isinstance(filter_item, (list, tuple)): valid_lookups.add(filter_item[0]) else: valid_lookups.add(filter_item) # Is it a valid relational lookup? return not { LOOKUP_SEP.join(relation_parts), LOOKUP_SEP.join(relation_parts + [part]), }.isdisjoint(valid_lookups) def to_field_allowed(self, request, to_field): """ Return True if the model associated with this admin should be allowed to be referenced by the specified field. """ try: field = self.opts.get_field(to_field) except FieldDoesNotExist: return False # Always allow referencing the primary key since it's already possible # to get this information from the change view URL. if field.primary_key: return True # Allow reverse relationships to models defining m2m fields if they # target the specified field. for many_to_many in self.opts.many_to_many: if many_to_many.m2m_target_field_name() == to_field: return True # Make sure at least one of the models registered for this site # references this field through a FK or a M2M relationship. 
registered_models = set() for model, admin in self.admin_site._registry.items(): registered_models.add(model) for inline in admin.inlines: registered_models.add(inline.model) related_objects = ( f for f in self.opts.get_fields(include_hidden=True) if (f.auto_created and not f.concrete) ) for related_object in related_objects: related_model = related_object.related_model remote_field = related_object.field.remote_field if ( any(issubclass(model, related_model) for model in registered_models) and hasattr(remote_field, "get_related_field") and remote_field.get_related_field() == field ): return True return False def has_add_permission(self, request): """ Return True if the given request has permission to add an object. Can be overridden by the user in subclasses. """ opts = self.opts codename = get_permission_codename("add", opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_change_permission(self, request, obj=None): """ Return True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to change the `obj` model instance. If `obj` is None, this should return True if the given request has permission to change *any* object of the given type. """ opts = self.opts codename = get_permission_codename("change", opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_delete_permission(self, request, obj=None): """ Return True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to delete the `obj` model instance. If `obj` is None, this should return True if the given request has permission to delete *any* object of the given type. """ opts = self.opts codename = get_permission_codename("delete", opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_view_permission(self, request, obj=None): """ Return True if the given request has permission to view the given Django model instance. The default implementation doesn't examine the `obj` parameter. If overridden by the user in subclasses, it should return True if the given request has permission to view the `obj` model instance. If `obj` is None, it should return True if the request has permission to view any object of the given type. """ opts = self.opts codename_view = get_permission_codename("view", opts) codename_change = get_permission_codename("change", opts) return request.user.has_perm( "%s.%s" % (opts.app_label, codename_view) ) or request.user.has_perm("%s.%s" % (opts.app_label, codename_change)) def has_view_or_change_permission(self, request, obj=None): return self.has_view_permission(request, obj) or self.has_change_permission( request, obj ) def has_module_permission(self, request): """ Return True if the given request has any permission in the given app label. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to view the module on the admin index page and access the module's index page. Overriding it does not restrict access to the add, change or delete views. Use `ModelAdmin.has_(add|change|delete)_permission` for that. 
""" return request.user.has_module_perms(self.opts.app_label) class ModelAdmin(BaseModelAdmin): """Encapsulate all admin options and functionality for a given model.""" list_display = ("__str__",) list_display_links = () list_filter = () list_select_related = False list_per_page = 100 list_max_show_all = 200 list_editable = () search_fields = () search_help_text = None date_hierarchy = None save_as = False save_as_continue = True save_on_top = False paginator = Paginator preserve_filters = True inlines = () # Custom templates (designed to be over-ridden in subclasses) add_form_template = None change_form_template = None change_list_template = None delete_confirmation_template = None delete_selected_confirmation_template = None object_history_template = None popup_response_template = None # Actions actions = () action_form = helpers.ActionForm actions_on_top = True actions_on_bottom = False actions_selection_counter = True checks_class = ModelAdminChecks def __init__(self, model, admin_site): self.model = model self.opts = model._meta self.admin_site = admin_site super().__init__() def __str__(self): return "%s.%s" % (self.opts.app_label, self.__class__.__name__) def __repr__(self): return ( f"<{self.__class__.__qualname__}: model={self.model.__qualname__} " f"site={self.admin_site!r}>" ) def get_inline_instances(self, request, obj=None): inline_instances = [] for inline_class in self.get_inlines(request, obj): inline = inline_class(self.model, self.admin_site) if request: if not ( inline.has_view_or_change_permission(request, obj) or inline.has_add_permission(request, obj) or inline.has_delete_permission(request, obj) ): continue if not inline.has_add_permission(request, obj): inline.max_num = 0 inline_instances.append(inline) return inline_instances def get_urls(self): from django.urls import path def wrap(view): def wrapper(*args, **kwargs): return self.admin_site.admin_view(view)(*args, **kwargs) wrapper.model_admin = self return update_wrapper(wrapper, view) info = self.opts.app_label, self.opts.model_name return [ path("", wrap(self.changelist_view), name="%s_%s_changelist" % info), path("add/", wrap(self.add_view), name="%s_%s_add" % info), path( "<path:object_id>/history/", wrap(self.history_view), name="%s_%s_history" % info, ), path( "<path:object_id>/delete/", wrap(self.delete_view), name="%s_%s_delete" % info, ), path( "<path:object_id>/change/", wrap(self.change_view), name="%s_%s_change" % info, ), # For backwards compatibility (was the change url before 1.9) path( "<path:object_id>/", wrap( RedirectView.as_view( pattern_name="%s:%s_%s_change" % ((self.admin_site.name,) + info) ) ), ), ] @property def urls(self): return self.get_urls() @property def media(self): extra = "" if settings.DEBUG else ".min" js = [ "vendor/jquery/jquery%s.js" % extra, "jquery.init.js", "core.js", "admin/RelatedObjectLookups.js", "actions.js", "urlify.js", "prepopulate.js", "vendor/xregexp/xregexp%s.js" % extra, ] return forms.Media(js=["admin/js/%s" % url for url in js]) def get_model_perms(self, request): """ Return a dict of all perms for this model. This dict has the keys ``add``, ``change``, ``delete``, and ``view`` mapping to the True/False for each of those actions. 
""" return { "add": self.has_add_permission(request), "change": self.has_change_permission(request), "delete": self.has_delete_permission(request), "view": self.has_view_permission(request), } def _get_form_for_get_fields(self, request, obj): return self.get_form(request, obj, fields=None) def get_form(self, request, obj=None, change=False, **kwargs): """ Return a Form class for use in the admin add view. This is used by add_view and change_view. """ if "fields" in kwargs: fields = kwargs.pop("fields") else: fields = flatten_fieldsets(self.get_fieldsets(request, obj)) excluded = self.get_exclude(request, obj) exclude = [] if excluded is None else list(excluded) readonly_fields = self.get_readonly_fields(request, obj) exclude.extend(readonly_fields) # Exclude all fields if it's a change form and the user doesn't have # the change permission. if ( change and hasattr(request, "user") and not self.has_change_permission(request, obj) ): exclude.extend(fields) if excluded is None and hasattr(self.form, "_meta") and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # ModelAdmin doesn't define its own. exclude.extend(self.form._meta.exclude) # if exclude is an empty list we pass None to be consistent with the # default on modelform_factory exclude = exclude or None # Remove declared form fields which are in readonly_fields. new_attrs = dict.fromkeys( f for f in readonly_fields if f in self.form.declared_fields ) form = type(self.form.__name__, (self.form,), new_attrs) defaults = { "form": form, "fields": fields, "exclude": exclude, "formfield_callback": partial(self.formfield_for_dbfield, request=request), **kwargs, } if defaults["fields"] is None and not modelform_defines_fields( defaults["form"] ): defaults["fields"] = forms.ALL_FIELDS try: return modelform_factory(self.model, **defaults) except FieldError as e: raise FieldError( "%s. Check fields/fieldsets/exclude attributes of class %s." % (e, self.__class__.__name__) ) def get_changelist(self, request, **kwargs): """ Return the ChangeList class for use on the changelist page. """ from django.contrib.admin.views.main import ChangeList return ChangeList def get_changelist_instance(self, request): """ Return a `ChangeList` instance based on `request`. May raise `IncorrectLookupParameters`. """ list_display = self.get_list_display(request) list_display_links = self.get_list_display_links(request, list_display) # Add the action checkboxes if any actions are available. if self.get_actions(request): list_display = ["action_checkbox", *list_display] sortable_by = self.get_sortable_by(request) ChangeList = self.get_changelist(request) return ChangeList( request, self.model, list_display, list_display_links, self.get_list_filter(request), self.date_hierarchy, self.get_search_fields(request), self.get_list_select_related(request), self.list_per_page, self.list_max_show_all, self.list_editable, self, sortable_by, self.search_help_text, ) def get_object(self, request, object_id, from_field=None): """ Return an instance matching the field and value provided, the primary key is used if no field is provided. Return ``None`` if no match is found or the object_id fails validation. 
""" queryset = self.get_queryset(request) model = queryset.model field = ( model._meta.pk if from_field is None else model._meta.get_field(from_field) ) try: object_id = field.to_python(object_id) return queryset.get(**{field.name: object_id}) except (model.DoesNotExist, ValidationError, ValueError): return None def get_changelist_form(self, request, **kwargs): """ Return a Form class for use in the Formset on the changelist page. """ defaults = { "formfield_callback": partial(self.formfield_for_dbfield, request=request), **kwargs, } if defaults.get("fields") is None and not modelform_defines_fields( defaults.get("form") ): defaults["fields"] = forms.ALL_FIELDS return modelform_factory(self.model, **defaults) def get_changelist_formset(self, request, **kwargs): """ Return a FormSet class for use on the changelist page if list_editable is used. """ defaults = { "formfield_callback": partial(self.formfield_for_dbfield, request=request), **kwargs, } return modelformset_factory( self.model, self.get_changelist_form(request), extra=0, fields=self.list_editable, **defaults, ) def get_formsets_with_inlines(self, request, obj=None): """ Yield formsets and the corresponding inlines. """ for inline in self.get_inline_instances(request, obj): yield inline.get_formset(request, obj), inline def get_paginator( self, request, queryset, per_page, orphans=0, allow_empty_first_page=True ): return self.paginator(queryset, per_page, orphans, allow_empty_first_page) def log_addition(self, request, obj, message): """ Log that an object has been successfully added. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import ADDITION, LogEntry return LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(obj).pk, object_id=obj.pk, object_repr=str(obj), action_flag=ADDITION, change_message=message, ) def log_change(self, request, obj, message): """ Log that an object has been successfully changed. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import CHANGE, LogEntry return LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(obj).pk, object_id=obj.pk, object_repr=str(obj), action_flag=CHANGE, change_message=message, ) def log_deletion(self, request, obj, object_repr): """ Log that an object will be deleted. Note that this method must be called before the deletion. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import DELETION, LogEntry return LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(obj).pk, object_id=obj.pk, object_repr=object_repr, action_flag=DELETION, ) @display(description=mark_safe('<input type="checkbox" id="action-toggle">')) def action_checkbox(self, obj): """ A list_display column containing a checkbox widget. """ return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, str(obj.pk)) @staticmethod def _get_action_description(func, name): return getattr(func, "short_description", capfirst(name.replace("_", " "))) def _get_base_actions(self): """Return the list of actions, prior to any request-based filtering.""" actions = [] base_actions = (self.get_action(action) for action in self.actions or []) # get_action might have returned None, so filter any of those out. 
base_actions = [action for action in base_actions if action] base_action_names = {name for _, name, _ in base_actions} # Gather actions from the admin site first for (name, func) in self.admin_site.actions: if name in base_action_names: continue description = self._get_action_description(func, name) actions.append((func, name, description)) # Add actions from this ModelAdmin. actions.extend(base_actions) return actions def _filter_actions_by_permissions(self, request, actions): """Filter out any actions that the user doesn't have access to.""" filtered_actions = [] for action in actions: callable = action[0] if not hasattr(callable, "allowed_permissions"): filtered_actions.append(action) continue permission_checks = ( getattr(self, "has_%s_permission" % permission) for permission in callable.allowed_permissions ) if any(has_permission(request) for has_permission in permission_checks): filtered_actions.append(action) return filtered_actions def get_actions(self, request): """ Return a dictionary mapping the names of all actions for this ModelAdmin to a tuple of (callable, name, description) for each action. """ # If self.actions is set to None that means actions are disabled on # this page. if self.actions is None or IS_POPUP_VAR in request.GET: return {} actions = self._filter_actions_by_permissions(request, self._get_base_actions()) return {name: (func, name, desc) for func, name, desc in actions} def get_action_choices(self, request, default_choices=models.BLANK_CHOICE_DASH): """ Return a list of choices for use in a form object. Each choice is a tuple (name, description). """ choices = [] + default_choices for func, name, description in self.get_actions(request).values(): choice = (name, description % model_format_dict(self.opts)) choices.append(choice) return choices def get_action(self, action): """ Return a given action from a parameter, which can either be a callable, or the name of a method on the ModelAdmin. Return is a tuple of (callable, name, description). """ # If the action is a callable, just use it. if callable(action): func = action action = action.__name__ # Next, look for a method. Grab it off self.__class__ to get an unbound # method instead of a bound one; this ensures that the calling # conventions are the same for functions and methods. elif hasattr(self.__class__, action): func = getattr(self.__class__, action) # Finally, look for a named method on the admin site else: try: func = self.admin_site.get_action(action) except KeyError: return None description = self._get_action_description(func, action) return func, action, description def get_list_display(self, request): """ Return a sequence containing the fields to be displayed on the changelist. """ return self.list_display def get_list_display_links(self, request, list_display): """ Return a sequence containing the fields to be displayed as links on the changelist. The list_display parameter is the list of fields returned by get_list_display(). """ if ( self.list_display_links or self.list_display_links is None or not list_display ): return self.list_display_links else: # Use only the first item in list_display as link return list(list_display)[:1] def get_list_filter(self, request): """ Return a sequence containing the fields to be displayed as filters in the right sidebar of the changelist page. """ return self.list_filter def get_list_select_related(self, request): """ Return a list of fields to add to the select_related() part of the changelist items query. 
""" return self.list_select_related def get_search_fields(self, request): """ Return a sequence containing the fields to be searched whenever somebody submits a search query. """ return self.search_fields def get_search_results(self, request, queryset, search_term): """ Return a tuple containing a queryset to implement the search and a boolean indicating if the results may contain duplicates. """ # Apply keyword searches. def construct_search(field_name): if field_name.startswith("^"): return "%s__istartswith" % field_name[1:] elif field_name.startswith("="): return "%s__iexact" % field_name[1:] elif field_name.startswith("@"): return "%s__search" % field_name[1:] # Use field_name if it includes a lookup. opts = queryset.model._meta lookup_fields = field_name.split(LOOKUP_SEP) # Go through the fields, following all relations. prev_field = None for path_part in lookup_fields: if path_part == "pk": path_part = opts.pk.name try: field = opts.get_field(path_part) except FieldDoesNotExist: # Use valid query lookups. if prev_field and prev_field.get_lookup(path_part): return field_name else: prev_field = field if hasattr(field, "path_infos"): # Update opts to follow the relation. opts = field.path_infos[-1].to_opts # Otherwise, use the field with icontains. return "%s__icontains" % field_name may_have_duplicates = False search_fields = self.get_search_fields(request) if search_fields and search_term: orm_lookups = [ construct_search(str(search_field)) for search_field in search_fields ] term_queries = [] for bit in smart_split(search_term): if bit.startswith(('"', "'")) and bit[0] == bit[-1]: bit = unescape_string_literal(bit) or_queries = models.Q.create( [(orm_lookup, bit) for orm_lookup in orm_lookups], connector=models.Q.OR, ) term_queries.append(or_queries) queryset = queryset.filter(models.Q.create(term_queries)) may_have_duplicates |= any( lookup_spawns_duplicates(self.opts, search_spec) for search_spec in orm_lookups ) return queryset, may_have_duplicates def get_preserved_filters(self, request): """ Return the preserved filters querystring. """ match = request.resolver_match if self.preserve_filters and match: current_url = "%s:%s" % (match.app_name, match.url_name) changelist_url = "admin:%s_%s_changelist" % ( self.opts.app_label, self.opts.model_name, ) if current_url == changelist_url: preserved_filters = request.GET.urlencode() else: preserved_filters = request.GET.get("_changelist_filters") if preserved_filters: return urlencode({"_changelist_filters": preserved_filters}) return "" def construct_change_message(self, request, form, formsets, add=False): """ Construct a JSON structure describing changes from a changed object. """ return construct_change_message(form, formsets, add) def message_user( self, request, message, level=messages.INFO, extra_tags="", fail_silently=False ): """ Send a message to the user. The default implementation posts a message using the django.contrib.messages backend. Exposes almost the same API as messages.add_message(), but accepts the positional arguments in a different order to maintain backwards compatibility. For convenience, it accepts the `level` argument as a string rather than the usual level number. """ if not isinstance(level, int): # attempt to get the level if passed a string try: level = getattr(messages.constants, level.upper()) except AttributeError: levels = messages.constants.DEFAULT_TAGS.values() levels_repr = ", ".join("`%s`" % level for level in levels) raise ValueError( "Bad message level string: `%s`. 
Possible values are: %s" % (level, levels_repr) ) messages.add_message( request, level, message, extra_tags=extra_tags, fail_silently=fail_silently ) def save_form(self, request, form, change): """ Given a ModelForm return an unsaved instance. ``change`` is True if the object is being changed, and False if it's being added. """ return form.save(commit=False) def save_model(self, request, obj, form, change): """ Given a model instance save it to the database. """ obj.save() def delete_model(self, request, obj): """ Given a model instance delete it from the database. """ obj.delete() def delete_queryset(self, request, queryset): """Given a queryset, delete it from the database.""" queryset.delete() def save_formset(self, request, form, formset, change): """ Given an inline formset save it to the database. """ formset.save() def save_related(self, request, form, formsets, change): """ Given the ``HttpRequest``, the parent ``ModelForm`` instance, the list of inline formsets and a boolean value based on whether the parent is being added or changed, save the related objects to the database. Note that at this point save_form() and save_model() have already been called. """ form.save_m2m() for formset in formsets: self.save_formset(request, form, formset, change=change) def render_change_form( self, request, context, add=False, change=False, form_url="", obj=None ): app_label = self.opts.app_label preserved_filters = self.get_preserved_filters(request) form_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": self.opts}, form_url ) view_on_site_url = self.get_view_on_site_url(obj) has_editable_inline_admin_formsets = False for inline in context["inline_admin_formsets"]: if ( inline.has_add_permission or inline.has_change_permission or inline.has_delete_permission ): has_editable_inline_admin_formsets = True break context.update( { "add": add, "change": change, "has_view_permission": self.has_view_permission(request, obj), "has_add_permission": self.has_add_permission(request), "has_change_permission": self.has_change_permission(request, obj), "has_delete_permission": self.has_delete_permission(request, obj), "has_editable_inline_admin_formsets": ( has_editable_inline_admin_formsets ), "has_file_field": context["adminform"].form.is_multipart() or any( admin_formset.formset.is_multipart() for admin_formset in context["inline_admin_formsets"] ), "has_absolute_url": view_on_site_url is not None, "absolute_url": view_on_site_url, "form_url": form_url, "opts": self.opts, "content_type_id": get_content_type_for_model(self.model).pk, "save_as": self.save_as, "save_on_top": self.save_on_top, "to_field_var": TO_FIELD_VAR, "is_popup_var": IS_POPUP_VAR, "app_label": app_label, } ) if add and self.add_form_template is not None: form_template = self.add_form_template else: form_template = self.change_form_template request.current_app = self.admin_site.name return TemplateResponse( request, form_template or [ "admin/%s/%s/change_form.html" % (app_label, self.opts.model_name), "admin/%s/change_form.html" % app_label, "admin/change_form.html", ], context, ) def response_add(self, request, obj, post_url_continue=None): """ Determine the HttpResponse for the add_view stage. """ opts = obj._meta preserved_filters = self.get_preserved_filters(request) obj_url = reverse( "admin:%s_%s_change" % (opts.app_label, opts.model_name), args=(quote(obj.pk),), current_app=self.admin_site.name, ) # Add a link to the object's change form if the user can edit the obj. 
if self.has_change_permission(request, obj): obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj) else: obj_repr = str(obj) msg_dict = { "name": opts.verbose_name, "obj": obj_repr, } # Here, we distinguish between different save types by checking for # the presence of keys in request.POST. if IS_POPUP_VAR in request.POST: to_field = request.POST.get(TO_FIELD_VAR) if to_field: attr = str(to_field) else: attr = obj._meta.pk.attname value = obj.serializable_value(attr) popup_response_data = json.dumps( { "value": str(value), "obj": str(obj), } ) return TemplateResponse( request, self.popup_response_template or [ "admin/%s/%s/popup_response.html" % (opts.app_label, opts.model_name), "admin/%s/popup_response.html" % opts.app_label, "admin/popup_response.html", ], { "popup_response_data": popup_response_data, }, ) elif "_continue" in request.POST or ( # Redirecting after "Save as new". "_saveasnew" in request.POST and self.save_as_continue and self.has_change_permission(request, obj) ): msg = _("The {name} “{obj}” was added successfully.") if self.has_change_permission(request, obj): msg += " " + _("You may edit it again below.") self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS) if post_url_continue is None: post_url_continue = obj_url post_url_continue = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, post_url_continue, ) return HttpResponseRedirect(post_url_continue) elif "_addanother" in request.POST: msg = format_html( _( "The {name} “{obj}” was added successfully. You may add another " "{name} below." ), **msg_dict, ) self.message_user(request, msg, messages.SUCCESS) redirect_url = request.path redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, redirect_url ) return HttpResponseRedirect(redirect_url) else: msg = format_html( _("The {name} “{obj}” was added successfully."), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) return self.response_post_save_add(request, obj) def response_change(self, request, obj): """ Determine the HttpResponse for the change_view stage. """ if IS_POPUP_VAR in request.POST: opts = obj._meta to_field = request.POST.get(TO_FIELD_VAR) attr = str(to_field) if to_field else opts.pk.attname value = request.resolver_match.kwargs["object_id"] new_value = obj.serializable_value(attr) popup_response_data = json.dumps( { "action": "change", "value": str(value), "obj": str(obj), "new_value": str(new_value), } ) return TemplateResponse( request, self.popup_response_template or [ "admin/%s/%s/popup_response.html" % (opts.app_label, opts.model_name), "admin/%s/popup_response.html" % opts.app_label, "admin/popup_response.html", ], { "popup_response_data": popup_response_data, }, ) opts = self.opts preserved_filters = self.get_preserved_filters(request) msg_dict = { "name": opts.verbose_name, "obj": format_html('<a href="{}">{}</a>', urlquote(request.path), obj), } if "_continue" in request.POST: msg = format_html( _( "The {name} “{obj}” was changed successfully. You may edit it " "again below." ), **msg_dict, ) self.message_user(request, msg, messages.SUCCESS) redirect_url = request.path redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, redirect_url ) return HttpResponseRedirect(redirect_url) elif "_saveasnew" in request.POST: msg = format_html( _( "The {name} “{obj}” was added successfully. You may edit it again " "below." 
), **msg_dict, ) self.message_user(request, msg, messages.SUCCESS) redirect_url = reverse( "admin:%s_%s_change" % (opts.app_label, opts.model_name), args=(obj.pk,), current_app=self.admin_site.name, ) redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, redirect_url ) return HttpResponseRedirect(redirect_url) elif "_addanother" in request.POST: msg = format_html( _( "The {name} “{obj}” was changed successfully. You may add another " "{name} below." ), **msg_dict, ) self.message_user(request, msg, messages.SUCCESS) redirect_url = reverse( "admin:%s_%s_add" % (opts.app_label, opts.model_name), current_app=self.admin_site.name, ) redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, redirect_url ) return HttpResponseRedirect(redirect_url) else: msg = format_html( _("The {name} “{obj}” was changed successfully."), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) return self.response_post_save_change(request, obj) def _response_post_save(self, request, obj): if self.has_view_or_change_permission(request): post_url = reverse( "admin:%s_%s_changelist" % (self.opts.app_label, self.opts.model_name), current_app=self.admin_site.name, ) preserved_filters = self.get_preserved_filters(request) post_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": self.opts}, post_url ) else: post_url = reverse("admin:index", current_app=self.admin_site.name) return HttpResponseRedirect(post_url) def response_post_save_add(self, request, obj): """ Figure out where to redirect after the 'Save' button has been pressed when adding a new object. """ return self._response_post_save(request, obj) def response_post_save_change(self, request, obj): """ Figure out where to redirect after the 'Save' button has been pressed when editing an existing object. """ return self._response_post_save(request, obj) def response_action(self, request, queryset): """ Handle an admin action. This is called if a request is POSTed to the changelist; it returns an HttpResponse if the action was handled, and None otherwise. """ # There can be multiple action forms on the page (at the top # and bottom of the change list, for example). Get the action # whose button was pushed. try: action_index = int(request.POST.get("index", 0)) except ValueError: action_index = 0 # Construct the action form. data = request.POST.copy() data.pop(helpers.ACTION_CHECKBOX_NAME, None) data.pop("index", None) # Use the action whose button was pushed try: data.update({"action": data.getlist("action")[action_index]}) except IndexError: # If we didn't get an action from the chosen form that's invalid # POST data, so by deleting action it'll fail the validation check # below. So no need to do anything here pass action_form = self.action_form(data, auto_id=None) action_form.fields["action"].choices = self.get_action_choices(request) # If the form's valid we can handle the action. if action_form.is_valid(): action = action_form.cleaned_data["action"] select_across = action_form.cleaned_data["select_across"] func = self.get_actions(request)[action][0] # Get the list of selected PKs. If nothing's selected, we can't # perform an action on it, so bail. Except we want to perform # the action explicitly on all objects. selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) if not selected and not select_across: # Reminder that something needs to be selected or nothing will happen msg = _( "Items must be selected in order to perform " "actions on them. 
No items have been changed." ) self.message_user(request, msg, messages.WARNING) return None if not select_across: # Perform the action only on the selected objects queryset = queryset.filter(pk__in=selected) response = func(self, request, queryset) # Actions may return an HttpResponse-like object, which will be # used as the response from the POST. If not, we'll be a good # little HTTP citizen and redirect back to the changelist page. if isinstance(response, HttpResponseBase): return response else: return HttpResponseRedirect(request.get_full_path()) else: msg = _("No action selected.") self.message_user(request, msg, messages.WARNING) return None def response_delete(self, request, obj_display, obj_id): """ Determine the HttpResponse for the delete_view stage. """ if IS_POPUP_VAR in request.POST: popup_response_data = json.dumps( { "action": "delete", "value": str(obj_id), } ) return TemplateResponse( request, self.popup_response_template or [ "admin/%s/%s/popup_response.html" % (self.opts.app_label, self.opts.model_name), "admin/%s/popup_response.html" % self.opts.app_label, "admin/popup_response.html", ], { "popup_response_data": popup_response_data, }, ) self.message_user( request, _("The %(name)s “%(obj)s” was deleted successfully.") % { "name": self.opts.verbose_name, "obj": obj_display, }, messages.SUCCESS, ) if self.has_change_permission(request, None): post_url = reverse( "admin:%s_%s_changelist" % (self.opts.app_label, self.opts.model_name), current_app=self.admin_site.name, ) preserved_filters = self.get_preserved_filters(request) post_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": self.opts}, post_url ) else: post_url = reverse("admin:index", current_app=self.admin_site.name) return HttpResponseRedirect(post_url) def render_delete_form(self, request, context): app_label = self.opts.app_label request.current_app = self.admin_site.name context.update( to_field_var=TO_FIELD_VAR, is_popup_var=IS_POPUP_VAR, media=self.media, ) return TemplateResponse( request, self.delete_confirmation_template or [ "admin/{}/{}/delete_confirmation.html".format( app_label, self.opts.model_name ), "admin/{}/delete_confirmation.html".format(app_label), "admin/delete_confirmation.html", ], context, ) def get_inline_formsets(self, request, formsets, inline_instances, obj=None): # Edit permissions on parent model are required for editable inlines. can_edit_parent = ( self.has_change_permission(request, obj) if obj else self.has_add_permission(request) ) inline_admin_formsets = [] for inline, formset in zip(inline_instances, formsets): fieldsets = list(inline.get_fieldsets(request, obj)) readonly = list(inline.get_readonly_fields(request, obj)) if can_edit_parent: has_add_permission = inline.has_add_permission(request, obj) has_change_permission = inline.has_change_permission(request, obj) has_delete_permission = inline.has_delete_permission(request, obj) else: # Disable all edit-permissions, and override formset settings. 
has_add_permission = ( has_change_permission ) = has_delete_permission = False formset.extra = formset.max_num = 0 has_view_permission = inline.has_view_permission(request, obj) prepopulated = dict(inline.get_prepopulated_fields(request, obj)) inline_admin_formset = helpers.InlineAdminFormSet( inline, formset, fieldsets, prepopulated, readonly, model_admin=self, has_add_permission=has_add_permission, has_change_permission=has_change_permission, has_delete_permission=has_delete_permission, has_view_permission=has_view_permission, ) inline_admin_formsets.append(inline_admin_formset) return inline_admin_formsets def get_changeform_initial_data(self, request): """ Get the initial form data from the request's GET params. """ initial = dict(request.GET.items()) for k in initial: try: f = self.opts.get_field(k) except FieldDoesNotExist: continue # We have to special-case M2Ms as a list of comma-separated PKs. if isinstance(f, models.ManyToManyField): initial[k] = initial[k].split(",") return initial def _get_obj_does_not_exist_redirect(self, request, opts, object_id): """ Create a message informing the user that the object doesn't exist and return a redirect to the admin index page. """ msg = _("%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?") % { "name": opts.verbose_name, "key": unquote(object_id), } self.message_user(request, msg, messages.WARNING) url = reverse("admin:index", current_app=self.admin_site.name) return HttpResponseRedirect(url) @csrf_protect_m def changeform_view(self, request, object_id=None, form_url="", extra_context=None): with transaction.atomic(using=router.db_for_write(self.model)): return self._changeform_view(request, object_id, form_url, extra_context) def _changeform_view(self, request, object_id, form_url, extra_context): to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR)) if to_field and not self.to_field_allowed(request, to_field): raise DisallowedModelAdminToField( "The field %s cannot be referenced." 
% to_field ) if request.method == "POST" and "_saveasnew" in request.POST: object_id = None add = object_id is None if add: if not self.has_add_permission(request): raise PermissionDenied obj = None else: obj = self.get_object(request, unquote(object_id), to_field) if request.method == "POST": if not self.has_change_permission(request, obj): raise PermissionDenied else: if not self.has_view_or_change_permission(request, obj): raise PermissionDenied if obj is None: return self._get_obj_does_not_exist_redirect( request, self.opts, object_id ) fieldsets = self.get_fieldsets(request, obj) ModelForm = self.get_form( request, obj, change=not add, fields=flatten_fieldsets(fieldsets) ) if request.method == "POST": form = ModelForm(request.POST, request.FILES, instance=obj) formsets, inline_instances = self._create_formsets( request, form.instance, change=not add, ) form_validated = form.is_valid() if form_validated: new_object = self.save_form(request, form, change=not add) else: new_object = form.instance if all_valid(formsets) and form_validated: self.save_model(request, new_object, form, not add) self.save_related(request, form, formsets, not add) change_message = self.construct_change_message( request, form, formsets, add ) if add: self.log_addition(request, new_object, change_message) return self.response_add(request, new_object) else: self.log_change(request, new_object, change_message) return self.response_change(request, new_object) else: form_validated = False else: if add: initial = self.get_changeform_initial_data(request) form = ModelForm(initial=initial) formsets, inline_instances = self._create_formsets( request, form.instance, change=False ) else: form = ModelForm(instance=obj) formsets, inline_instances = self._create_formsets( request, obj, change=True ) if not add and not self.has_change_permission(request, obj): readonly_fields = flatten_fieldsets(fieldsets) else: readonly_fields = self.get_readonly_fields(request, obj) admin_form = helpers.AdminForm( form, list(fieldsets), # Clear prepopulated fields on a view-only form to avoid a crash. self.get_prepopulated_fields(request, obj) if add or self.has_change_permission(request, obj) else {}, readonly_fields, model_admin=self, ) media = self.media + admin_form.media inline_formsets = self.get_inline_formsets( request, formsets, inline_instances, obj ) for inline_formset in inline_formsets: media += inline_formset.media if add: title = _("Add %s") elif self.has_change_permission(request, obj): title = _("Change %s") else: title = _("View %s") context = { **self.admin_site.each_context(request), "title": title % self.opts.verbose_name, "subtitle": str(obj) if obj else None, "adminform": admin_form, "object_id": object_id, "original": obj, "is_popup": IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET, "to_field": to_field, "media": media, "inline_admin_formsets": inline_formsets, "errors": helpers.AdminErrorList(form, formsets), "preserved_filters": self.get_preserved_filters(request), } # Hide the "Save" and "Save and continue" buttons if "Save as New" was # previously chosen to prevent the interface from getting confusing. if ( request.method == "POST" and not form_validated and "_saveasnew" in request.POST ): context["show_save"] = False context["show_save_and_continue"] = False # Use the change template instead of the add template. 
add = False context.update(extra_context or {}) return self.render_change_form( request, context, add=add, change=not add, obj=obj, form_url=form_url ) def add_view(self, request, form_url="", extra_context=None): return self.changeform_view(request, None, form_url, extra_context) def change_view(self, request, object_id, form_url="", extra_context=None): return self.changeform_view(request, object_id, form_url, extra_context) def _get_edited_object_pks(self, request, prefix): """Return POST data values of list_editable primary keys.""" pk_pattern = re.compile( r"{}-\d+-{}$".format(re.escape(prefix), self.opts.pk.name) ) return [value for key, value in request.POST.items() if pk_pattern.match(key)] def _get_list_editable_queryset(self, request, prefix): """ Based on POST data, return a queryset of the objects that were edited via list_editable. """ object_pks = self._get_edited_object_pks(request, prefix) queryset = self.get_queryset(request) validate = queryset.model._meta.pk.to_python try: for pk in object_pks: validate(pk) except ValidationError: # Disable the optimization if the POST data was tampered with. return queryset return queryset.filter(pk__in=object_pks) @csrf_protect_m def changelist_view(self, request, extra_context=None): """ The 'change list' admin view for this model. """ from django.contrib.admin.views.main import ERROR_FLAG app_label = self.opts.app_label if not self.has_view_or_change_permission(request): raise PermissionDenied try: cl = self.get_changelist_instance(request) except IncorrectLookupParameters: # Wacky lookup parameters were given, so redirect to the main # changelist page, without parameters, and pass an 'invalid=1' # parameter via the query string. If wacky parameters were given # and the 'invalid=1' parameter was already in the query string, # something is screwed up with the database, so display an error # page. if ERROR_FLAG in request.GET: return SimpleTemplateResponse( "admin/invalid_setup.html", { "title": _("Database error"), }, ) return HttpResponseRedirect(request.path + "?" + ERROR_FLAG + "=1") # If the request was POSTed, this might be a bulk action or a bulk # edit. Try to look up an action or confirmation first, but if this # isn't an action the POST will fall through to the bulk edit check, # below. action_failed = False selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) actions = self.get_actions(request) # Actions with no confirmation if ( actions and request.method == "POST" and "index" in request.POST and "_save" not in request.POST ): if selected: response = self.response_action( request, queryset=cl.get_queryset(request) ) if response: return response else: action_failed = True else: msg = _( "Items must be selected in order to perform " "actions on them. No items have been changed." ) self.message_user(request, msg, messages.WARNING) action_failed = True # Actions with confirmation if ( actions and request.method == "POST" and helpers.ACTION_CHECKBOX_NAME in request.POST and "index" not in request.POST and "_save" not in request.POST ): if selected: response = self.response_action( request, queryset=cl.get_queryset(request) ) if response: return response else: action_failed = True if action_failed: # Redirect back to the changelist page to avoid resubmitting the # form if the user refreshes the browser or uses the "No, take # me back" button on the action confirmation page. 
return HttpResponseRedirect(request.get_full_path()) # If we're allowing changelist editing, we need to construct a formset # for the changelist given all the fields to be edited. Then we'll # use the formset to validate/process POSTed data. formset = cl.formset = None # Handle POSTed bulk-edit data. if request.method == "POST" and cl.list_editable and "_save" in request.POST: if not self.has_change_permission(request): raise PermissionDenied FormSet = self.get_changelist_formset(request) modified_objects = self._get_list_editable_queryset( request, FormSet.get_default_prefix() ) formset = cl.formset = FormSet( request.POST, request.FILES, queryset=modified_objects ) if formset.is_valid(): changecount = 0 with transaction.atomic(using=router.db_for_write(self.model)): for form in formset.forms: if form.has_changed(): obj = self.save_form(request, form, change=True) self.save_model(request, obj, form, change=True) self.save_related(request, form, formsets=[], change=True) change_msg = self.construct_change_message( request, form, None ) self.log_change(request, obj, change_msg) changecount += 1 if changecount: msg = ngettext( "%(count)s %(name)s was changed successfully.", "%(count)s %(name)s were changed successfully.", changecount, ) % { "count": changecount, "name": model_ngettext(self.opts, changecount), } self.message_user(request, msg, messages.SUCCESS) return HttpResponseRedirect(request.get_full_path()) # Handle GET -- construct a formset for display. elif cl.list_editable and self.has_change_permission(request): FormSet = self.get_changelist_formset(request) formset = cl.formset = FormSet(queryset=cl.result_list) # Build the list of media to be used by the formset. if formset: media = self.media + formset.media else: media = self.media # Build the action form and populate it with available actions. if actions: action_form = self.action_form(auto_id=None) action_form.fields["action"].choices = self.get_action_choices(request) media += action_form.media else: action_form = None selection_note_all = ngettext( "%(total_count)s selected", "All %(total_count)s selected", cl.result_count ) context = { **self.admin_site.each_context(request), "module_name": str(self.opts.verbose_name_plural), "selection_note": _("0 of %(cnt)s selected") % {"cnt": len(cl.result_list)}, "selection_note_all": selection_note_all % {"total_count": cl.result_count}, "title": cl.title, "subtitle": None, "is_popup": cl.is_popup, "to_field": cl.to_field, "cl": cl, "media": media, "has_add_permission": self.has_add_permission(request), "opts": cl.opts, "action_form": action_form, "actions_on_top": self.actions_on_top, "actions_on_bottom": self.actions_on_bottom, "actions_selection_counter": self.actions_selection_counter, "preserved_filters": self.get_preserved_filters(request), **(extra_context or {}), } request.current_app = self.admin_site.name return TemplateResponse( request, self.change_list_template or [ "admin/%s/%s/change_list.html" % (app_label, self.opts.model_name), "admin/%s/change_list.html" % app_label, "admin/change_list.html", ], context, ) def get_deleted_objects(self, objs, request): """ Hook for customizing the delete process for the delete view and the "delete selected" action. 
""" return get_deleted_objects(objs, request, self.admin_site) @csrf_protect_m def delete_view(self, request, object_id, extra_context=None): with transaction.atomic(using=router.db_for_write(self.model)): return self._delete_view(request, object_id, extra_context) def _delete_view(self, request, object_id, extra_context): "The 'delete' admin view for this model." app_label = self.opts.app_label to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR)) if to_field and not self.to_field_allowed(request, to_field): raise DisallowedModelAdminToField( "The field %s cannot be referenced." % to_field ) obj = self.get_object(request, unquote(object_id), to_field) if not self.has_delete_permission(request, obj): raise PermissionDenied if obj is None: return self._get_obj_does_not_exist_redirect(request, self.opts, object_id) # Populate deleted_objects, a data structure of all related objects that # will also be deleted. ( deleted_objects, model_count, perms_needed, protected, ) = self.get_deleted_objects([obj], request) if request.POST and not protected: # The user has confirmed the deletion. if perms_needed: raise PermissionDenied obj_display = str(obj) attr = str(to_field) if to_field else self.opts.pk.attname obj_id = obj.serializable_value(attr) self.log_deletion(request, obj, obj_display) self.delete_model(request, obj) return self.response_delete(request, obj_display, obj_id) object_name = str(self.opts.verbose_name) if perms_needed or protected: title = _("Cannot delete %(name)s") % {"name": object_name} else: title = _("Are you sure?") context = { **self.admin_site.each_context(request), "title": title, "subtitle": None, "object_name": object_name, "object": obj, "deleted_objects": deleted_objects, "model_count": dict(model_count).items(), "perms_lacking": perms_needed, "protected": protected, "opts": self.opts, "app_label": app_label, "preserved_filters": self.get_preserved_filters(request), "is_popup": IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET, "to_field": to_field, **(extra_context or {}), } return self.render_delete_form(request, context) def history_view(self, request, object_id, extra_context=None): "The 'history' admin view for this model." from django.contrib.admin.models import LogEntry from django.contrib.admin.views.main import PAGE_VAR # First check if the user can see this history. model = self.model obj = self.get_object(request, unquote(object_id)) if obj is None: return self._get_obj_does_not_exist_redirect( request, model._meta, object_id ) if not self.has_view_or_change_permission(request, obj): raise PermissionDenied # Then get the history for this object. 
app_label = self.opts.app_label action_list = ( LogEntry.objects.filter( object_id=unquote(object_id), content_type=get_content_type_for_model(model), ) .select_related() .order_by("action_time") ) paginator = self.get_paginator(request, action_list, 100) page_number = request.GET.get(PAGE_VAR, 1) page_obj = paginator.get_page(page_number) page_range = paginator.get_elided_page_range(page_obj.number) context = { **self.admin_site.each_context(request), "title": _("Change history: %s") % obj, "subtitle": None, "action_list": page_obj, "page_range": page_range, "page_var": PAGE_VAR, "pagination_required": paginator.count > 100, "module_name": str(capfirst(self.opts.verbose_name_plural)), "object": obj, "opts": self.opts, "preserved_filters": self.get_preserved_filters(request), **(extra_context or {}), } request.current_app = self.admin_site.name return TemplateResponse( request, self.object_history_template or [ "admin/%s/%s/object_history.html" % (app_label, self.opts.model_name), "admin/%s/object_history.html" % app_label, "admin/object_history.html", ], context, ) def get_formset_kwargs(self, request, obj, inline, prefix): formset_params = { "instance": obj, "prefix": prefix, "queryset": inline.get_queryset(request), } if request.method == "POST": formset_params.update( { "data": request.POST.copy(), "files": request.FILES, "save_as_new": "_saveasnew" in request.POST, } ) return formset_params def _create_formsets(self, request, obj, change): "Helper function to generate formsets for add/change_view." formsets = [] inline_instances = [] prefixes = {} get_formsets_args = [request] if change: get_formsets_args.append(obj) for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args): prefix = FormSet.get_default_prefix() prefixes[prefix] = prefixes.get(prefix, 0) + 1 if prefixes[prefix] != 1 or not prefix: prefix = "%s-%s" % (prefix, prefixes[prefix]) formset_params = self.get_formset_kwargs(request, obj, inline, prefix) formset = FormSet(**formset_params) def user_deleted_form(request, obj, formset, index, inline): """Return whether or not the user deleted the form.""" return ( inline.has_delete_permission(request, obj) and "{}-{}-DELETE".format(formset.prefix, index) in request.POST ) # Bypass validation of each view-only inline form (since the form's # data won't be in request.POST), unless the form was deleted. if not inline.has_change_permission(request, obj if change else None): for index, form in enumerate(formset.initial_forms): if user_deleted_form(request, obj, formset, index, inline): continue form._errors = {} form.cleaned_data = form.initial formsets.append(formset) inline_instances.append(inline) return formsets, inline_instances class InlineModelAdmin(BaseModelAdmin): """ Options for inline editing of ``model`` instances. Provide ``fk_name`` to specify the attribute name of the ``ForeignKey`` from ``model`` to its parent. This is required if ``model`` has more than one ``ForeignKey`` to its parent. 
""" model = None fk_name = None formset = BaseInlineFormSet extra = 3 min_num = None max_num = None template = None verbose_name = None verbose_name_plural = None can_delete = True show_change_link = False checks_class = InlineModelAdminChecks classes = None def __init__(self, parent_model, admin_site): self.admin_site = admin_site self.parent_model = parent_model self.opts = self.model._meta self.has_registered_model = admin_site.is_registered(self.model) super().__init__() if self.verbose_name_plural is None: if self.verbose_name is None: self.verbose_name_plural = self.opts.verbose_name_plural else: self.verbose_name_plural = format_lazy("{}s", self.verbose_name) if self.verbose_name is None: self.verbose_name = self.opts.verbose_name @property def media(self): extra = "" if settings.DEBUG else ".min" js = ["vendor/jquery/jquery%s.js" % extra, "jquery.init.js", "inlines.js"] if self.filter_vertical or self.filter_horizontal: js.extend(["SelectBox.js", "SelectFilter2.js"]) if self.classes and "collapse" in self.classes: js.append("collapse.js") return forms.Media(js=["admin/js/%s" % url for url in js]) def get_extra(self, request, obj=None, **kwargs): """Hook for customizing the number of extra inline forms.""" return self.extra def get_min_num(self, request, obj=None, **kwargs): """Hook for customizing the min number of inline forms.""" return self.min_num def get_max_num(self, request, obj=None, **kwargs): """Hook for customizing the max number of extra inline forms.""" return self.max_num def get_formset(self, request, obj=None, **kwargs): """Return a BaseInlineFormSet class for use in admin add/change views.""" if "fields" in kwargs: fields = kwargs.pop("fields") else: fields = flatten_fieldsets(self.get_fieldsets(request, obj)) excluded = self.get_exclude(request, obj) exclude = [] if excluded is None else list(excluded) exclude.extend(self.get_readonly_fields(request, obj)) if excluded is None and hasattr(self.form, "_meta") and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # InlineModelAdmin doesn't define its own. exclude.extend(self.form._meta.exclude) # If exclude is an empty list we use None, since that's the actual # default. exclude = exclude or None can_delete = self.can_delete and self.has_delete_permission(request, obj) defaults = { "form": self.form, "formset": self.formset, "fk_name": self.fk_name, "fields": fields, "exclude": exclude, "formfield_callback": partial(self.formfield_for_dbfield, request=request), "extra": self.get_extra(request, obj, **kwargs), "min_num": self.get_min_num(request, obj, **kwargs), "max_num": self.get_max_num(request, obj, **kwargs), "can_delete": can_delete, **kwargs, } base_model_form = defaults["form"] can_change = self.has_change_permission(request, obj) if request else True can_add = self.has_add_permission(request, obj) if request else True class DeleteProtectedModelForm(base_model_form): def hand_clean_DELETE(self): """ We don't validate the 'DELETE' field itself because on templates it's not rendered using the field information, but just using a generic "deletion_field" of the InlineModelAdmin. """ if self.cleaned_data.get(DELETION_FIELD_NAME, False): using = router.db_for_write(self._meta.model) collector = NestedObjects(using=using) if self.instance._state.adding: return collector.collect([self.instance]) if collector.protected: objs = [] for p in collector.protected: objs.append( # Translators: Model verbose name and instance # representation, suitable to be an item in a # list. 
_("%(class_name)s %(instance)s") % {"class_name": p._meta.verbose_name, "instance": p} ) params = { "class_name": self._meta.model._meta.verbose_name, "instance": self.instance, "related_objects": get_text_list(objs, _("and")), } msg = _( "Deleting %(class_name)s %(instance)s would require " "deleting the following protected related objects: " "%(related_objects)s" ) raise ValidationError( msg, code="deleting_protected", params=params ) def is_valid(self): result = super().is_valid() self.hand_clean_DELETE() return result def has_changed(self): # Protect against unauthorized edits. if not can_change and not self.instance._state.adding: return False if not can_add and self.instance._state.adding: return False return super().has_changed() defaults["form"] = DeleteProtectedModelForm if defaults["fields"] is None and not modelform_defines_fields( defaults["form"] ): defaults["fields"] = forms.ALL_FIELDS return inlineformset_factory(self.parent_model, self.model, **defaults) def _get_form_for_get_fields(self, request, obj=None): return self.get_formset(request, obj, fields=None).form def get_queryset(self, request): queryset = super().get_queryset(request) if not self.has_view_or_change_permission(request): queryset = queryset.none() return queryset def _has_any_perms_for_target_model(self, request, perms): """ This method is called only when the ModelAdmin's model is for an ManyToManyField's implicit through model (if self.opts.auto_created). Return True if the user has any of the given permissions ('add', 'change', etc.) for the model that points to the through model. """ opts = self.opts # Find the target model of an auto-created many-to-many relationship. for field in opts.fields: if field.remote_field and field.remote_field.model != self.parent_model: opts = field.remote_field.model._meta break return any( request.user.has_perm( "%s.%s" % (opts.app_label, get_permission_codename(perm, opts)) ) for perm in perms ) def has_add_permission(self, request, obj): if self.opts.auto_created: # Auto-created intermediate models don't have their own # permissions. The user needs to have the change permission for the # related model in order to be able to do anything with the # intermediate model. return self._has_any_perms_for_target_model(request, ["change"]) return super().has_add_permission(request) def has_change_permission(self, request, obj=None): if self.opts.auto_created: # Same comment as has_add_permission(). return self._has_any_perms_for_target_model(request, ["change"]) return super().has_change_permission(request) def has_delete_permission(self, request, obj=None): if self.opts.auto_created: # Same comment as has_add_permission(). return self._has_any_perms_for_target_model(request, ["change"]) return super().has_delete_permission(request, obj) def has_view_permission(self, request, obj=None): if self.opts.auto_created: # Same comment as has_add_permission(). The 'change' permission # also implies the 'view' permission. return self._has_any_perms_for_target_model(request, ["view", "change"]) return super().has_view_permission(request) class StackedInline(InlineModelAdmin): template = "admin/edit_inline/stacked.html" class TabularInline(InlineModelAdmin): template = "admin/edit_inline/tabular.html"
e4bd3ce5386f08771a378c53a254c9abcde11dfdcf286cb0194a50de38075fb2
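# ---------------------------------------------------------------------------
# Illustrative usage (editor's sketch, not part of Django itself) of the
# GenericForeignKey and GenericRelation classes defined in this module. The
# TaggedItem and Bookmark models are hypothetical assumptions made only for
# this example.
#
# from django.contrib.contenttypes.fields import (
#     GenericForeignKey,
#     GenericRelation,
# )
# from django.contrib.contenttypes.models import ContentType
# from django.db import models
#
#
# class TaggedItem(models.Model):
#     tag = models.SlugField()
#     # The two concrete columns that back the generic relation:
#     content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#     object_id = models.PositiveIntegerField()
#     # GenericForeignKey ties them together; it is an accessor, not a column.
#     content_object = GenericForeignKey("content_type", "object_id")
#
#
# class Bookmark(models.Model):
#     url = models.URLField()
#     # Reverse side: enables Bookmark.objects.filter(tags__tag="django") and
#     # bookmark.tags.all() via the related manager.
#     tags = GenericRelation(TaggedItem)
# ---------------------------------------------------------------------------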
import functools import itertools from collections import defaultdict from django.contrib.contenttypes.models import ContentType from django.core import checks from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist from django.db import DEFAULT_DB_ALIAS, models, router, transaction from django.db.models import DO_NOTHING, ForeignObject, ForeignObjectRel from django.db.models.base import ModelBase, make_foreign_order_accessors from django.db.models.fields.mixins import FieldCacheMixin from django.db.models.fields.related import ( ReverseManyToOneDescriptor, lazy_related_operation, ) from django.db.models.query_utils import PathInfo from django.db.models.sql import AND from django.db.models.sql.where import WhereNode from django.db.models.utils import AltersData from django.utils.functional import cached_property class GenericForeignKey(FieldCacheMixin): """ Provide a generic many-to-one relation through the ``content_type`` and ``object_id`` fields. This class also doubles as an accessor to the related object (similar to ForwardManyToOneDescriptor) by adding itself as a model attribute. """ # Field flags auto_created = False concrete = False editable = False hidden = False is_relation = True many_to_many = False many_to_one = True one_to_many = False one_to_one = False related_model = None remote_field = None def __init__( self, ct_field="content_type", fk_field="object_id", for_concrete_model=True ): self.ct_field = ct_field self.fk_field = fk_field self.for_concrete_model = for_concrete_model self.editable = False self.rel = None self.column = None def contribute_to_class(self, cls, name, **kwargs): self.name = name self.model = cls cls._meta.add_field(self, private=True) setattr(cls, name, self) def get_filter_kwargs_for_object(self, obj): """See corresponding method on Field""" return { self.fk_field: getattr(obj, self.fk_field), self.ct_field: getattr(obj, self.ct_field), } def get_forward_related_filter(self, obj): """See corresponding method on RelatedField""" return { self.fk_field: obj.pk, self.ct_field: ContentType.objects.get_for_model(obj).pk, } def __str__(self): model = self.model return "%s.%s" % (model._meta.label, self.name) def check(self, **kwargs): return [ *self._check_field_name(), *self._check_object_id_field(), *self._check_content_type_field(), ] def _check_field_name(self): if self.name.endswith("_"): return [ checks.Error( "Field names must not end with an underscore.", obj=self, id="fields.E001", ) ] else: return [] def _check_object_id_field(self): try: self.model._meta.get_field(self.fk_field) except FieldDoesNotExist: return [ checks.Error( "The GenericForeignKey object ID references the " "nonexistent field '%s'." % self.fk_field, obj=self, id="contenttypes.E001", ) ] else: return [] def _check_content_type_field(self): """ Check if field named `field_name` in model `model` exists and is a valid content_type field (is a ForeignKey to ContentType). """ try: field = self.model._meta.get_field(self.ct_field) except FieldDoesNotExist: return [ checks.Error( "The GenericForeignKey content type references the " "nonexistent field '%s.%s'." % (self.model._meta.object_name, self.ct_field), obj=self, id="contenttypes.E002", ) ] else: if not isinstance(field, models.ForeignKey): return [ checks.Error( "'%s.%s' is not a ForeignKey." % (self.model._meta.object_name, self.ct_field), hint=( "GenericForeignKeys must use a ForeignKey to " "'contenttypes.ContentType' as the 'content_type' field." 
), obj=self, id="contenttypes.E003", ) ] elif field.remote_field.model != ContentType: return [ checks.Error( "'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (self.model._meta.object_name, self.ct_field), hint=( "GenericForeignKeys must use a ForeignKey to " "'contenttypes.ContentType' as the 'content_type' field." ), obj=self, id="contenttypes.E004", ) ] else: return [] def get_cache_name(self): return self.name def get_content_type(self, obj=None, id=None, using=None): if obj is not None: return ContentType.objects.db_manager(obj._state.db).get_for_model( obj, for_concrete_model=self.for_concrete_model ) elif id is not None: return ContentType.objects.db_manager(using).get_for_id(id) else: # This should never happen. I love comments like this, don't you? raise Exception("Impossible arguments to GFK.get_content_type!") def get_prefetch_queryset(self, instances, queryset=None): if queryset is not None: raise ValueError("Custom queryset can't be used for this lookup.") # For efficiency, group the instances by content type and then do one # query per model fk_dict = defaultdict(set) # We need one instance for each group in order to get the right db: instance_dict = {} ct_attname = self.model._meta.get_field(self.ct_field).get_attname() for instance in instances: # We avoid looking for values if either ct_id or fkey value is None ct_id = getattr(instance, ct_attname) if ct_id is not None: fk_val = getattr(instance, self.fk_field) if fk_val is not None: fk_dict[ct_id].add(fk_val) instance_dict[ct_id] = instance ret_val = [] for ct_id, fkeys in fk_dict.items(): instance = instance_dict[ct_id] ct = self.get_content_type(id=ct_id, using=instance._state.db) ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys)) # For doing the join in Python, we have to match both the FK val and the # content type, so we use a callable that returns a (fk, class) pair. def gfk_key(obj): ct_id = getattr(obj, ct_attname) if ct_id is None: return None else: model = self.get_content_type( id=ct_id, using=obj._state.db ).model_class() return ( model._meta.pk.get_prep_value(getattr(obj, self.fk_field)), model, ) return ( ret_val, lambda obj: (obj.pk, obj.__class__), gfk_key, True, self.name, False, ) def __get__(self, instance, cls=None): if instance is None: return self # Don't use getattr(instance, self.ct_field) here because that might # reload the same ContentType over and over (#5570). Instead, get the # content type ID here, and later when the actual instance is needed, # use ContentType.objects.get_for_id(), which has a global cache. 
f = self.model._meta.get_field(self.ct_field) ct_id = getattr(instance, f.get_attname(), None) pk_val = getattr(instance, self.fk_field) rel_obj = self.get_cached_value(instance, default=None) if rel_obj is None and self.is_cached(instance): return rel_obj if rel_obj is not None: ct_match = ( ct_id == self.get_content_type(obj=rel_obj, using=instance._state.db).id ) pk_match = rel_obj._meta.pk.to_python(pk_val) == rel_obj.pk if ct_match and pk_match: return rel_obj else: rel_obj = None if ct_id is not None: ct = self.get_content_type(id=ct_id, using=instance._state.db) try: rel_obj = ct.get_object_for_this_type(pk=pk_val) except ObjectDoesNotExist: pass self.set_cached_value(instance, rel_obj) return rel_obj def __set__(self, instance, value): ct = None fk = None if value is not None: ct = self.get_content_type(obj=value) fk = value.pk setattr(instance, self.ct_field, ct) setattr(instance, self.fk_field, fk) self.set_cached_value(instance, value) class GenericRel(ForeignObjectRel): """ Used by GenericRelation to store information about the relation. """ def __init__( self, field, to, related_name=None, related_query_name=None, limit_choices_to=None, ): super().__init__( field, to, related_name=related_query_name or "+", related_query_name=related_query_name, limit_choices_to=limit_choices_to, on_delete=DO_NOTHING, ) class GenericRelation(ForeignObject): """ Provide a reverse to a relation created by a GenericForeignKey. """ # Field flags auto_created = False empty_strings_allowed = False many_to_many = False many_to_one = False one_to_many = True one_to_one = False rel_class = GenericRel mti_inherited = False def __init__( self, to, object_id_field="object_id", content_type_field="content_type", for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs, ): kwargs["rel"] = self.rel_class( self, to, related_query_name=related_query_name, limit_choices_to=limit_choices_to, ) # Reverse relations are always nullable (Django can't enforce that a # foreign key on the related model points to this model). kwargs["null"] = True kwargs["blank"] = True kwargs["on_delete"] = models.CASCADE kwargs["editable"] = False kwargs["serialize"] = False # This construct is somewhat of an abuse of ForeignObject. This field # represents a relation from pk to object_id field. But, this relation # isn't direct, the join is generated reverse along foreign key. So, # the from_field is object_id field, to_field is pk because of the # reverse join. super().__init__(to, from_fields=[object_id_field], to_fields=[], **kwargs) self.object_id_field_name = object_id_field self.content_type_field_name = content_type_field self.for_concrete_model = for_concrete_model def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_generic_foreign_key_existence(), ] def _is_matching_generic_foreign_key(self, field): """ Return True if field is a GenericForeignKey whose content type and object id fields correspond to the equivalent attributes on this GenericRelation. """ return ( isinstance(field, GenericForeignKey) and field.ct_field == self.content_type_field_name and field.fk_field == self.object_id_field_name ) def _check_generic_foreign_key_existence(self): target = self.remote_field.model if isinstance(target, ModelBase): fields = target._meta.private_fields if any(self._is_matching_generic_foreign_key(field) for field in fields): return [] else: return [ checks.Error( "The GenericRelation defines a relation with the model " "'%s', but that model does not have a GenericForeignKey." 
% target._meta.label, obj=self, id="contenttypes.E004", ) ] else: return [] def resolve_related_fields(self): self.to_fields = [self.model._meta.pk.name] return [ ( self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk, ) ] def _get_path_info_with_parent(self, filtered_relation): """ Return the path that joins the current model through any parent models. The idea is that if you have a GFK defined on a parent model then we need to join the parent model first, then the child model. """ # With an inheritance chain ChildTag -> Tag and Tag defines the # GenericForeignKey, and a TaggedItem model has a GenericRelation to # ChildTag, then we need to generate a join from TaggedItem to Tag # (as Tag.object_id == TaggedItem.pk), and another join from Tag to # ChildTag (as that is where the relation is to). Do this by first # generating a join to the parent model, then generating joins to the # child models. path = [] opts = self.remote_field.model._meta.concrete_model._meta parent_opts = opts.get_field(self.object_id_field_name).model._meta target = parent_opts.pk path.append( PathInfo( from_opts=self.model._meta, to_opts=parent_opts, target_fields=(target,), join_field=self.remote_field, m2m=True, direct=False, filtered_relation=filtered_relation, ) ) # Collect joins needed for the parent -> child chain. This is easiest # to do if we collect joins for the child -> parent chain and then # reverse the direction (call to reverse() and use of # field.remote_field.get_path_info()). parent_field_chain = [] while parent_opts != opts: field = opts.get_ancestor_link(parent_opts.model) parent_field_chain.append(field) opts = field.remote_field.model._meta parent_field_chain.reverse() for field in parent_field_chain: path.extend(field.remote_field.path_infos) return path def get_path_info(self, filtered_relation=None): opts = self.remote_field.model._meta object_id_field = opts.get_field(self.object_id_field_name) if object_id_field.model != opts.model: return self._get_path_info_with_parent(filtered_relation) else: target = opts.pk return [ PathInfo( from_opts=self.model._meta, to_opts=opts, target_fields=(target,), join_field=self.remote_field, m2m=True, direct=False, filtered_relation=filtered_relation, ) ] def get_reverse_path_info(self, filtered_relation=None): opts = self.model._meta from_opts = self.remote_field.model._meta return [ PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, ) ] def value_to_string(self, obj): qs = getattr(obj, self.name).all() return str([instance.pk for instance in qs]) def contribute_to_class(self, cls, name, **kwargs): kwargs["private_only"] = True super().contribute_to_class(cls, name, **kwargs) self.model = cls # Disable the reverse relation for fields inherited by subclasses of a # model in multi-table inheritance. The reverse relation points to the # field of the base model. if self.mti_inherited: self.remote_field.related_name = "+" self.remote_field.related_query_name = None setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field)) # Add get_RELATED_order() and set_RELATED_order() to the model this # field belongs to, if the model on the other end of this relation # is ordered with respect to its corresponding GenericForeignKey. 
if not cls._meta.abstract: def make_generic_foreign_order_accessors(related_model, model): if self._is_matching_generic_foreign_key( model._meta.order_with_respect_to ): make_foreign_order_accessors(model, related_model) lazy_related_operation( make_generic_foreign_order_accessors, self.model, self.remote_field.model, ) def set_attributes_from_rel(self): pass def get_internal_type(self): return "ManyToManyField" def get_content_type(self): """ Return the content type associated with this field's model. """ return ContentType.objects.get_for_model( self.model, for_concrete_model=self.for_concrete_model ) def get_extra_restriction(self, alias, remote_alias): field = self.remote_field.model._meta.get_field(self.content_type_field_name) contenttype_pk = self.get_content_type().pk lookup = field.get_lookup("exact")(field.get_col(remote_alias), contenttype_pk) return WhereNode([lookup], connector=AND) def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS): """ Return all objects related to ``objs`` via this ``GenericRelation``. """ return self.remote_field.model._base_manager.db_manager(using).filter( **{ "%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using) .get_for_model(self.model, for_concrete_model=self.for_concrete_model) .pk, "%s__in" % self.object_id_field_name: [obj.pk for obj in objs], } ) class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor): """ Accessor to the related objects manager on the one-to-many relation created by GenericRelation. In the example:: class Post(Model): comments = GenericRelation(Comment) ``post.comments`` is a ReverseGenericManyToOneDescriptor instance. """ @cached_property def related_manager_cls(self): return create_generic_related_manager( self.rel.model._default_manager.__class__, self.rel, ) def create_generic_related_manager(superclass, rel): """ Factory function to create a manager that subclasses another manager (generally the default manager of a given model) and adds behaviors specific to generic relations. """ class GenericRelatedObjectManager(superclass, AltersData): def __init__(self, instance=None): super().__init__() self.instance = instance self.model = rel.model self.get_content_type = functools.partial( ContentType.objects.db_manager(instance._state.db).get_for_model, for_concrete_model=rel.field.for_concrete_model, ) self.content_type = self.get_content_type(instance) self.content_type_field_name = rel.field.content_type_field_name self.object_id_field_name = rel.field.object_id_field_name self.prefetch_cache_name = rel.field.attname self.pk_val = instance.pk self.core_filters = { "%s__pk" % self.content_type_field_name: self.content_type.id, self.object_id_field_name: self.pk_val, } def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_generic_related_manager(manager.__class__, rel) return manager_class(instance=self.instance) do_not_call_in_templates = True def __str__(self): return repr(self) def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. 
""" db = self._db or router.db_for_read(self.model, instance=self.instance) return queryset.using(db).filter(**self.core_filters) def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) # Group instances by content types. content_type_queries = [ models.Q.create( [ (f"{self.content_type_field_name}__pk", content_type_id), (f"{self.object_id_field_name}__in", {obj.pk for obj in objs}), ] ) for content_type_id, objs in itertools.groupby( sorted(instances, key=lambda obj: self.get_content_type(obj).pk), lambda obj: self.get_content_type(obj).pk, ) ] query = models.Q.create(content_type_queries, connector=models.Q.OR) # We (possibly) need to convert object IDs to the type of the # instances' PK in order to match up instances: object_id_converter = instances[0]._meta.pk.to_python content_type_id_field_name = "%s_id" % self.content_type_field_name return ( queryset.filter(query), lambda relobj: ( object_id_converter(getattr(relobj, self.object_id_field_name)), getattr(relobj, content_type_id_field_name), ), lambda obj: (obj.pk, self.get_content_type(obj).pk), False, self.prefetch_cache_name, False, ) def add(self, *objs, bulk=True): self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) def check_and_update_obj(obj): if not isinstance(obj, self.model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) setattr(obj, self.content_type_field_name, self.content_type) setattr(obj, self.object_id_field_name, self.pk_val) if bulk: pks = [] for obj in objs: if obj._state.adding or obj._state.db != db: raise ValueError( "%r instance isn't saved. Use bulk=False or save " "the object first." % obj ) check_and_update_obj(obj) pks.append(obj.pk) self.model._base_manager.using(db).filter(pk__in=pks).update( **{ self.content_type_field_name: self.content_type, self.object_id_field_name: self.pk_val, } ) else: with transaction.atomic(using=db, savepoint=False): for obj in objs: check_and_update_obj(obj) obj.save() add.alters_data = True def remove(self, *objs, bulk=True): if not objs: return self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk) remove.alters_data = True def clear(self, *, bulk=True): self._clear(self, bulk) clear.alters_data = True def _clear(self, queryset, bulk): self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) queryset = queryset.using(db) if bulk: # `QuerySet.delete()` creates its own atomic block which # contains the `pre_delete` and `post_delete` signal handlers. queryset.delete() else: with transaction.atomic(using=db, savepoint=False): for obj in queryset: obj.delete() _clear.alters_data = True def set(self, objs, *, bulk=True, clear=False): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. 
objs = tuple(objs) db = router.db_for_write(self.model, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, bulk=bulk) else: old_objs = set(self.using(db).all()) new_objs = [] for obj in objs: if obj in old_objs: old_objs.remove(obj) else: new_objs.append(obj) self.remove(*old_objs) self.add(*new_objs, bulk=bulk) set.alters_data = True def create(self, **kwargs): self._remove_prefetched_objects() kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super().using(db).create(**kwargs) create.alters_data = True def get_or_create(self, **kwargs): kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super().using(db).get_or_create(**kwargs) get_or_create.alters_data = True def update_or_create(self, **kwargs): kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super().using(db).update_or_create(**kwargs) update_or_create.alters_data = True return GenericRelatedObjectManager
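# --- Editor's usage sketch (appended; not part of the module above) ---
# A minimal, hedged illustration of how GenericForeignKey and
# GenericRelation are wired together. The TaggedItem and Bookmark models
# and every field name besides content_type/object_id are hypothetical;
# only the field classes and their defaults come from the code above.
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models


class TaggedItem(models.Model):
    tag = models.SlugField()
    # The two concrete columns that back the generic relation; their names
    # match GenericForeignKey's defaults ("content_type", "object_id").
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    # The accessor defined above: resolves to an instance of any model.
    content_object = GenericForeignKey("content_type", "object_id")


class Bookmark(models.Model):
    url = models.URLField()
    # Reverse side: bookmark.tags is a GenericRelatedObjectManager, so
    # bookmark.tags.all(), .create(tag="..."), .add(...), and .set(...)
    # behave as implemented by create_generic_related_manager() above.
    tags = GenericRelation(TaggedItem)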
80f30138f013daa72ae575d4fd5adb6f5fbf7115ce27b16ea41e130968851bbe
import json from django.contrib.postgres import lookups from django.contrib.postgres.forms import SimpleArrayField from django.contrib.postgres.validators import ArrayMaxLengthValidator from django.core import checks, exceptions from django.db.models import Field, Func, IntegerField, Transform, Value from django.db.models.fields.mixins import CheckFieldDefaultMixin from django.db.models.lookups import Exact, In from django.utils.translation import gettext_lazy as _ from ..utils import prefix_validation_error from .utils import AttributeSetter __all__ = ["ArrayField"] class ArrayField(CheckFieldDefaultMixin, Field): empty_strings_allowed = False default_error_messages = { "item_invalid": _("Item %(nth)s in the array did not validate:"), "nested_array_mismatch": _("Nested arrays must have the same length."), } _default_hint = ("list", "[]") def __init__(self, base_field, size=None, **kwargs): self.base_field = base_field self.db_collation = getattr(self.base_field, "db_collation", None) self.size = size if self.size: self.default_validators = [ *self.default_validators, ArrayMaxLengthValidator(self.size), ] # For performance, only add a from_db_value() method if the base field # implements it. if hasattr(self.base_field, "from_db_value"): self.from_db_value = self._from_db_value super().__init__(**kwargs) @property def model(self): try: return self.__dict__["model"] except KeyError: raise AttributeError( "'%s' object has no attribute 'model'" % self.__class__.__name__ ) @model.setter def model(self, model): self.__dict__["model"] = model self.base_field.model = model @classmethod def _choices_is_value(cls, value): return isinstance(value, (list, tuple)) or super()._choices_is_value(value) def check(self, **kwargs): errors = super().check(**kwargs) if self.base_field.remote_field: errors.append( checks.Error( "Base field for array cannot be a related field.", obj=self, id="postgres.E002", ) ) else: # Remove the field name checks as they are not needed here. 
base_checks = self.base_field.check() if base_checks: error_messages = "\n ".join( "%s (%s)" % (base_check.msg, base_check.id) for base_check in base_checks if isinstance(base_check, checks.Error) ) if error_messages: errors.append( checks.Error( "Base field for array has errors:\n %s" % error_messages, obj=self, id="postgres.E001", ) ) warning_messages = "\n ".join( "%s (%s)" % (base_check.msg, base_check.id) for base_check in base_checks if isinstance(base_check, checks.Warning) ) if warning_messages: errors.append( checks.Warning( "Base field for array has warnings:\n %s" % warning_messages, obj=self, id="postgres.W004", ) ) return errors def set_attributes_from_name(self, name): super().set_attributes_from_name(name) self.base_field.set_attributes_from_name(name) @property def description(self): return "Array of %s" % self.base_field.description def db_type(self, connection): size = self.size or "" return "%s[%s]" % (self.base_field.db_type(connection), size) def cast_db_type(self, connection): size = self.size or "" return "%s[%s]" % (self.base_field.cast_db_type(connection), size) def db_parameters(self, connection): db_params = super().db_parameters(connection) db_params["collation"] = self.db_collation return db_params def get_placeholder(self, value, compiler, connection): return "%s::{}".format(self.db_type(connection)) def get_db_prep_value(self, value, connection, prepared=False): if isinstance(value, (list, tuple)): return [ self.base_field.get_db_prep_value(i, connection, prepared=False) for i in value ] return value def deconstruct(self): name, path, args, kwargs = super().deconstruct() if path == "django.contrib.postgres.fields.array.ArrayField": path = "django.contrib.postgres.fields.ArrayField" kwargs.update( { "base_field": self.base_field.clone(), "size": self.size, } ) return name, path, args, kwargs def to_python(self, value): if isinstance(value, str): # Assume we're deserializing vals = json.loads(value) value = [self.base_field.to_python(val) for val in vals] return value def _from_db_value(self, value, expression, connection): if value is None: return value return [ self.base_field.from_db_value(item, expression, connection) for item in value ] def value_to_string(self, obj): values = [] vals = self.value_from_object(obj) base_field = self.base_field for val in vals: if val is None: values.append(None) else: obj = AttributeSetter(base_field.attname, val) values.append(base_field.value_to_string(obj)) return json.dumps(values) def get_transform(self, name): transform = super().get_transform(name) if transform: return transform if "_" not in name: try: index = int(name) except ValueError: pass else: index += 1 # postgres uses 1-indexing return IndexTransformFactory(index, self.base_field) try: start, end = name.split("_") start = int(start) + 1 end = int(end) # don't add one here because postgres slices are weird except ValueError: pass else: return SliceTransformFactory(start, end) def validate(self, value, model_instance): super().validate(value, model_instance) for index, part in enumerate(value): try: self.base_field.validate(part, model_instance) except exceptions.ValidationError as error: raise prefix_validation_error( error, prefix=self.error_messages["item_invalid"], code="item_invalid", params={"nth": index + 1}, ) if isinstance(self.base_field, ArrayField): if len({len(i) for i in value}) > 1: raise exceptions.ValidationError( self.error_messages["nested_array_mismatch"], code="nested_array_mismatch", ) def run_validators(self, value): 
super().run_validators(value) for index, part in enumerate(value): try: self.base_field.run_validators(part) except exceptions.ValidationError as error: raise prefix_validation_error( error, prefix=self.error_messages["item_invalid"], code="item_invalid", params={"nth": index + 1}, ) def formfield(self, **kwargs): return super().formfield( **{ "form_class": SimpleArrayField, "base_field": self.base_field.formfield(), "max_length": self.size, **kwargs, } ) class ArrayRHSMixin: def __init__(self, lhs, rhs): # Don't wrap arrays that contains only None values, psycopg2 doesn't # allow this. if isinstance(rhs, (tuple, list)) and any(self._rhs_not_none_values(rhs)): expressions = [] for value in rhs: if not hasattr(value, "resolve_expression"): field = lhs.output_field value = Value(field.base_field.get_prep_value(value)) expressions.append(value) rhs = Func( *expressions, function="ARRAY", template="%(function)s[%(expressions)s]", ) super().__init__(lhs, rhs) def process_rhs(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) cast_type = self.lhs.output_field.cast_db_type(connection) return "%s::%s" % (rhs, cast_type), rhs_params def _rhs_not_none_values(self, rhs): for x in rhs: if isinstance(x, (list, tuple)): yield from self._rhs_not_none_values(x) elif x is not None: yield True @ArrayField.register_lookup class ArrayContains(ArrayRHSMixin, lookups.DataContains): pass @ArrayField.register_lookup class ArrayContainedBy(ArrayRHSMixin, lookups.ContainedBy): pass @ArrayField.register_lookup class ArrayExact(ArrayRHSMixin, Exact): pass @ArrayField.register_lookup class ArrayOverlap(ArrayRHSMixin, lookups.Overlap): pass @ArrayField.register_lookup class ArrayLenTransform(Transform): lookup_name = "len" output_field = IntegerField() def as_sql(self, compiler, connection): lhs, params = compiler.compile(self.lhs) # Distinguish NULL and empty arrays return ( "CASE WHEN %(lhs)s IS NULL THEN NULL ELSE " "coalesce(array_length(%(lhs)s, 1), 0) END" ) % {"lhs": lhs}, params @ArrayField.register_lookup class ArrayInLookup(In): def get_prep_lookup(self): values = super().get_prep_lookup() if hasattr(values, "resolve_expression"): return values # In.process_rhs() expects values to be hashable, so convert lists # to tuples. prepared_values = [] for value in values: if hasattr(value, "resolve_expression"): prepared_values.append(value) else: prepared_values.append(tuple(value)) return prepared_values class IndexTransform(Transform): def __init__(self, index, base_field, *args, **kwargs): super().__init__(*args, **kwargs) self.index = index self.base_field = base_field def as_sql(self, compiler, connection): lhs, params = compiler.compile(self.lhs) return "%s[%%s]" % lhs, params + [self.index] @property def output_field(self): return self.base_field class IndexTransformFactory: def __init__(self, index, base_field): self.index = index self.base_field = base_field def __call__(self, *args, **kwargs): return IndexTransform(self.index, self.base_field, *args, **kwargs) class SliceTransform(Transform): def __init__(self, start, end, *args, **kwargs): super().__init__(*args, **kwargs) self.start = start self.end = end def as_sql(self, compiler, connection): lhs, params = compiler.compile(self.lhs) return "%s[%%s:%%s]" % lhs, params + [self.start, self.end] class SliceTransformFactory: def __init__(self, start, end): self.start = start self.end = end def __call__(self, *args, **kwargs): return SliceTransform(self.start, self.end, *args, **kwargs)
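# --- Editor's usage sketch (appended; not part of the module above) ---
# A hedged illustration of ArrayField and the transforms registered above.
# The Post model and its field names are hypothetical; the lookup
# spellings follow get_transform(): a bare integer becomes an
# IndexTransform, "start_end" a SliceTransform, and "len" the
# ArrayLenTransform.
from django.contrib.postgres.fields import ArrayField
from django.db import models


class Post(models.Model):
    name = models.CharField(max_length=200)
    # size is enforced by ArrayMaxLengthValidator, not as a DB constraint.
    tags = ArrayField(models.CharField(max_length=32), size=8, default=list)


# tags__0 compiles to tags[1]: PostgreSQL arrays are 1-indexed, which is
# why get_transform() adds one to the Python-style index.
first_is_django = Post.objects.filter(tags__0="django")
# tags__0_2 compiles to tags[1:2], i.e. the first two elements.
starts_with = Post.objects.filter(tags__0_2=["django", "orm"])
# ArrayLenTransform distinguishes NULL from an empty array.
untagged = Post.objects.filter(tags__len=0)
# ArrayContains wraps the right-hand side in an ARRAY[...] expression.
tagged = Post.objects.filter(tags__contains=["django"])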
85a5dde708dc4b6111e1bc71ce723ecfba7b1f8aedecfd130900ff7782765436
import sys

from django.core.management.color import color_style
from django.db import IntegrityError, migrations, transaction
from django.db.models import Q

WARNING = """
    A problem arose migrating proxy model permissions for {old} to {new}.

      Permission(s) for {new} already existed.
      Codenames Q: {query}

    Be sure to audit ALL permissions for {old} and {new}.
"""


def update_proxy_model_permissions(apps, schema_editor, reverse=False):
    """
    Update the content_type of proxy model permissions to use the
    ContentType of the proxy model.
    """
    style = color_style()
    Permission = apps.get_model("auth", "Permission")
    ContentType = apps.get_model("contenttypes", "ContentType")
    alias = schema_editor.connection.alias
    for Model in apps.get_models():
        opts = Model._meta
        if not opts.proxy:
            continue
        proxy_default_permissions_codenames = [
            "%s_%s" % (action, opts.model_name)
            for action in opts.default_permissions
        ]
        permissions_query = Q(codename__in=proxy_default_permissions_codenames)
        for codename, name in opts.permissions:
            permissions_query |= Q(codename=codename, name=name)
        content_type_manager = ContentType.objects.db_manager(alias)
        concrete_content_type = content_type_manager.get_for_model(
            Model, for_concrete_model=True
        )
        proxy_content_type = content_type_manager.get_for_model(
            Model, for_concrete_model=False
        )
        old_content_type = proxy_content_type if reverse else concrete_content_type
        new_content_type = concrete_content_type if reverse else proxy_content_type
        try:
            with transaction.atomic(using=alias):
                Permission.objects.using(alias).filter(
                    permissions_query,
                    content_type=old_content_type,
                ).update(content_type=new_content_type)
        except IntegrityError:
            old = "{}_{}".format(old_content_type.app_label, old_content_type.model)
            new = "{}_{}".format(new_content_type.app_label, new_content_type.model)
            sys.stdout.write(
                style.WARNING(WARNING.format(old=old, new=new, query=permissions_query))
            )


def revert_proxy_model_permissions(apps, schema_editor):
    """
    Update the content_type of proxy model permissions to use the
    ContentType of the concrete model.
    """
    update_proxy_model_permissions(apps, schema_editor, reverse=True)


class Migration(migrations.Migration):
    dependencies = [
        ("auth", "0010_alter_group_name_max_length"),
        ("contenttypes", "0002_remove_content_type_name"),
    ]
    operations = [
        migrations.RunPython(
            update_proxy_model_permissions, revert_proxy_model_permissions
        ),
    ]
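# --- Editor's sketch (appended; not part of the migration above) ---
# Shows the Q object update_proxy_model_permissions() builds for a
# hypothetical proxy model with model_name "bookproxy", the default
# permissions, and one custom permission. Every codename below is
# invented for illustration.
from django.db.models import Q

permissions_query = Q(
    codename__in=[
        "add_bookproxy",
        "change_bookproxy",
        "delete_bookproxy",
        "view_bookproxy",
    ]
) | Q(codename="can_publish", name="Can publish")
# The migration then moves every matching Permission row from the concrete
# model's ContentType to the proxy model's ContentType (or back when
# reverse=True), printing the WARNING instead of failing if rows for the
# target ContentType already exist.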
0d9aba11df5b240dccaa1a4343dbbddba847c5c9f10639db292a691e06d2871c
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.linestring import LinearRing


class Polygon(GEOSGeometry):
    _minlength = 1

    def __init__(self, *args, **kwargs):
        """
        Initialize on an exterior ring and a sequence of holes (both
        instances may be either LinearRing instances, or a tuple/list that
        may be constructed into a LinearRing).

        Examples of initialization, where shell, hole1, and hole2 are
        valid LinearRing geometries:
        >>> from django.contrib.gis.geos import LinearRing, Polygon
        >>> shell = hole1 = hole2 = LinearRing()
        >>> poly = Polygon(shell, hole1, hole2)
        >>> poly = Polygon(shell, (hole1, hole2))

        >>> # Example where tuple parameters are used:
        >>> poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)),
        ...                ((4, 4), (4, 6), (6, 6), (6, 4), (4, 4)))
        """
        if not args:
            super().__init__(self._create_polygon(0, None), **kwargs)
            return

        # Getting the ext_ring and init_holes parameters from the argument list
        ext_ring, *init_holes = args
        n_holes = len(init_holes)

        # If initialized as Polygon(shell, (LinearRing, LinearRing))
        # [for backward-compatibility]
        if n_holes == 1 and isinstance(init_holes[0], (tuple, list)):
            if not init_holes[0]:
                init_holes = ()
                n_holes = 0
            elif isinstance(init_holes[0][0], LinearRing):
                init_holes = init_holes[0]
                n_holes = len(init_holes)

        polygon = self._create_polygon(n_holes + 1, [ext_ring, *init_holes])
        super().__init__(polygon, **kwargs)

    def __iter__(self):
        "Iterate over each ring in the polygon."
        for i in range(len(self)):
            yield self[i]

    def __len__(self):
        "Return the number of rings in this Polygon."
        return self.num_interior_rings + 1

    @classmethod
    def from_bbox(cls, bbox):
        "Construct a Polygon from a bounding box (4-tuple)."
        x0, y0, x1, y1 = bbox
        for z in bbox:
            if not isinstance(z, (float, int)):
                return GEOSGeometry(
                    "POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))"
                    % (x0, y0, x0, y1, x1, y1, x1, y0, x0, y0)
                )
        return Polygon(((x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)))

    # ### These routines are needed for list-like operation w/ListMixin ###
    def _create_polygon(self, length, items):
        # Instantiate LinearRing objects if necessary, but don't clone them yet
        # _construct_ring will throw a TypeError if a parameter isn't a valid ring
        # If we cloned the pointers here, we wouldn't be able to clean up
        # in case of error.
        if not length:
            return capi.create_empty_polygon()

        rings = []
        for r in items:
            if isinstance(r, GEOM_PTR):
                rings.append(r)
            else:
                rings.append(self._construct_ring(r))

        shell = self._clone(rings.pop(0))

        n_holes = length - 1
        if n_holes:
            holes_param = (GEOM_PTR * n_holes)(*[self._clone(r) for r in rings])
        else:
            holes_param = None

        return capi.create_polygon(shell, holes_param, n_holes)

    def _clone(self, g):
        if isinstance(g, GEOM_PTR):
            return capi.geom_clone(g)
        else:
            return capi.geom_clone(g.ptr)

    def _construct_ring(
        self,
        param,
        msg=(
            "Parameter must be a sequence of LinearRings or objects that can "
            "initialize to LinearRings"
        ),
    ):
        "Try to construct a ring from the given parameter."
        if isinstance(param, LinearRing):
            return param
        try:
            return LinearRing(param)
        except TypeError:
            raise TypeError(msg)

    def _set_list(self, length, items):
        # Getting the current pointer, replacing with the newly constructed
        # geometry, and destroying the old geometry.
        prev_ptr = self.ptr
        srid = self.srid
        self.ptr = self._create_polygon(length, items)
        if srid:
            self.srid = srid
        capi.destroy_geom(prev_ptr)

    def _get_single_internal(self, index):
        """
        Return the ring at the specified index. The first index, 0, will
        always return the exterior ring.  Indices > 0 will return the
        interior ring at the given index (e.g., poly[1] and poly[2] would
        return the first and second interior ring, respectively).

        CAREFUL: Internal/External are not the same as Interior/Exterior!
        Return a pointer from the existing geometries for use internally by the
        object's methods. _get_single_external() returns a clone of the same
        geometry for use by external code.
        """
        if index == 0:
            return capi.get_extring(self.ptr)
        else:
            # Getting the interior ring, have to subtract 1 from the index.
            return capi.get_intring(self.ptr, index - 1)

    def _get_single_external(self, index):
        return GEOSGeometry(
            capi.geom_clone(self._get_single_internal(index)), srid=self.srid
        )

    _set_single = GEOSGeometry._set_single_rebuild
    _assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild

    # #### Polygon Properties ####
    @property
    def num_interior_rings(self):
        "Return the number of interior rings."
        # Getting the number of rings
        return capi.get_nrings(self.ptr)

    def _get_ext_ring(self):
        "Get the exterior ring of the Polygon."
        return self[0]

    def _set_ext_ring(self, ring):
        "Set the exterior ring of the Polygon."
        self[0] = ring

    # Properties for the exterior ring/shell.
    exterior_ring = property(_get_ext_ring, _set_ext_ring)
    shell = exterior_ring

    @property
    def tuple(self):
        "Get the tuple for each ring in this Polygon."
        return tuple(self[i].tuple for i in range(len(self)))

    coords = tuple

    @property
    def kml(self):
        "Return the KML representation of this Polygon."
        inner_kml = "".join(
            "<innerBoundaryIs>%s</innerBoundaryIs>" % self[i + 1].kml
            for i in range(self.num_interior_rings)
        )
        return "<Polygon><outerBoundaryIs>%s</outerBoundaryIs>%s</Polygon>" % (
            self[0].kml,
            inner_kml,
        )
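# --- Editor's usage sketch (appended; not part of the module above) ---
# Demonstrates the constructor forms and list-like ring access implemented
# above; coordinates are arbitrary example values (requires GEOS installed).
from django.contrib.gis.geos import LinearRing, Polygon

shell = LinearRing((0, 0), (0, 10), (10, 10), (10, 0), (0, 0))
hole = LinearRing((4, 4), (4, 6), (6, 6), (6, 4), (4, 4))

poly = Polygon(shell, hole)      # exterior ring plus one interior ring
assert len(poly) == 2            # __len__: shell + num_interior_rings
assert poly.num_interior_rings == 1
exterior = poly[0]               # index 0 is always the shell
inner = poly[1]                  # interior rings start at index 1

# from_bbox() builds an axis-aligned rectangle from (xmin, ymin, xmax, ymax).
rect = Polygon.from_bbox((0.0, 0.0, 5.0, 5.0))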
866b0ed54aa26e83b649b14e4fde409f78fdc319782529a04e675799f8ed1f25
import compileall import os from importlib import import_module from django.db import connection, connections from django.db.migrations.exceptions import ( AmbiguityError, InconsistentMigrationHistory, NodeNotFoundError, ) from django.db.migrations.loader import MigrationLoader from django.db.migrations.recorder import MigrationRecorder from django.test import TestCase, modify_settings, override_settings from .test_base import MigrationTestBase class RecorderTests(TestCase): """ Tests recording migrations as applied or not. """ databases = {"default", "other"} def test_apply(self): """ Tests marking migrations as applied/unapplied. """ recorder = MigrationRecorder(connection) self.assertEqual( {(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"}, set(), ) recorder.record_applied("myapp", "0432_ponies") self.assertEqual( {(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"}, {("myapp", "0432_ponies")}, ) # That should not affect records of another database recorder_other = MigrationRecorder(connections["other"]) self.assertEqual( {(x, y) for (x, y) in recorder_other.applied_migrations() if x == "myapp"}, set(), ) recorder.record_unapplied("myapp", "0432_ponies") self.assertEqual( {(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"}, set(), ) class LoaderTests(TestCase): """ Tests the disk and database loader, and running through migrations in memory. """ def setUp(self): self.applied_records = [] def tearDown(self): # Unapply records on databases that don't roll back changes after each # test method. if not connection.features.supports_transactions: for recorder, app, name in self.applied_records: recorder.record_unapplied(app, name) def record_applied(self, recorder, app, name): recorder.record_applied(app, name) self.applied_records.append((recorder, app, name)) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) @modify_settings(INSTALLED_APPS={"append": "basic"}) def test_load(self): """ Makes sure the loader can load the migrations for the test apps, and then render them out to a new Apps. """ # Load and test the plan migration_loader = MigrationLoader(connection) self.assertEqual( migration_loader.graph.forwards_plan(("migrations", "0002_second")), [ ("migrations", "0001_initial"), ("migrations", "0002_second"), ], ) # Now render it out! project_state = migration_loader.project_state(("migrations", "0002_second")) self.assertEqual(len(project_state.models), 2) author_state = project_state.models["migrations", "author"] self.assertEqual( list(author_state.fields), ["id", "name", "slug", "age", "rating"] ) book_state = project_state.models["migrations", "book"] self.assertEqual(list(book_state.fields), ["id", "author"]) # Ensure we've included unmigrated apps in there too self.assertIn("basic", project_state.real_apps) @override_settings( MIGRATION_MODULES={ "migrations": "migrations.test_migrations", "migrations2": "migrations2.test_migrations_2", } ) @modify_settings(INSTALLED_APPS={"append": "migrations2"}) def test_plan_handles_repeated_migrations(self): """ _generate_plan() doesn't readd migrations already in the plan (#29180). 
""" migration_loader = MigrationLoader(connection) nodes = [("migrations", "0002_second"), ("migrations2", "0001_initial")] self.assertEqual( migration_loader.graph._generate_plan(nodes, at_end=True), [ ("migrations", "0001_initial"), ("migrations", "0002_second"), ("migrations2", "0001_initial"), ], ) @override_settings( MIGRATION_MODULES={"migrations": "migrations.test_migrations_unmigdep"} ) def test_load_unmigrated_dependency(self): """ The loader can load migrations with a dependency on an unmigrated app. """ # Load and test the plan migration_loader = MigrationLoader(connection) self.assertEqual( migration_loader.graph.forwards_plan(("migrations", "0001_initial")), [ ("contenttypes", "0001_initial"), ("auth", "0001_initial"), ("migrations", "0001_initial"), ], ) # Now render it out! project_state = migration_loader.project_state(("migrations", "0001_initial")) self.assertEqual( len([m for a, m in project_state.models if a == "migrations"]), 1 ) book_state = project_state.models["migrations", "book"] self.assertEqual(list(book_state.fields), ["id", "user"]) @override_settings( MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"} ) def test_run_before(self): """ Makes sure the loader uses Migration.run_before. """ # Load and test the plan migration_loader = MigrationLoader(connection) self.assertEqual( migration_loader.graph.forwards_plan(("migrations", "0002_second")), [ ("migrations", "0001_initial"), ("migrations", "0003_third"), ("migrations", "0002_second"), ], ) @override_settings( MIGRATION_MODULES={ "migrations": "migrations.test_migrations_first", "migrations2": "migrations2.test_migrations_2_first", } ) @modify_settings(INSTALLED_APPS={"append": "migrations2"}) def test_first(self): """ Makes sure the '__first__' migrations build correctly. 
""" migration_loader = MigrationLoader(connection) self.assertEqual( migration_loader.graph.forwards_plan(("migrations", "second")), [ ("migrations", "thefirst"), ("migrations2", "0001_initial"), ("migrations2", "0002_second"), ("migrations", "second"), ], ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_name_match(self): "Tests prefix name matching" migration_loader = MigrationLoader(connection) self.assertEqual( migration_loader.get_migration_by_prefix("migrations", "0001").name, "0001_initial", ) msg = "There is more than one migration for 'migrations' with the prefix '0'" with self.assertRaisesMessage(AmbiguityError, msg): migration_loader.get_migration_by_prefix("migrations", "0") msg = "There is no migration for 'migrations' with the prefix 'blarg'" with self.assertRaisesMessage(KeyError, msg): migration_loader.get_migration_by_prefix("migrations", "blarg") def test_load_import_error(self): with override_settings( MIGRATION_MODULES={"migrations": "import_error_package"} ): with self.assertRaises(ImportError): MigrationLoader(connection) def test_load_module_file(self): with override_settings( MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"} ): loader = MigrationLoader(connection) self.assertIn( "migrations", loader.unmigrated_apps, "App with migrations module file not in unmigrated apps.", ) def test_load_empty_dir(self): with override_settings( MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"} ): loader = MigrationLoader(connection) self.assertIn( "migrations", loader.unmigrated_apps, "App missing __init__.py in migrations module not in unmigrated apps.", ) @override_settings( INSTALLED_APPS=["migrations.migrations_test_apps.migrated_app"], ) def test_marked_as_migrated(self): """ Undefined MIGRATION_MODULES implies default migration module. """ migration_loader = MigrationLoader(connection) self.assertEqual(migration_loader.migrated_apps, {"migrated_app"}) self.assertEqual(migration_loader.unmigrated_apps, set()) @override_settings( INSTALLED_APPS=["migrations.migrations_test_apps.migrated_app"], MIGRATION_MODULES={"migrated_app": None}, ) def test_marked_as_unmigrated(self): """ MIGRATION_MODULES allows disabling of migrations for a particular app. """ migration_loader = MigrationLoader(connection) self.assertEqual(migration_loader.migrated_apps, set()) self.assertEqual(migration_loader.unmigrated_apps, {"migrated_app"}) @override_settings( INSTALLED_APPS=["migrations.migrations_test_apps.migrated_app"], MIGRATION_MODULES={"migrated_app": "missing-module"}, ) def test_explicit_missing_module(self): """ If a MIGRATION_MODULES override points to a missing module, the error raised during the importation attempt should be propagated unless `ignore_no_migrations=True`. 
""" with self.assertRaisesMessage(ImportError, "missing-module"): migration_loader = MigrationLoader(connection) migration_loader = MigrationLoader(connection, ignore_no_migrations=True) self.assertEqual(migration_loader.migrated_apps, set()) self.assertEqual(migration_loader.unmigrated_apps, {"migrated_app"}) @override_settings( MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"} ) def test_loading_squashed(self): "Tests loading a squashed migration" migration_loader = MigrationLoader(connection) recorder = MigrationRecorder(connection) self.addCleanup(recorder.flush) # Loading with nothing applied should just give us the one node self.assertEqual( len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]), 1, ) # However, fake-apply one migration and it should now use the old two self.record_applied(recorder, "migrations", "0001_initial") migration_loader.build_graph() self.assertEqual( len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]), 2, ) @override_settings( MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"} ) def test_loading_squashed_complex(self): "Tests loading a complex set of squashed migrations" loader = MigrationLoader(connection) recorder = MigrationRecorder(connection) self.addCleanup(recorder.flush) def num_nodes(): plan = set(loader.graph.forwards_plan(("migrations", "7_auto"))) return len(plan - loader.applied_migrations.keys()) # Empty database: use squashed migration loader.build_graph() self.assertEqual(num_nodes(), 5) # Starting at 1 or 2 should use the squashed migration too self.record_applied(recorder, "migrations", "1_auto") loader.build_graph() self.assertEqual(num_nodes(), 4) self.record_applied(recorder, "migrations", "2_auto") loader.build_graph() self.assertEqual(num_nodes(), 3) # However, starting at 3 to 5 cannot use the squashed migration self.record_applied(recorder, "migrations", "3_auto") loader.build_graph() self.assertEqual(num_nodes(), 4) self.record_applied(recorder, "migrations", "4_auto") loader.build_graph() self.assertEqual(num_nodes(), 3) # Starting at 5 to 7 we are past the squashed migrations. 
self.record_applied(recorder, "migrations", "5_auto") loader.build_graph() self.assertEqual(num_nodes(), 2) self.record_applied(recorder, "migrations", "6_auto") loader.build_graph() self.assertEqual(num_nodes(), 1) self.record_applied(recorder, "migrations", "7_auto") loader.build_graph() self.assertEqual(num_nodes(), 0) @override_settings( MIGRATION_MODULES={ "app1": "migrations.test_migrations_squashed_complex_multi_apps.app1", "app2": "migrations.test_migrations_squashed_complex_multi_apps.app2", } ) @modify_settings( INSTALLED_APPS={ "append": [ "migrations.test_migrations_squashed_complex_multi_apps.app1", "migrations.test_migrations_squashed_complex_multi_apps.app2", ] } ) def test_loading_squashed_complex_multi_apps(self): loader = MigrationLoader(connection) loader.build_graph() plan = set(loader.graph.forwards_plan(("app1", "4_auto"))) expected_plan = { ("app1", "1_auto"), ("app2", "1_squashed_2"), ("app1", "2_squashed_3"), ("app1", "4_auto"), } self.assertEqual(plan, expected_plan) @override_settings( MIGRATION_MODULES={ "app1": "migrations.test_migrations_squashed_complex_multi_apps.app1", "app2": "migrations.test_migrations_squashed_complex_multi_apps.app2", } ) @modify_settings( INSTALLED_APPS={ "append": [ "migrations.test_migrations_squashed_complex_multi_apps.app1", "migrations.test_migrations_squashed_complex_multi_apps.app2", ] } ) def test_loading_squashed_complex_multi_apps_partially_applied(self): loader = MigrationLoader(connection) recorder = MigrationRecorder(connection) self.record_applied(recorder, "app1", "1_auto") self.record_applied(recorder, "app1", "2_auto") loader.build_graph() plan = set(loader.graph.forwards_plan(("app1", "4_auto"))) plan -= loader.applied_migrations.keys() expected_plan = { ("app2", "1_squashed_2"), ("app1", "3_auto"), ("app1", "4_auto"), } self.assertEqual(plan, expected_plan) @override_settings( MIGRATION_MODULES={ "migrations": "migrations.test_migrations_squashed_erroneous" } ) def test_loading_squashed_erroneous(self): "Tests loading a complex but erroneous set of squashed migrations" loader = MigrationLoader(connection) recorder = MigrationRecorder(connection) self.addCleanup(recorder.flush) def num_nodes(): plan = set(loader.graph.forwards_plan(("migrations", "7_auto"))) return len(plan - loader.applied_migrations.keys()) # Empty database: use squashed migration loader.build_graph() self.assertEqual(num_nodes(), 5) # Starting at 1 or 2 should use the squashed migration too self.record_applied(recorder, "migrations", "1_auto") loader.build_graph() self.assertEqual(num_nodes(), 4) self.record_applied(recorder, "migrations", "2_auto") loader.build_graph() self.assertEqual(num_nodes(), 3) # However, starting at 3 or 4, nonexistent migrations would be needed. msg = ( "Migration migrations.6_auto depends on nonexistent node " "('migrations', '5_auto'). Django tried to replace migration " "migrations.5_auto with any of [migrations.3_squashed_5] but wasn't able " "to because some of the replaced migrations are already applied." 
) self.record_applied(recorder, "migrations", "3_auto") with self.assertRaisesMessage(NodeNotFoundError, msg): loader.build_graph() self.record_applied(recorder, "migrations", "4_auto") with self.assertRaisesMessage(NodeNotFoundError, msg): loader.build_graph() # Starting at 5 to 7 we are passed the squashed migrations self.record_applied(recorder, "migrations", "5_auto") loader.build_graph() self.assertEqual(num_nodes(), 2) self.record_applied(recorder, "migrations", "6_auto") loader.build_graph() self.assertEqual(num_nodes(), 1) self.record_applied(recorder, "migrations", "7_auto") loader.build_graph() self.assertEqual(num_nodes(), 0) @override_settings( MIGRATION_MODULES={"migrations": "migrations.test_migrations"}, INSTALLED_APPS=["migrations"], ) def test_check_consistent_history(self): loader = MigrationLoader(connection=None) loader.check_consistent_history(connection) recorder = MigrationRecorder(connection) self.record_applied(recorder, "migrations", "0002_second") msg = ( "Migration migrations.0002_second is applied before its dependency " "migrations.0001_initial on database 'default'." ) with self.assertRaisesMessage(InconsistentMigrationHistory, msg): loader.check_consistent_history(connection) @override_settings( MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_extra"}, INSTALLED_APPS=["migrations"], ) def test_check_consistent_history_squashed(self): """ MigrationLoader.check_consistent_history() should ignore unapplied squashed migrations that have all of their `replaces` applied. """ loader = MigrationLoader(connection=None) recorder = MigrationRecorder(connection) self.record_applied(recorder, "migrations", "0001_initial") self.record_applied(recorder, "migrations", "0002_second") loader.check_consistent_history(connection) self.record_applied(recorder, "migrations", "0003_third") loader.check_consistent_history(connection) @override_settings( MIGRATION_MODULES={ "app1": "migrations.test_migrations_squashed_ref_squashed.app1", "app2": "migrations.test_migrations_squashed_ref_squashed.app2", } ) @modify_settings( INSTALLED_APPS={ "append": [ "migrations.test_migrations_squashed_ref_squashed.app1", "migrations.test_migrations_squashed_ref_squashed.app2", ] } ) def test_loading_squashed_ref_squashed(self): "Tests loading a squashed migration with a new migration referencing it" r""" The sample migrations are structured like this: app_1 1 --> 2 ---------------------*--> 3 *--> 4 \ / / *-------------------*----/--> 2_sq_3 --* \ / / =============== \ ============= / == / ====================== app_2 *--> 1_sq_2 --* / \ / *--> 1 --> 2 --* Where 2_sq_3 is a replacing migration for 2 and 3 in app_1, as 1_sq_2 is a replacing migration for 1 and 2 in app_2. """ loader = MigrationLoader(connection) recorder = MigrationRecorder(connection) self.addCleanup(recorder.flush) # Load with nothing applied: both migrations squashed. loader.build_graph() plan = set(loader.graph.forwards_plan(("app1", "4_auto"))) plan -= loader.applied_migrations.keys() expected_plan = { ("app1", "1_auto"), ("app2", "1_squashed_2"), ("app1", "2_squashed_3"), ("app1", "4_auto"), } self.assertEqual(plan, expected_plan) # Load with nothing applied and migrate to a replaced migration. # Not possible if loader.replace_migrations is True (default). loader.build_graph() msg = "Node ('app1', '3_auto') not a valid node" with self.assertRaisesMessage(NodeNotFoundError, msg): loader.graph.forwards_plan(("app1", "3_auto")) # Possible if loader.replace_migrations is False. 
loader.replace_migrations = False loader.build_graph() plan = set(loader.graph.forwards_plan(("app1", "3_auto"))) plan -= loader.applied_migrations.keys() expected_plan = { ("app1", "1_auto"), ("app2", "1_auto"), ("app2", "2_auto"), ("app1", "2_auto"), ("app1", "3_auto"), } self.assertEqual(plan, expected_plan) loader.replace_migrations = True # Fake-apply a few from app1: unsquashes migration in app1. self.record_applied(recorder, "app1", "1_auto") self.record_applied(recorder, "app1", "2_auto") loader.build_graph() plan = set(loader.graph.forwards_plan(("app1", "4_auto"))) plan -= loader.applied_migrations.keys() expected_plan = { ("app2", "1_squashed_2"), ("app1", "3_auto"), ("app1", "4_auto"), } self.assertEqual(plan, expected_plan) # Fake-apply one from app2: unsquashes migration in app2 too. self.record_applied(recorder, "app2", "1_auto") loader.build_graph() plan = set(loader.graph.forwards_plan(("app1", "4_auto"))) plan -= loader.applied_migrations.keys() expected_plan = { ("app2", "2_auto"), ("app1", "3_auto"), ("app1", "4_auto"), } self.assertEqual(plan, expected_plan) @override_settings( MIGRATION_MODULES={"migrations": "migrations.test_migrations_private"} ) def test_ignore_files(self): """Files prefixed with underscore, tilde, or dot aren't loaded.""" loader = MigrationLoader(connection) loader.load_disk() migrations = [ name for app, name in loader.disk_migrations if app == "migrations" ] self.assertEqual(migrations, ["0001_initial"]) @override_settings( MIGRATION_MODULES={ "migrations": "migrations.test_migrations_namespace_package" }, ) def test_loading_namespace_package(self): """Migration directories without an __init__.py file are ignored.""" loader = MigrationLoader(connection) loader.load_disk() migrations = [ name for app, name in loader.disk_migrations if app == "migrations" ] self.assertEqual(migrations, []) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_loading_package_without__file__(self): """ To support frozen environments, MigrationLoader loads migrations from regular packages with no __file__ attribute. """ test_module = import_module("migrations.test_migrations") loader = MigrationLoader(connection) # __file__ == __spec__.origin or the latter is None and former is # undefined. module_file = test_module.__file__ module_origin = test_module.__spec__.origin module_has_location = test_module.__spec__.has_location try: del test_module.__file__ test_module.__spec__.origin = None test_module.__spec__.has_location = False loader.load_disk() migrations = [ name for app, name in loader.disk_migrations if app == "migrations" ] self.assertCountEqual(migrations, ["0001_initial", "0002_second"]) finally: test_module.__file__ = module_file test_module.__spec__.origin = module_origin test_module.__spec__.has_location = module_has_location class PycLoaderTests(MigrationTestBase): def test_valid(self): """ To support frozen environments, MigrationLoader loads .pyc migrations. """ with self.temporary_migration_module( module="migrations.test_migrations" ) as migration_dir: # Compile .py files to .pyc files and delete .py files. 
compileall.compile_dir(migration_dir, force=True, quiet=1, legacy=True) for name in os.listdir(migration_dir): if name.endswith(".py"): os.remove(os.path.join(migration_dir, name)) loader = MigrationLoader(connection) self.assertIn(("migrations", "0001_initial"), loader.disk_migrations) def test_invalid(self): """ MigrationLoader reraises ImportErrors caused by "bad magic number" pyc files with a more helpful message. """ with self.temporary_migration_module( module="migrations.test_migrations_bad_pyc" ) as migration_dir: # The -tpl suffix is to avoid the pyc exclusion in MANIFEST.in. os.rename( os.path.join(migration_dir, "0001_initial.pyc-tpl"), os.path.join(migration_dir, "0001_initial.pyc"), ) msg = ( r"Couldn't import '\w+.migrations.0001_initial' as it appears " "to be a stale .pyc file." ) with self.assertRaisesRegex(ImportError, msg): MigrationLoader(connection)
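# --- Editor's sketch (appended; not part of the test module above) ---
# The core pattern these tests exercise, condensed: load migrations, build
# the graph, and ask it for a forwards plan. The app label and migration
# name mirror the test fixtures and are placeholders otherwise.
from django.db import connection
from django.db.migrations.loader import MigrationLoader

loader = MigrationLoader(connection)  # reads disk state and applied records
plan = loader.graph.forwards_plan(("migrations", "0002_second"))
# plan is an ordered list of (app_label, migration_name) nodes ending at the
# target; filtering out applied nodes yields what migrate would actually run.
pending = [node for node in plan if node not in loader.applied_migrations]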
7b7a828eee525886ab62c516c010355e3eae7fc693be1bd9d502047a60fa7c65
import datetime import re import urllib.parse from unittest import mock from django.contrib.auth.forms import ( AdminPasswordChangeForm, AuthenticationForm, PasswordChangeForm, PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget, SetPasswordForm, UserChangeForm, UserCreationForm, ) from django.contrib.auth.models import User from django.contrib.auth.signals import user_login_failed from django.contrib.sites.models import Site from django.core import mail from django.core.exceptions import ValidationError from django.core.mail import EmailMultiAlternatives from django.forms import forms from django.forms.fields import CharField, Field, IntegerField from django.test import SimpleTestCase, TestCase, override_settings from django.urls import reverse from django.utils import translation from django.utils.text import capfirst from django.utils.translation import gettext as _ from .models.custom_user import ( CustomUser, CustomUserWithoutIsActiveField, ExtensionUser, ) from .models.with_custom_email_field import CustomEmailField from .models.with_integer_username import IntegerUsernameUser from .settings import AUTH_TEMPLATES class TestDataMixin: @classmethod def setUpTestData(cls): cls.u1 = User.objects.create_user( username="testclient", password="password", email="[email protected]" ) cls.u2 = User.objects.create_user( username="inactive", password="password", is_active=False ) cls.u3 = User.objects.create_user(username="staff", password="password") cls.u4 = User.objects.create(username="empty_password", password="") cls.u5 = User.objects.create(username="unmanageable_password", password="$") cls.u6 = User.objects.create(username="unknown_password", password="foo$bar") class UserCreationFormTest(TestDataMixin, TestCase): def test_user_already_exists(self): data = { "username": "testclient", "password1": "test123", "password2": "test123", } form = UserCreationForm(data) self.assertFalse(form.is_valid()) self.assertEqual( form["username"].errors, [str(User._meta.get_field("username").error_messages["unique"])], ) def test_invalid_data(self): data = { "username": "jsmith!", "password1": "test123", "password2": "test123", } form = UserCreationForm(data) self.assertFalse(form.is_valid()) validator = next( v for v in User._meta.get_field("username").validators if v.code == "invalid" ) self.assertEqual(form["username"].errors, [str(validator.message)]) def test_password_verification(self): # The verification password is incorrect. data = { "username": "jsmith", "password1": "test123", "password2": "test", } form = UserCreationForm(data) self.assertFalse(form.is_valid()) self.assertEqual( form["password2"].errors, [str(form.error_messages["password_mismatch"])] ) def test_both_passwords(self): # One (or both) passwords weren't given data = {"username": "jsmith"} form = UserCreationForm(data) required_error = [str(Field.default_error_messages["required"])] self.assertFalse(form.is_valid()) self.assertEqual(form["password1"].errors, required_error) self.assertEqual(form["password2"].errors, required_error) data["password2"] = "test123" form = UserCreationForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form["password1"].errors, required_error) self.assertEqual(form["password2"].errors, []) @mock.patch("django.contrib.auth.password_validation.password_changed") def test_success(self, password_changed): # The success case. 
data = { "username": "[email protected]", "password1": "test123", "password2": "test123", } form = UserCreationForm(data) self.assertTrue(form.is_valid()) form.save(commit=False) self.assertEqual(password_changed.call_count, 0) u = form.save() self.assertEqual(password_changed.call_count, 1) self.assertEqual(repr(u), "<User: [email protected]>") def test_unicode_username(self): data = { "username": "宝", "password1": "test123", "password2": "test123", } form = UserCreationForm(data) self.assertTrue(form.is_valid()) u = form.save() self.assertEqual(u.username, "宝") def test_normalize_username(self): # The normalization happens in AbstractBaseUser.clean() and ModelForm # validation calls Model.clean(). ohm_username = "testΩ" # U+2126 OHM SIGN data = { "username": ohm_username, "password1": "pwd2", "password2": "pwd2", } form = UserCreationForm(data) self.assertTrue(form.is_valid()) user = form.save() self.assertNotEqual(user.username, ohm_username) self.assertEqual(user.username, "testΩ") # U+03A9 GREEK CAPITAL LETTER OMEGA def test_duplicate_normalized_unicode(self): """ To prevent almost identical usernames that are visually identical but differ only in their Unicode code points, NFKC normalization should make them appear equal to Django. """ omega_username = "iamtheΩ" # U+03A9 GREEK CAPITAL LETTER OMEGA ohm_username = "iamtheΩ" # U+2126 OHM SIGN self.assertNotEqual(omega_username, ohm_username) User.objects.create_user(username=omega_username, password="pwd") data = { "username": ohm_username, "password1": "pwd2", "password2": "pwd2", } form = UserCreationForm(data) self.assertFalse(form.is_valid()) self.assertEqual( form.errors["username"], ["A user with that username already exists."] ) @override_settings( AUTH_PASSWORD_VALIDATORS=[ { "NAME": ( "django.contrib.auth.password_validation." "UserAttributeSimilarityValidator" ) }, { "NAME": ( "django.contrib.auth.password_validation.MinimumLengthValidator" ), "OPTIONS": { "min_length": 12, }, }, ] ) def test_validates_password(self): data = { "username": "testclient", "password1": "testclient", "password2": "testclient", } form = UserCreationForm(data) self.assertFalse(form.is_valid()) self.assertEqual(len(form["password2"].errors), 2) self.assertIn( "The password is too similar to the username.", form["password2"].errors ) self.assertIn( "This password is too short. 
It must contain at least 12 characters.", form["password2"].errors, ) def test_custom_form(self): class CustomUserCreationForm(UserCreationForm): class Meta(UserCreationForm.Meta): model = ExtensionUser fields = UserCreationForm.Meta.fields + ("date_of_birth",) data = { "username": "testclient", "password1": "testclient", "password2": "testclient", "date_of_birth": "1988-02-24", } form = CustomUserCreationForm(data) self.assertTrue(form.is_valid()) def test_custom_form_with_different_username_field(self): class CustomUserCreationForm(UserCreationForm): class Meta(UserCreationForm.Meta): model = CustomUser fields = ("email", "date_of_birth") data = { "email": "[email protected]", "password1": "testclient", "password2": "testclient", "date_of_birth": "1988-02-24", } form = CustomUserCreationForm(data) self.assertTrue(form.is_valid()) def test_custom_form_hidden_username_field(self): class CustomUserCreationForm(UserCreationForm): class Meta(UserCreationForm.Meta): model = CustomUserWithoutIsActiveField fields = ("email",) # without USERNAME_FIELD data = { "email": "[email protected]", "password1": "testclient", "password2": "testclient", } form = CustomUserCreationForm(data) self.assertTrue(form.is_valid()) def test_password_whitespace_not_stripped(self): data = { "username": "testuser", "password1": " testpassword ", "password2": " testpassword ", } form = UserCreationForm(data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data["password1"], data["password1"]) self.assertEqual(form.cleaned_data["password2"], data["password2"]) @override_settings( AUTH_PASSWORD_VALIDATORS=[ { "NAME": ( "django.contrib.auth.password_validation." "UserAttributeSimilarityValidator" ) }, ] ) def test_password_help_text(self): form = UserCreationForm() self.assertEqual( form.fields["password1"].help_text, "<ul><li>" "Your password can’t be too similar to your other personal information." "</li></ul>", ) @override_settings( AUTH_PASSWORD_VALIDATORS=[ { "NAME": ( "django.contrib.auth.password_validation." "UserAttributeSimilarityValidator" ) }, ] ) def test_user_create_form_validates_password_with_all_data(self): """UserCreationForm password validation uses all of the form's data.""" class CustomUserCreationForm(UserCreationForm): class Meta(UserCreationForm.Meta): model = User fields = ("username", "email", "first_name", "last_name") form = CustomUserCreationForm( { "username": "testuser", "password1": "testpassword", "password2": "testpassword", "first_name": "testpassword", "last_name": "lastname", } ) self.assertFalse(form.is_valid()) self.assertEqual( form.errors["password2"], ["The password is too similar to the first name."], ) def test_username_field_autocapitalize_none(self): form = UserCreationForm() self.assertEqual( form.fields["username"].widget.attrs.get("autocapitalize"), "none" ) def test_html_autocomplete_attributes(self): form = UserCreationForm() tests = ( ("username", "username"), ("password1", "new-password"), ("password2", "new-password"), ) for field_name, autocomplete in tests: with self.subTest(field_name=field_name, autocomplete=autocomplete): self.assertEqual( form.fields[field_name].widget.attrs["autocomplete"], autocomplete ) # To verify that the login form rejects inactive users, use an authentication # backend that allows them. @override_settings( AUTHENTICATION_BACKENDS=["django.contrib.auth.backends.AllowAllUsersModelBackend"] ) class AuthenticationFormTest(TestDataMixin, TestCase): def test_invalid_username(self): # The user submits an invalid username. 
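# The generic "invalid_login" error is interpolated with the username # field's verbose_name, so the message reads correctly for custom user models.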
data = { "username": "jsmith_does_not_exist", "password": "test123", } form = AuthenticationForm(None, data) self.assertFalse(form.is_valid()) self.assertEqual( form.non_field_errors(), [ form.error_messages["invalid_login"] % {"username": User._meta.get_field("username").verbose_name} ], ) def test_inactive_user(self): # The user is inactive. data = { "username": "inactive", "password": "password", } form = AuthenticationForm(None, data) self.assertFalse(form.is_valid()) self.assertEqual( form.non_field_errors(), [str(form.error_messages["inactive"])] ) # Use an authentication backend that rejects inactive users. @override_settings( AUTHENTICATION_BACKENDS=["django.contrib.auth.backends.ModelBackend"] ) def test_inactive_user_incorrect_password(self): """An invalid login doesn't leak the inactive status of a user.""" data = { "username": "inactive", "password": "incorrect", } form = AuthenticationForm(None, data) self.assertFalse(form.is_valid()) self.assertEqual( form.non_field_errors(), [ form.error_messages["invalid_login"] % {"username": User._meta.get_field("username").verbose_name} ], ) def test_login_failed(self): signal_calls = [] def signal_handler(**kwargs): signal_calls.append(kwargs) user_login_failed.connect(signal_handler) fake_request = object() try: form = AuthenticationForm( fake_request, { "username": "testclient", "password": "incorrect", }, ) self.assertFalse(form.is_valid()) self.assertIs(signal_calls[0]["request"], fake_request) finally: user_login_failed.disconnect(signal_handler) def test_inactive_user_i18n(self): with self.settings(USE_I18N=True), translation.override( "pt-br", deactivate=True ): # The user is inactive. data = { "username": "inactive", "password": "password", } form = AuthenticationForm(None, data) self.assertFalse(form.is_valid()) self.assertEqual( form.non_field_errors(), [str(form.error_messages["inactive"])] ) # Use an authentication backend that allows inactive users. @override_settings( AUTHENTICATION_BACKENDS=[ "django.contrib.auth.backends.AllowAllUsersModelBackend" ] ) def test_custom_login_allowed_policy(self): # The user is inactive, but our custom form policy allows them to log in. data = { "username": "inactive", "password": "password", } class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm): def confirm_login_allowed(self, user): pass form = AuthenticationFormWithInactiveUsersOkay(None, data) self.assertTrue(form.is_valid()) # Raise a ValidationError in the form to disallow some logins according # to custom logic. 
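# confirm_login_allowed() is AuthenticationForm's hook for per-user login # policy; a ValidationError raised there surfaces as a non-field error.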
class PickyAuthenticationForm(AuthenticationForm): def confirm_login_allowed(self, user): if user.username == "inactive": raise ValidationError("This user is disallowed.") raise ValidationError("Sorry, nobody's allowed in.") form = PickyAuthenticationForm(None, data) self.assertFalse(form.is_valid()) self.assertEqual(form.non_field_errors(), ["This user is disallowed."]) data = { "username": "testclient", "password": "password", } form = PickyAuthenticationForm(None, data) self.assertFalse(form.is_valid()) self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."]) def test_success(self): # The success case. data = { "username": "testclient", "password": "password", } form = AuthenticationForm(None, data) self.assertTrue(form.is_valid()) self.assertEqual(form.non_field_errors(), []) def test_unicode_username(self): User.objects.create_user(username="Σαρα", password="pwd") data = { "username": "Σαρα", "password": "pwd", } form = AuthenticationForm(None, data) self.assertTrue(form.is_valid()) self.assertEqual(form.non_field_errors(), []) @override_settings(AUTH_USER_MODEL="auth_tests.CustomEmailField") def test_username_field_max_length_matches_user_model(self): self.assertEqual(CustomEmailField._meta.get_field("username").max_length, 255) data = { "username": "u" * 255, "password": "pwd", "email": "[email protected]", } CustomEmailField.objects.create_user(**data) form = AuthenticationForm(None, data) self.assertEqual(form.fields["username"].max_length, 255) self.assertEqual(form.fields["username"].widget.attrs.get("maxlength"), 255) self.assertEqual(form.errors, {}) @override_settings(AUTH_USER_MODEL="auth_tests.IntegerUsernameUser") def test_username_field_max_length_defaults_to_254(self): self.assertIsNone(IntegerUsernameUser._meta.get_field("username").max_length) data = { "username": "0123456", "password": "password", } IntegerUsernameUser.objects.create_user(**data) form = AuthenticationForm(None, data) self.assertEqual(form.fields["username"].max_length, 254) self.assertEqual(form.fields["username"].widget.attrs.get("maxlength"), 254) self.assertEqual(form.errors, {}) def test_username_field_label(self): class CustomAuthenticationForm(AuthenticationForm): username = CharField(label="Name", max_length=75) form = CustomAuthenticationForm() self.assertEqual(form["username"].label, "Name") def test_username_field_label_not_set(self): class CustomAuthenticationForm(AuthenticationForm): username = CharField() form = CustomAuthenticationForm() username_field = User._meta.get_field(User.USERNAME_FIELD) self.assertEqual( form.fields["username"].label, capfirst(username_field.verbose_name) ) def test_username_field_autocapitalize_none(self): form = AuthenticationForm() self.assertEqual( form.fields["username"].widget.attrs.get("autocapitalize"), "none" ) def test_username_field_label_empty_string(self): class CustomAuthenticationForm(AuthenticationForm): username = CharField(label="") form = CustomAuthenticationForm() self.assertEqual(form.fields["username"].label, "") def test_password_whitespace_not_stripped(self): data = { "username": "testuser", "password": " pass ", } form = AuthenticationForm(None, data) form.is_valid() # Not necessary to have valid credentials for the test. 
self.assertEqual(form.cleaned_data["password"], data["password"]) @override_settings(AUTH_USER_MODEL="auth_tests.IntegerUsernameUser") def test_integer_username(self): class CustomAuthenticationForm(AuthenticationForm): username = IntegerField() user = IntegerUsernameUser.objects.create_user(username=0, password="pwd") data = { "username": 0, "password": "pwd", } form = CustomAuthenticationForm(None, data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data["username"], data["username"]) self.assertEqual(form.cleaned_data["password"], data["password"]) self.assertEqual(form.errors, {}) self.assertEqual(form.user_cache, user) def test_get_invalid_login_error(self): error = AuthenticationForm().get_invalid_login_error() self.assertIsInstance(error, ValidationError) self.assertEqual( error.message, "Please enter a correct %(username)s and password. Note that both " "fields may be case-sensitive.", ) self.assertEqual(error.code, "invalid_login") self.assertEqual(error.params, {"username": "username"}) def test_html_autocomplete_attributes(self): form = AuthenticationForm() tests = ( ("username", "username"), ("password", "current-password"), ) for field_name, autocomplete in tests: with self.subTest(field_name=field_name, autocomplete=autocomplete): self.assertEqual( form.fields[field_name].widget.attrs["autocomplete"], autocomplete ) def test_no_password(self): data = {"username": "username"} form = AuthenticationForm(None, data) self.assertIs(form.is_valid(), False) self.assertEqual( form["password"].errors, [Field.default_error_messages["required"]] ) class SetPasswordFormTest(TestDataMixin, TestCase): def test_password_verification(self): # The two new passwords do not match. user = User.objects.get(username="testclient") data = { "new_password1": "abc123", "new_password2": "abc", } form = SetPasswordForm(user, data) self.assertFalse(form.is_valid()) self.assertEqual( form["new_password2"].errors, [str(form.error_messages["password_mismatch"])], ) @mock.patch("django.contrib.auth.password_validation.password_changed") def test_success(self, password_changed): user = User.objects.get(username="testclient") data = { "new_password1": "abc123", "new_password2": "abc123", } form = SetPasswordForm(user, data) self.assertTrue(form.is_valid()) form.save(commit=False) self.assertEqual(password_changed.call_count, 0) form.save() self.assertEqual(password_changed.call_count, 1) @override_settings( AUTH_PASSWORD_VALIDATORS=[ { "NAME": ( "django.contrib.auth.password_validation." "UserAttributeSimilarityValidator" ) }, { "NAME": ( "django.contrib.auth.password_validation.MinimumLengthValidator" ), "OPTIONS": { "min_length": 12, }, }, ] ) def test_validates_password(self): user = User.objects.get(username="testclient") data = { "new_password1": "testclient", "new_password2": "testclient", } form = SetPasswordForm(user, data) self.assertFalse(form.is_valid()) self.assertEqual(len(form["new_password2"].errors), 2) self.assertIn( "The password is too similar to the username.", form["new_password2"].errors ) self.assertIn( "This password is too short. 
It must contain at least 12 characters.", form["new_password2"].errors, ) def test_no_password(self): user = User.objects.get(username="testclient") data = {"new_password1": "new-password"} form = SetPasswordForm(user, data) self.assertIs(form.is_valid(), False) self.assertEqual( form["new_password2"].errors, [Field.default_error_messages["required"]] ) form = SetPasswordForm(user, {}) self.assertIs(form.is_valid(), False) self.assertEqual( form["new_password1"].errors, [Field.default_error_messages["required"]] ) self.assertEqual( form["new_password2"].errors, [Field.default_error_messages["required"]] ) def test_password_whitespace_not_stripped(self): user = User.objects.get(username="testclient") data = { "new_password1": " password ", "new_password2": " password ", } form = SetPasswordForm(user, data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data["new_password1"], data["new_password1"]) self.assertEqual(form.cleaned_data["new_password2"], data["new_password2"]) @override_settings( AUTH_PASSWORD_VALIDATORS=[ { "NAME": ( "django.contrib.auth.password_validation." "UserAttributeSimilarityValidator" ) }, { "NAME": ( "django.contrib.auth.password_validation.MinimumLengthValidator" ), "OPTIONS": { "min_length": 12, }, }, ] ) def test_help_text_translation(self): french_help_texts = [ "Votre mot de passe ne peut pas trop ressembler à vos autres informations " "personnelles.", "Votre mot de passe doit contenir au minimum 12 caractères.", ] form = SetPasswordForm(self.u1) with translation.override("fr"): html = form.as_p() for french_text in french_help_texts: self.assertIn(french_text, html) def test_html_autocomplete_attributes(self): form = SetPasswordForm(self.u1) tests = ( ("new_password1", "new-password"), ("new_password2", "new-password"), ) for field_name, autocomplete in tests: with self.subTest(field_name=field_name, autocomplete=autocomplete): self.assertEqual( form.fields[field_name].widget.attrs["autocomplete"], autocomplete ) class PasswordChangeFormTest(TestDataMixin, TestCase): def test_incorrect_password(self): user = User.objects.get(username="testclient") data = { "old_password": "test", "new_password1": "abc123", "new_password2": "abc123", } form = PasswordChangeForm(user, data) self.assertFalse(form.is_valid()) self.assertEqual( form["old_password"].errors, [str(form.error_messages["password_incorrect"])], ) def test_password_verification(self): # The two new passwords do not match. user = User.objects.get(username="testclient") data = { "old_password": "password", "new_password1": "abc123", "new_password2": "abc", } form = PasswordChangeForm(user, data) self.assertFalse(form.is_valid()) self.assertEqual( form["new_password2"].errors, [str(form.error_messages["password_mismatch"])], ) @mock.patch("django.contrib.auth.password_validation.password_changed") def test_success(self, password_changed): # The success case. 
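# PasswordChangeForm builds on SetPasswordForm and adds the old_password # check; the success path otherwise mirrors SetPasswordForm above.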
user = User.objects.get(username="testclient") data = { "old_password": "password", "new_password1": "abc123", "new_password2": "abc123", } form = PasswordChangeForm(user, data) self.assertTrue(form.is_valid()) form.save(commit=False) self.assertEqual(password_changed.call_count, 0) form.save() self.assertEqual(password_changed.call_count, 1) def test_field_order(self): # Regression test - check the order of fields: user = User.objects.get(username="testclient") self.assertEqual( list(PasswordChangeForm(user, {}).fields), ["old_password", "new_password1", "new_password2"], ) def test_password_whitespace_not_stripped(self): user = User.objects.get(username="testclient") user.set_password(" oldpassword ") data = { "old_password": " oldpassword ", "new_password1": " pass ", "new_password2": " pass ", } form = PasswordChangeForm(user, data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data["old_password"], data["old_password"]) self.assertEqual(form.cleaned_data["new_password1"], data["new_password1"]) self.assertEqual(form.cleaned_data["new_password2"], data["new_password2"]) def test_html_autocomplete_attributes(self): user = User.objects.get(username="testclient") form = PasswordChangeForm(user) self.assertEqual( form.fields["old_password"].widget.attrs["autocomplete"], "current-password" ) class UserChangeFormTest(TestDataMixin, TestCase): def test_username_validity(self): user = User.objects.get(username="testclient") data = {"username": "not valid"} form = UserChangeForm(data, instance=user) self.assertFalse(form.is_valid()) validator = next( v for v in User._meta.get_field("username").validators if v.code == "invalid" ) self.assertEqual(form["username"].errors, [str(validator.message)]) def test_bug_14242(self): # A regression test, introduced by adding an optimization for the # UserChangeForm. class MyUserForm(UserChangeForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields[ "groups" ].help_text = "These groups give users different permissions" class Meta(UserChangeForm.Meta): fields = ("groups",) # Just check we can create it MyUserForm({}) def test_unusable_password(self): user = User.objects.get(username="empty_password") user.set_unusable_password() user.save() form = UserChangeForm(instance=user) self.assertIn(_("No password set."), form.as_table()) def test_bug_17944_empty_password(self): user = User.objects.get(username="empty_password") form = UserChangeForm(instance=user) self.assertIn(_("No password set."), form.as_table()) def test_bug_17944_unmanageable_password(self): user = User.objects.get(username="unmanageable_password") form = UserChangeForm(instance=user) self.assertIn( _("Invalid password format or unknown hashing algorithm."), form.as_table() ) def test_bug_17944_unknown_password_algorithm(self): user = User.objects.get(username="unknown_password") form = UserChangeForm(instance=user) self.assertIn( _("Invalid password format or unknown hashing algorithm."), form.as_table() ) def test_bug_19133(self): "The change form does not return the password value" # Use the form to construct the POST data user = User.objects.get(username="testclient") form_for_data = UserChangeForm(instance=user) post_data = form_for_data.initial # The password field should be readonly, so anything # posted here should be ignored; the form will be # valid, and give back the 'initial' value for the # password field. 
post_data["password"] = "new password" form = UserChangeForm(instance=user, data=post_data) self.assertTrue(form.is_valid()) # original hashed password contains $ self.assertIn("$", form.cleaned_data["password"]) def test_bug_19349_bound_password_field(self): user = User.objects.get(username="testclient") form = UserChangeForm(data={}, instance=user) # When rendering the bound password field, # ReadOnlyPasswordHashWidget needs the initial # value to render correctly self.assertEqual(form.initial["password"], form["password"].value()) @override_settings(ROOT_URLCONF="auth_tests.urls_admin") def test_link_to_password_reset_in_helptext_via_to_field(self): user = User.objects.get(username="testclient") form = UserChangeForm(data={}, instance=user) password_help_text = form.fields["password"].help_text matches = re.search('<a href="(.*?)">', password_help_text) # URL to UserChangeForm in admin via to_field (instead of pk). admin_user_change_url = reverse( f"admin:{user._meta.app_label}_{user._meta.model_name}_change", args=(user.username,), ) joined_url = urllib.parse.urljoin(admin_user_change_url, matches.group(1)) pw_change_url = reverse( f"admin:{user._meta.app_label}_{user._meta.model_name}_password_change", args=(user.pk,), ) self.assertEqual(joined_url, pw_change_url) def test_custom_form(self): class CustomUserChangeForm(UserChangeForm): class Meta(UserChangeForm.Meta): model = ExtensionUser fields = ( "username", "password", "date_of_birth", ) user = User.objects.get(username="testclient") data = { "username": "testclient", "password": "testclient", "date_of_birth": "1998-02-24", } form = CustomUserChangeForm(data, instance=user) self.assertTrue(form.is_valid()) form.save() self.assertEqual(form.cleaned_data["username"], "testclient") self.assertEqual(form.cleaned_data["date_of_birth"], datetime.date(1998, 2, 24)) def test_password_excluded(self): class UserChangeFormWithoutPassword(UserChangeForm): password = None class Meta: model = User exclude = ["password"] form = UserChangeFormWithoutPassword() self.assertNotIn("password", form.fields) def test_username_field_autocapitalize_none(self): form = UserChangeForm() self.assertEqual( form.fields["username"].widget.attrs.get("autocapitalize"), "none" ) @override_settings(TEMPLATES=AUTH_TEMPLATES) class PasswordResetFormTest(TestDataMixin, TestCase): @classmethod def setUpClass(cls): super().setUpClass() # This cleanup is necessary because contrib.sites cache # makes tests interfere with each other, see #11505 Site.objects.clear_cache() def create_dummy_user(self): """ Create a user and return a tuple (user_object, username, email). 
""" username = "jsmith" email = "[email protected]" user = User.objects.create_user(username, email, "test123") return (user, username, email) def test_invalid_email(self): data = {"email": "not valid"} form = PasswordResetForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form["email"].errors, [_("Enter a valid email address.")]) def test_user_email_unicode_collision(self): User.objects.create_user("mike123", "[email protected]", "test123") User.objects.create_user("mike456", "mı[email protected]", "test123") data = {"email": "mı[email protected]"} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].to, ["mı[email protected]"]) def test_user_email_domain_unicode_collision(self): User.objects.create_user("mike123", "[email protected]", "test123") User.objects.create_user("mike456", "mike@ıxample.org", "test123") data = {"email": "mike@ıxample.org"} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].to, ["mike@ıxample.org"]) def test_user_email_unicode_collision_nonexistent(self): User.objects.create_user("mike123", "[email protected]", "test123") data = {"email": "mı[email protected]"} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(len(mail.outbox), 0) def test_user_email_domain_unicode_collision_nonexistent(self): User.objects.create_user("mike123", "[email protected]", "test123") data = {"email": "mike@ıxample.org"} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(len(mail.outbox), 0) def test_nonexistent_email(self): """ Test nonexistent email address. This should not fail because it would expose information about registered users. """ data = {"email": "[email protected]"} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) self.assertEqual(len(mail.outbox), 0) def test_cleaned_data(self): (user, username, email) = self.create_dummy_user() data = {"email": email} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) form.save(domain_override="example.com") self.assertEqual(form.cleaned_data["email"], email) self.assertEqual(len(mail.outbox), 1) def test_custom_email_subject(self): data = {"email": "[email protected]"} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) # Since we're not providing a request object, we must provide a # domain_override to prevent the save operation from failing in the # potential case where contrib.sites is not installed. Refs #16412. 
form.save(domain_override="example.com") self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, "Custom password reset on example.com") def test_custom_email_constructor(self): data = {"email": "[email protected]"} class CustomEmailPasswordResetForm(PasswordResetForm): def send_mail( self, subject_template_name, email_template_name, context, from_email, to_email, html_email_template_name=None, ): EmailMultiAlternatives( "Forgot your password?", "Sorry to hear you forgot your password.", None, [to_email], ["[email protected]"], headers={"Reply-To": "[email protected]"}, alternatives=[ ("Really sorry to hear you forgot your password.", "text/html") ], ).send() form = CustomEmailPasswordResetForm(data) self.assertTrue(form.is_valid()) # Since we're not providing a request object, we must provide a # domain_override to prevent the save operation from failing in the # potential case where contrib.sites is not installed. Refs #16412. form.save(domain_override="example.com") self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, "Forgot your password?") self.assertEqual(mail.outbox[0].bcc, ["[email protected]"]) self.assertEqual(mail.outbox[0].content_subtype, "plain") def test_preserve_username_case(self): """ Preserve the case of the user name (before the @ in the email address) when creating a user (#5605). """ user = User.objects.create_user("forms_test2", "[email protected]", "test") self.assertEqual(user.email, "[email protected]") user = User.objects.create_user("forms_test3", "tesT", "test") self.assertEqual(user.email, "tesT") def test_inactive_user(self): """ Inactive user cannot receive password reset email. """ (user, username, email) = self.create_dummy_user() user.is_active = False user.save() form = PasswordResetForm({"email": email}) self.assertTrue(form.is_valid()) form.save() self.assertEqual(len(mail.outbox), 0) def test_unusable_password(self): user = User.objects.create_user("testuser", "[email protected]", "test") data = {"email": "[email protected]"} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) user.set_unusable_password() user.save() form = PasswordResetForm(data) # The form itself is valid, but no email is sent self.assertTrue(form.is_valid()) form.save() self.assertEqual(len(mail.outbox), 0) def test_save_plaintext_email(self): """ Test the PasswordResetForm.save() method with no html_email_template_name parameter passed in. Test to ensure original behavior is unchanged after the parameter was added. """ (user, username, email) = self.create_dummy_user() form = PasswordResetForm({"email": email}) self.assertTrue(form.is_valid()) form.save() self.assertEqual(len(mail.outbox), 1) message = mail.outbox[0].message() self.assertFalse(message.is_multipart()) self.assertEqual(message.get_content_type(), "text/plain") self.assertEqual(message.get("subject"), "Custom password reset on example.com") self.assertEqual(len(mail.outbox[0].alternatives), 0) self.assertEqual(message.get_all("to"), [email]) self.assertTrue( re.match(r"^http://example.com/reset/[\w+/-]", message.get_payload()) ) def test_save_html_email_template_name(self): """ Test the PasswordResetForm.save() method with html_email_template_name parameter specified. Test to ensure that a multipart email is sent with both text/plain and text/html parts. 
""" (user, username, email) = self.create_dummy_user() form = PasswordResetForm({"email": email}) self.assertTrue(form.is_valid()) form.save( html_email_template_name="registration/html_password_reset_email.html" ) self.assertEqual(len(mail.outbox), 1) self.assertEqual(len(mail.outbox[0].alternatives), 1) message = mail.outbox[0].message() self.assertEqual(message.get("subject"), "Custom password reset on example.com") self.assertEqual(len(message.get_payload()), 2) self.assertTrue(message.is_multipart()) self.assertEqual(message.get_payload(0).get_content_type(), "text/plain") self.assertEqual(message.get_payload(1).get_content_type(), "text/html") self.assertEqual(message.get_all("to"), [email]) self.assertTrue( re.match( r"^http://example.com/reset/[\w/-]+", message.get_payload(0).get_payload(), ) ) self.assertTrue( re.match( r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$', message.get_payload(1).get_payload(), ) ) @override_settings(AUTH_USER_MODEL="auth_tests.CustomEmailField") def test_custom_email_field(self): email = "[email protected]" CustomEmailField.objects.create_user("test name", "test password", email) form = PasswordResetForm({"email": email}) self.assertTrue(form.is_valid()) form.save() self.assertEqual(form.cleaned_data["email"], email) self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].to, [email]) def test_html_autocomplete_attributes(self): form = PasswordResetForm() self.assertEqual(form.fields["email"].widget.attrs["autocomplete"], "email") class ReadOnlyPasswordHashTest(SimpleTestCase): def test_bug_19349_render_with_none_value(self): # Rendering the widget with value set to None # mustn't raise an exception. widget = ReadOnlyPasswordHashWidget() html = widget.render(name="password", value=None, attrs={}) self.assertIn(_("No password set."), html) @override_settings( PASSWORD_HASHERS=["django.contrib.auth.hashers.PBKDF2PasswordHasher"] ) def test_render(self): widget = ReadOnlyPasswordHashWidget() value = ( "pbkdf2_sha256$100000$a6Pucb1qSFcD$WmCkn9Hqidj48NVe5x0FEM6A9YiOqQcl/83m2Z5u" "dm0=" ) self.assertHTMLEqual( widget.render("name", value, {"id": "id_password"}), '<div id="id_password">' " <strong>algorithm</strong>: <bdi>pbkdf2_sha256</bdi>" " <strong>iterations</strong>: <bdi>100000</bdi>" " <strong>salt</strong>: <bdi>a6Pucb******</bdi>" " <strong>hash</strong>: " " <bdi>WmCkn9**************************************</bdi>" "</div>", ) def test_readonly_field_has_changed(self): field = ReadOnlyPasswordHashField() self.assertIs(field.disabled, True) self.assertFalse(field.has_changed("aaa", "bbb")) def test_label(self): """ ReadOnlyPasswordHashWidget doesn't contain a for attribute in the <label> because it doesn't have any labelable elements. 
""" class TestForm(forms.Form): hash_field = ReadOnlyPasswordHashField() bound_field = TestForm()["hash_field"] self.assertIsNone(bound_field.field.widget.id_for_label("id")) self.assertEqual(bound_field.label_tag(), "<label>Hash field:</label>") class AdminPasswordChangeFormTest(TestDataMixin, TestCase): @mock.patch("django.contrib.auth.password_validation.password_changed") def test_success(self, password_changed): user = User.objects.get(username="testclient") data = { "password1": "test123", "password2": "test123", } form = AdminPasswordChangeForm(user, data) self.assertTrue(form.is_valid()) form.save(commit=False) self.assertEqual(password_changed.call_count, 0) form.save() self.assertEqual(password_changed.call_count, 1) self.assertEqual(form.changed_data, ["password"]) def test_password_whitespace_not_stripped(self): user = User.objects.get(username="testclient") data = { "password1": " pass ", "password2": " pass ", } form = AdminPasswordChangeForm(user, data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data["password1"], data["password1"]) self.assertEqual(form.cleaned_data["password2"], data["password2"]) self.assertEqual(form.changed_data, ["password"]) def test_non_matching_passwords(self): user = User.objects.get(username="testclient") data = {"password1": "password1", "password2": "password2"} form = AdminPasswordChangeForm(user, data) self.assertEqual( form.errors["password2"], [form.error_messages["password_mismatch"]] ) self.assertEqual(form.changed_data, ["password"]) def test_missing_passwords(self): user = User.objects.get(username="testclient") data = {"password1": "", "password2": ""} form = AdminPasswordChangeForm(user, data) required_error = [Field.default_error_messages["required"]] self.assertEqual(form.errors["password1"], required_error) self.assertEqual(form.errors["password2"], required_error) self.assertEqual(form.changed_data, []) def test_one_password(self): user = User.objects.get(username="testclient") form1 = AdminPasswordChangeForm(user, {"password1": "", "password2": "test"}) required_error = [Field.default_error_messages["required"]] self.assertEqual(form1.errors["password1"], required_error) self.assertNotIn("password2", form1.errors) self.assertEqual(form1.changed_data, []) form2 = AdminPasswordChangeForm(user, {"password1": "test", "password2": ""}) self.assertEqual(form2.errors["password2"], required_error) self.assertNotIn("password1", form2.errors) self.assertEqual(form2.changed_data, []) def test_html_autocomplete_attributes(self): user = User.objects.get(username="testclient") form = AdminPasswordChangeForm(user) tests = ( ("password1", "new-password"), ("password2", "new-password"), ) for field_name, autocomplete in tests: with self.subTest(field_name=field_name, autocomplete=autocomplete): self.assertEqual( form.fields[field_name].widget.attrs["autocomplete"], autocomplete )
import decimal import enum import json import unittest import uuid from django import forms from django.contrib.admin.utils import display_for_field from django.core import checks, exceptions, serializers, validators from django.core.exceptions import FieldError from django.core.management import call_command from django.db import IntegrityError, connection, models from django.db.models.expressions import Exists, OuterRef, RawSQL, Value from django.db.models.functions import Cast, JSONObject, Upper from django.test import ( TransactionTestCase, modify_settings, override_settings, skipUnlessDBFeature, ) from django.test.utils import isolate_apps from django.utils import timezone from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase, PostgreSQLWidgetTestCase from .models import ( ArrayEnumModel, ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel, NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel, PostgreSQLModel, Tag, ) try: from psycopg2.extras import NumericRange from django.contrib.postgres.aggregates import ArrayAgg from django.contrib.postgres.expressions import ArraySubquery from django.contrib.postgres.fields import ArrayField from django.contrib.postgres.fields.array import IndexTransform, SliceTransform from django.contrib.postgres.forms import ( SimpleArrayField, SplitArrayField, SplitArrayWidget, ) except ImportError: pass @isolate_apps("postgres_tests") class BasicTests(PostgreSQLSimpleTestCase): def test_get_field_display(self): class MyModel(PostgreSQLModel): field = ArrayField( models.CharField(max_length=16), choices=[ ["Media", [(["vinyl", "cd"], "Audio")]], (("mp3", "mp4"), "Digital"), ], ) tests = ( (["vinyl", "cd"], "Audio"), (("mp3", "mp4"), "Digital"), (("a", "b"), "('a', 'b')"), (["c", "d"], "['c', 'd']"), ) for value, display in tests: with self.subTest(value=value, display=display): instance = MyModel(field=value) self.assertEqual(instance.get_field_display(), display) def test_get_field_display_nested_array(self): class MyModel(PostgreSQLModel): field = ArrayField( ArrayField(models.CharField(max_length=16)), choices=[ [ "Media", [([["vinyl", "cd"], ("x",)], "Audio")], ], ((["mp3"], ("mp4",)), "Digital"), ], ) tests = ( ([["vinyl", "cd"], ("x",)], "Audio"), ((["mp3"], ("mp4",)), "Digital"), ((("a", "b"), ("c",)), "(('a', 'b'), ('c',))"), ([["a", "b"], ["c"]], "[['a', 'b'], ['c']]"), ) for value, display in tests: with self.subTest(value=value, display=display): instance = MyModel(field=value) self.assertEqual(instance.get_field_display(), display) class TestSaveLoad(PostgreSQLTestCase): def test_integer(self): instance = IntegerArrayModel(field=[1, 2, 3]) instance.save() loaded = IntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_char(self): instance = CharArrayModel(field=["hello", "goodbye"]) instance.save() loaded = CharArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_dates(self): instance = DateTimeArrayModel( datetimes=[timezone.now()], dates=[timezone.now().date()], times=[timezone.now().time()], ) instance.save() loaded = DateTimeArrayModel.objects.get() self.assertEqual(instance.datetimes, loaded.datetimes) self.assertEqual(instance.dates, loaded.dates) self.assertEqual(instance.times, loaded.times) def test_tuples(self): instance = IntegerArrayModel(field=(1,)) instance.save() loaded = IntegerArrayModel.objects.get() self.assertSequenceEqual(instance.field, loaded.field) def test_integers_passed_as_strings(self): # This checks that 
get_prep_value is deferred properly instance = IntegerArrayModel(field=["1"]) instance.save() loaded = IntegerArrayModel.objects.get() self.assertEqual(loaded.field, [1]) def test_default_null(self): instance = NullableIntegerArrayModel() instance.save() loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk) self.assertIsNone(loaded.field) self.assertEqual(instance.field, loaded.field) def test_null_handling(self): instance = NullableIntegerArrayModel(field=None) instance.save() loaded = NullableIntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) instance = IntegerArrayModel(field=None) with self.assertRaises(IntegrityError): instance.save() def test_nested(self): instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]]) instance.save() loaded = NestedIntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_other_array_types(self): instance = OtherTypesArrayModel( ips=["192.168.0.1", "::1"], uuids=[uuid.uuid4()], decimals=[decimal.Decimal(1.25), 1.75], tags=[Tag(1), Tag(2), Tag(3)], json=[{"a": 1}, {"b": 2}], int_ranges=[NumericRange(10, 20), NumericRange(30, 40)], bigint_ranges=[ NumericRange(7000000000, 10000000000), NumericRange(50000000000, 70000000000), ], ) instance.save() loaded = OtherTypesArrayModel.objects.get() self.assertEqual(instance.ips, loaded.ips) self.assertEqual(instance.uuids, loaded.uuids) self.assertEqual(instance.decimals, loaded.decimals) self.assertEqual(instance.tags, loaded.tags) self.assertEqual(instance.json, loaded.json) self.assertEqual(instance.int_ranges, loaded.int_ranges) self.assertEqual(instance.bigint_ranges, loaded.bigint_ranges) def test_null_from_db_value_handling(self): instance = OtherTypesArrayModel.objects.create( ips=["192.168.0.1", "::1"], uuids=[uuid.uuid4()], decimals=[decimal.Decimal(1.25), 1.75], tags=None, ) instance.refresh_from_db() self.assertIsNone(instance.tags) self.assertEqual(instance.json, []) self.assertIsNone(instance.int_ranges) self.assertIsNone(instance.bigint_ranges) def test_model_set_on_base_field(self): instance = IntegerArrayModel() field = instance._meta.get_field("field") self.assertEqual(field.model, IntegerArrayModel) self.assertEqual(field.base_field.model, IntegerArrayModel) def test_nested_nullable_base_field(self): instance = NullableIntegerArrayModel.objects.create( field_nested=[[None, None], [None, None]], ) self.assertEqual(instance.field_nested, [[None, None], [None, None]]) class TestQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.objs = NullableIntegerArrayModel.objects.bulk_create( [ NullableIntegerArrayModel(order=1, field=[1]), NullableIntegerArrayModel(order=2, field=[2]), NullableIntegerArrayModel(order=3, field=[2, 3]), NullableIntegerArrayModel(order=4, field=[20, 30, 40]), NullableIntegerArrayModel(order=5, field=None), ] ) def test_empty_list(self): NullableIntegerArrayModel.objects.create(field=[]) obj = ( NullableIntegerArrayModel.objects.annotate( empty_array=models.Value( [], output_field=ArrayField(models.IntegerField()) ), ) .filter(field=models.F("empty_array")) .get() ) self.assertEqual(obj.field, []) self.assertEqual(obj.empty_array, []) def test_exact(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__exact=[1]), self.objs[:1] ) def test_exact_null_only_array(self): obj = NullableIntegerArrayModel.objects.create( field=[None], field_nested=[None, None] ) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__exact=[None]), [obj] ) 
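# The same exact-match semantics must hold for the nested array column.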
self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field_nested__exact=[None, None]), [obj], ) def test_exact_null_only_nested_array(self): obj1 = NullableIntegerArrayModel.objects.create(field_nested=[[None, None]]) obj2 = NullableIntegerArrayModel.objects.create( field_nested=[[None, None], [None, None]], ) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( field_nested__exact=[[None, None]], ), [obj1], ) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( field_nested__exact=[[None, None], [None, None]], ), [obj2], ) def test_exact_with_expression(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__exact=[Value(1)]), self.objs[:1], ) def test_exact_charfield(self): instance = CharArrayModel.objects.create(field=["text"]) self.assertSequenceEqual( CharArrayModel.objects.filter(field=["text"]), [instance] ) def test_exact_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]), [instance] ) def test_isnull(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__isnull=True), self.objs[-1:] ) def test_gt(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__gt=[0]), self.objs[:4] ) def test_lt(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__lt=[2]), self.objs[:1] ) def test_in(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]), self.objs[:2], ) def test_in_subquery(self): IntegerArrayModel.objects.create(field=[2, 3]) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( field__in=IntegerArrayModel.objects.values_list("field", flat=True) ), self.objs[2:3], ) @unittest.expectedFailure def test_in_including_F_object(self): # This test asserts that Array objects passed to filters can be # constructed to contain F objects. This currently doesn't work as the # psycopg2 mogrify method that generates the ARRAY() syntax is # expecting literals, not column references (#27095). 
self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__in=[[models.F("id")]]), self.objs[:2], ) def test_in_as_F_object(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__in=[models.F("field")]), self.objs[:4], ) def test_contained_by(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]), self.objs[:2], ) def test_contained_by_including_F_object(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( field__contained_by=[models.F("order"), 2] ), self.objs[:3], ) def test_contains(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contains=[2]), self.objs[1:3], ) def test_contains_subquery(self): IntegerArrayModel.objects.create(field=[2, 3]) inner_qs = IntegerArrayModel.objects.values_list("field", flat=True) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contains=inner_qs[:1]), self.objs[2:3], ) inner_qs = IntegerArrayModel.objects.filter(field__contains=OuterRef("field")) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(Exists(inner_qs)), self.objs[1:3], ) def test_contains_including_expression(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( field__contains=[2, Value(6) / Value(2)], ), self.objs[2:3], ) def test_icontains(self): # Using the __icontains lookup with ArrayField is inefficient. instance = CharArrayModel.objects.create(field=["FoO"]) self.assertSequenceEqual( CharArrayModel.objects.filter(field__icontains="foo"), [instance] ) def test_contains_charfield(self): # Regression for #22907 self.assertSequenceEqual( CharArrayModel.objects.filter(field__contains=["text"]), [] ) def test_contained_by_charfield(self): self.assertSequenceEqual( CharArrayModel.objects.filter(field__contained_by=["text"]), [] ) def test_overlap_charfield(self): self.assertSequenceEqual( CharArrayModel.objects.filter(field__overlap=["text"]), [] ) def test_overlap_charfield_including_expression(self): obj_1 = CharArrayModel.objects.create(field=["TEXT", "lower text"]) obj_2 = CharArrayModel.objects.create(field=["lower text", "TEXT"]) CharArrayModel.objects.create(field=["lower text", "text"]) self.assertSequenceEqual( CharArrayModel.objects.filter( field__overlap=[ Upper(Value("text")), "other", ] ), [obj_1, obj_2], ) def test_lookups_autofield_array(self): qs = ( NullableIntegerArrayModel.objects.filter( field__0__isnull=False, ) .values("field__0") .annotate( arrayagg=ArrayAgg("id"), ) .order_by("field__0") ) tests = ( ("contained_by", [self.objs[1].pk, self.objs[2].pk, 0], [2]), ("contains", [self.objs[2].pk], [2]), ("exact", [self.objs[3].pk], [20]), ("overlap", [self.objs[1].pk, self.objs[3].pk], [2, 20]), ) for lookup, value, expected in tests: with self.subTest(lookup=lookup): self.assertSequenceEqual( qs.filter( **{"arrayagg__" + lookup: value}, ).values_list("field__0", flat=True), expected, ) @skipUnlessDBFeature("allows_group_by_refs") def test_group_by_order_by_aliases(self): with self.assertNumQueries(1) as ctx: self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( field__0__isnull=False, ) .values("field__0") .annotate(arrayagg=ArrayAgg("id")) .order_by("field__0"), [ {"field__0": 1, "arrayagg": [self.objs[0].pk]}, {"field__0": 2, "arrayagg": [self.objs[1].pk, self.objs[2].pk]}, {"field__0": 20, "arrayagg": [self.objs[3].pk]}, ], ) alias = connection.ops.quote_name("field__0") sql = ctx[0]["sql"] self.assertIn(f"GROUP BY {alias}", sql) self.assertIn(f"ORDER BY 
{alias}", sql) def test_index(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0=2), self.objs[1:3] ) def test_index_chained(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0__lt=3), self.objs[0:3] ) def test_index_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0__0=1), [instance] ) @unittest.expectedFailure def test_index_used_on_nested_data(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0=[1, 2]), [instance] ) def test_index_transform_expression(self): expr = RawSQL("string_to_array(%s, ';')", ["1;2"]) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( field__0=Cast( IndexTransform(1, models.IntegerField, expr), output_field=models.IntegerField(), ), ), self.objs[:1], ) def test_index_annotation(self): qs = NullableIntegerArrayModel.objects.annotate(second=models.F("field__1")) self.assertCountEqual( qs.values_list("second", flat=True), [None, None, None, 3, 30], ) def test_overlap(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]), self.objs[0:3], ) def test_len(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__len__lte=2), self.objs[0:3] ) def test_len_empty_array(self): obj = NullableIntegerArrayModel.objects.create(field=[]) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__len=0), [obj] ) def test_slice(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0_1=[2]), self.objs[1:3] ) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]), self.objs[2:3] ) def test_order_by_slice(self): more_objs = ( NullableIntegerArrayModel.objects.create(field=[1, 637]), NullableIntegerArrayModel.objects.create(field=[2, 1]), NullableIntegerArrayModel.objects.create(field=[3, -98123]), NullableIntegerArrayModel.objects.create(field=[4, 2]), ) self.assertSequenceEqual( NullableIntegerArrayModel.objects.order_by("field__1"), [ more_objs[2], more_objs[1], more_objs[3], self.objs[2], self.objs[3], more_objs[0], self.objs[4], self.objs[1], self.objs[0], ], ) @unittest.expectedFailure def test_slice_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]), [instance] ) def test_slice_transform_expression(self): expr = RawSQL("string_to_array(%s, ';')", ["9;2;3"]) self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( field__0_2=SliceTransform(2, 3, expr) ), self.objs[2:3], ) def test_slice_annotation(self): qs = NullableIntegerArrayModel.objects.annotate( first_two=models.F("field__0_2"), ) self.assertCountEqual( qs.values_list("first_two", flat=True), [None, [1], [2], [2, 3], [20, 30]], ) def test_usage_in_subquery(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( id__in=NullableIntegerArrayModel.objects.filter(field__len=3) ), [self.objs[3]], ) def test_enum_lookup(self): class TestEnum(enum.Enum): VALUE_1 = "value_1" instance = ArrayEnumModel.objects.create(array_of_enums=[TestEnum.VALUE_1]) self.assertSequenceEqual( ArrayEnumModel.objects.filter(array_of_enums__contains=[TestEnum.VALUE_1]), [instance], ) def test_unsupported_lookup(self): msg = ( "Unsupported lookup '0_bar' for ArrayField or join on 
the field not " "permitted." ) with self.assertRaisesMessage(FieldError, msg): list(NullableIntegerArrayModel.objects.filter(field__0_bar=[2])) msg = ( "Unsupported lookup '0bar' for ArrayField or join on the field not " "permitted." ) with self.assertRaisesMessage(FieldError, msg): list(NullableIntegerArrayModel.objects.filter(field__0bar=[2])) def test_grouping_by_annotations_with_array_field_param(self): value = models.Value([1], output_field=ArrayField(models.IntegerField())) self.assertEqual( NullableIntegerArrayModel.objects.annotate( array_length=models.Func( value, 1, function="ARRAY_LENGTH", output_field=models.IntegerField(), ), ) .values("array_length") .annotate( count=models.Count("pk"), ) .get()["array_length"], 1, ) def test_filter_by_array_subquery(self): inner_qs = NullableIntegerArrayModel.objects.filter( field__len=models.OuterRef("field__len"), ).values("field") self.assertSequenceEqual( NullableIntegerArrayModel.objects.alias( same_sized_fields=ArraySubquery(inner_qs), ).filter(same_sized_fields__len__gt=1), self.objs[0:2], ) def test_annotated_array_subquery(self): inner_qs = NullableIntegerArrayModel.objects.exclude( pk=models.OuterRef("pk") ).values("order") self.assertSequenceEqual( NullableIntegerArrayModel.objects.annotate( sibling_ids=ArraySubquery(inner_qs), ) .get(order=1) .sibling_ids, [2, 3, 4, 5], ) def test_group_by_with_annotated_array_subquery(self): inner_qs = NullableIntegerArrayModel.objects.exclude( pk=models.OuterRef("pk") ).values("order") self.assertSequenceEqual( NullableIntegerArrayModel.objects.annotate( sibling_ids=ArraySubquery(inner_qs), sibling_count=models.Max("sibling_ids__len"), ).values_list("sibling_count", flat=True), [len(self.objs) - 1] * len(self.objs), ) def test_annotated_ordered_array_subquery(self): inner_qs = NullableIntegerArrayModel.objects.order_by("-order").values("order") self.assertSequenceEqual( NullableIntegerArrayModel.objects.annotate( ids=ArraySubquery(inner_qs), ) .first() .ids, [5, 4, 3, 2, 1], ) def test_annotated_array_subquery_with_json_objects(self): inner_qs = NullableIntegerArrayModel.objects.exclude( pk=models.OuterRef("pk") ).values(json=JSONObject(order="order", field="field")) siblings_json = ( NullableIntegerArrayModel.objects.annotate( siblings_json=ArraySubquery(inner_qs), ) .values_list("siblings_json", flat=True) .get(order=1) ) self.assertSequenceEqual( siblings_json, [ {"field": [2], "order": 2}, {"field": [2, 3], "order": 3}, {"field": [20, 30, 40], "order": 4}, {"field": None, "order": 5}, ], ) class TestDateTimeExactQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): now = timezone.now() cls.datetimes = [now] cls.dates = [now.date()] cls.times = [now.time()] cls.objs = [ DateTimeArrayModel.objects.create( datetimes=cls.datetimes, dates=cls.dates, times=cls.times ), ] def test_exact_datetimes(self): self.assertSequenceEqual( DateTimeArrayModel.objects.filter(datetimes=self.datetimes), self.objs ) def test_exact_dates(self): self.assertSequenceEqual( DateTimeArrayModel.objects.filter(dates=self.dates), self.objs ) def test_exact_times(self): self.assertSequenceEqual( DateTimeArrayModel.objects.filter(times=self.times), self.objs ) class TestOtherTypesExactQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.ips = ["192.168.0.1", "::1"] cls.uuids = [uuid.uuid4()] cls.decimals = [decimal.Decimal(1.25), 1.75] cls.tags = [Tag(1), Tag(2), Tag(3)] cls.objs = [ OtherTypesArrayModel.objects.create( ips=cls.ips, uuids=cls.uuids, decimals=cls.decimals, tags=cls.tags, ) ] 
def test_exact_ip_addresses(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(ips=self.ips), self.objs ) def test_exact_uuids(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(uuids=self.uuids), self.objs ) def test_exact_decimals(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(decimals=self.decimals), self.objs ) def test_exact_tags(self): self.assertSequenceEqual( OtherTypesArrayModel.objects.filter(tags=self.tags), self.objs ) @isolate_apps("postgres_tests") class TestChecks(PostgreSQLSimpleTestCase): def test_field_checks(self): class MyModel(PostgreSQLModel): field = ArrayField(models.CharField()) model = MyModel() errors = model.check() self.assertEqual(len(errors), 1) # The inner CharField is missing a max_length. self.assertEqual(errors[0].id, "postgres.E001") self.assertIn("max_length", errors[0].msg) def test_invalid_base_fields(self): class MyModel(PostgreSQLModel): field = ArrayField( models.ManyToManyField("postgres_tests.IntegerArrayModel") ) model = MyModel() errors = model.check() self.assertEqual(len(errors), 1) self.assertEqual(errors[0].id, "postgres.E002") def test_invalid_default(self): class MyModel(PostgreSQLModel): field = ArrayField(models.IntegerField(), default=[]) model = MyModel() self.assertEqual( model.check(), [ checks.Warning( msg=( "ArrayField default should be a callable instead of an " "instance so that it's not shared between all field " "instances." ), hint="Use a callable instead, e.g., use `list` instead of `[]`.", obj=MyModel._meta.get_field("field"), id="fields.E010", ) ], ) def test_valid_default(self): class MyModel(PostgreSQLModel): field = ArrayField(models.IntegerField(), default=list) model = MyModel() self.assertEqual(model.check(), []) def test_valid_default_none(self): class MyModel(PostgreSQLModel): field = ArrayField(models.IntegerField(), default=None) model = MyModel() self.assertEqual(model.check(), []) def test_nested_field_checks(self): """ Nested ArrayFields are permitted. """ class MyModel(PostgreSQLModel): field = ArrayField(ArrayField(models.CharField())) model = MyModel() errors = model.check() self.assertEqual(len(errors), 1) # The inner CharField is missing a max_length. 
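# postgres.E001 relays the base field's own system-check failures, so the # missing max_length on the inner CharField is reported here as well.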
self.assertEqual(errors[0].id, "postgres.E001") self.assertIn("max_length", errors[0].msg) def test_choices_tuple_list(self): class MyModel(PostgreSQLModel): field = ArrayField( models.CharField(max_length=16), choices=[ [ "Media", [(["vinyl", "cd"], "Audio"), (("vhs", "dvd"), "Video")], ], (["mp3", "mp4"], "Digital"), ], ) self.assertEqual(MyModel._meta.get_field("field").check(), []) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific tests") class TestMigrations(TransactionTestCase): available_apps = ["postgres_tests"] def test_deconstruct(self): field = ArrayField(models.IntegerField()) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(type(new.base_field), type(field.base_field)) self.assertIsNot(new.base_field, field.base_field) def test_deconstruct_with_size(self): field = ArrayField(models.IntegerField(), size=3) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(new.size, field.size) def test_deconstruct_args(self): field = ArrayField(models.CharField(max_length=20)) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(new.base_field.max_length, field.base_field.max_length) def test_subclass_deconstruct(self): field = ArrayField(models.IntegerField()) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.contrib.postgres.fields.ArrayField") field = ArrayFieldSubclass() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "postgres_tests.models.ArrayFieldSubclass") @override_settings( MIGRATION_MODULES={ "postgres_tests": "postgres_tests.array_default_migrations", } ) def test_adding_field_with_default(self): # See #22962 table_name = "postgres_tests_integerarraydefaultmodel" with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) call_command("migrate", "postgres_tests", verbosity=0) with connection.cursor() as cursor: self.assertIn(table_name, connection.introspection.table_names(cursor)) call_command("migrate", "postgres_tests", "zero", verbosity=0) with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) @override_settings( MIGRATION_MODULES={ "postgres_tests": "postgres_tests.array_index_migrations", } ) def test_adding_arrayfield_with_index(self): """ ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes. """ table_name = "postgres_tests_chartextarrayindexmodel" call_command("migrate", "postgres_tests", verbosity=0) with connection.cursor() as cursor: like_constraint_columns_list = [ v["columns"] for k, v in list( connection.introspection.get_constraints(cursor, table_name).items() ) if k.endswith("_like") ] # Only the CharField should have a LIKE index. self.assertEqual(like_constraint_columns_list, [["char2"]]) # All fields should have regular indexes. 
with connection.cursor() as cursor: indexes = [ c["columns"][0] for c in connection.introspection.get_constraints( cursor, table_name ).values() if c["index"] and len(c["columns"]) == 1 ] self.assertIn("char", indexes) self.assertIn("char2", indexes) self.assertIn("text", indexes) call_command("migrate", "postgres_tests", "zero", verbosity=0) with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) class TestSerialization(PostgreSQLSimpleTestCase): test_data = ( '[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, ' '"model": "postgres_tests.integerarraymodel", "pk": null}]' ) def test_dumping(self): instance = IntegerArrayModel(field=[1, 2, None]) data = serializers.serialize("json", [instance]) self.assertEqual(json.loads(data), json.loads(self.test_data)) def test_loading(self): instance = list(serializers.deserialize("json", self.test_data))[0].object self.assertEqual(instance.field, [1, 2, None]) class TestValidation(PostgreSQLSimpleTestCase): def test_unbounded(self): field = ArrayField(models.IntegerField()) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([1, None], None) self.assertEqual(cm.exception.code, "item_invalid") self.assertEqual( cm.exception.message % cm.exception.params, "Item 2 in the array did not validate: This field cannot be null.", ) def test_blank_true(self): field = ArrayField(models.IntegerField(blank=True, null=True)) # This should not raise a validation error field.clean([1, None], None) def test_with_size(self): field = ArrayField(models.IntegerField(), size=3) field.clean([1, 2, 3], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([1, 2, 3, 4], None) self.assertEqual( cm.exception.messages[0], "List contains 4 items, it should contain no more than 3.", ) def test_nested_array_mismatch(self): field = ArrayField(ArrayField(models.IntegerField())) field.clean([[1, 2], [3, 4]], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([[1, 2], [3, 4, 5]], None) self.assertEqual(cm.exception.code, "nested_array_mismatch") self.assertEqual( cm.exception.messages[0], "Nested arrays must have the same length." 
) def test_with_base_field_error_params(self): field = ArrayField(models.CharField(max_length=2)) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(["abc"], None) self.assertEqual(len(cm.exception.error_list), 1) exception = cm.exception.error_list[0] self.assertEqual( exception.message, "Item 1 in the array did not validate: Ensure this value has at most 2 " "characters (it has 3).", ) self.assertEqual(exception.code, "item_invalid") self.assertEqual( exception.params, {"nth": 1, "value": "abc", "limit_value": 2, "show_value": 3}, ) def test_with_validators(self): field = ArrayField( models.IntegerField(validators=[validators.MinValueValidator(1)]) ) field.clean([1, 2], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([0], None) self.assertEqual(len(cm.exception.error_list), 1) exception = cm.exception.error_list[0] self.assertEqual( exception.message, "Item 1 in the array did not validate: Ensure this value is greater than " "or equal to 1.", ) self.assertEqual(exception.code, "item_invalid") self.assertEqual( exception.params, {"nth": 1, "value": 0, "limit_value": 1, "show_value": 0} ) class TestSimpleFormField(PostgreSQLSimpleTestCase): def test_valid(self): field = SimpleArrayField(forms.CharField()) value = field.clean("a,b,c") self.assertEqual(value, ["a", "b", "c"]) def test_to_python_fail(self): field = SimpleArrayField(forms.IntegerField()) with self.assertRaises(exceptions.ValidationError) as cm: field.clean("a,b,9") self.assertEqual( cm.exception.messages[0], "Item 1 in the array did not validate: Enter a whole number.", ) def test_validate_fail(self): field = SimpleArrayField(forms.CharField(required=True)) with self.assertRaises(exceptions.ValidationError) as cm: field.clean("a,b,") self.assertEqual( cm.exception.messages[0], "Item 3 in the array did not validate: This field is required.", ) def test_validate_fail_base_field_error_params(self): field = SimpleArrayField(forms.CharField(max_length=2)) with self.assertRaises(exceptions.ValidationError) as cm: field.clean("abc,c,defg") errors = cm.exception.error_list self.assertEqual(len(errors), 2) first_error = errors[0] self.assertEqual( first_error.message, "Item 1 in the array did not validate: Ensure this value has at most 2 " "characters (it has 3).", ) self.assertEqual(first_error.code, "item_invalid") self.assertEqual( first_error.params, {"nth": 1, "value": "abc", "limit_value": 2, "show_value": 3}, ) second_error = errors[1] self.assertEqual( second_error.message, "Item 3 in the array did not validate: Ensure this value has at most 2 " "characters (it has 4).", ) self.assertEqual(second_error.code, "item_invalid") self.assertEqual( second_error.params, {"nth": 3, "value": "defg", "limit_value": 2, "show_value": 4}, ) def test_validators_fail(self): field = SimpleArrayField(forms.RegexField("[a-e]{2}")) with self.assertRaises(exceptions.ValidationError) as cm: field.clean("a,bc,de") self.assertEqual( cm.exception.messages[0], "Item 1 in the array did not validate: Enter a valid value.", ) def test_delimiter(self): field = SimpleArrayField(forms.CharField(), delimiter="|") value = field.clean("a|b|c") self.assertEqual(value, ["a", "b", "c"]) def test_delimiter_with_nesting(self): field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter="|") value = field.clean("a,b|c,d") self.assertEqual(value, [["a", "b"], ["c", "d"]]) def test_prepare_value(self): field = SimpleArrayField(forms.CharField()) value = field.prepare_value(["a", "b", "c"]) 
self.assertEqual(value, "a,b,c") def test_max_length(self): field = SimpleArrayField(forms.CharField(), max_length=2) with self.assertRaises(exceptions.ValidationError) as cm: field.clean("a,b,c") self.assertEqual( cm.exception.messages[0], "List contains 3 items, it should contain no more than 2.", ) def test_min_length(self): field = SimpleArrayField(forms.CharField(), min_length=4) with self.assertRaises(exceptions.ValidationError) as cm: field.clean("a,b,c") self.assertEqual( cm.exception.messages[0], "List contains 3 items, it should contain no fewer than 4.", ) def test_required(self): field = SimpleArrayField(forms.CharField(), required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean("") self.assertEqual(cm.exception.messages[0], "This field is required.") def test_model_field_formfield(self): model_field = ArrayField(models.CharField(max_length=27)) form_field = model_field.formfield() self.assertIsInstance(form_field, SimpleArrayField) self.assertIsInstance(form_field.base_field, forms.CharField) self.assertEqual(form_field.base_field.max_length, 27) def test_model_field_formfield_size(self): model_field = ArrayField(models.CharField(max_length=27), size=4) form_field = model_field.formfield() self.assertIsInstance(form_field, SimpleArrayField) self.assertEqual(form_field.max_length, 4) def test_model_field_choices(self): model_field = ArrayField(models.IntegerField(choices=((1, "A"), (2, "B")))) form_field = model_field.formfield() self.assertEqual(form_field.clean("1,2"), [1, 2]) def test_already_converted_value(self): field = SimpleArrayField(forms.CharField()) vals = ["a", "b", "c"] self.assertEqual(field.clean(vals), vals) def test_has_changed(self): field = SimpleArrayField(forms.IntegerField()) self.assertIs(field.has_changed([1, 2], [1, 2]), False) self.assertIs(field.has_changed([1, 2], "1,2"), False) self.assertIs(field.has_changed([1, 2], "1,2,3"), True) self.assertIs(field.has_changed([1, 2], "a,b"), True) def test_has_changed_empty(self): field = SimpleArrayField(forms.CharField()) self.assertIs(field.has_changed(None, None), False) self.assertIs(field.has_changed(None, ""), False) self.assertIs(field.has_changed(None, []), False) self.assertIs(field.has_changed([], None), False) self.assertIs(field.has_changed([], ""), False) class TestSplitFormField(PostgreSQLSimpleTestCase): def test_valid(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) data = {"array_0": "a", "array_1": "b", "array_2": "c"} form = SplitForm(data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data, {"array": ["a", "b", "c"]}) def test_required(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), required=True, size=3) data = {"array_0": "", "array_1": "", "array_2": ""} form = SplitForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {"array": ["This field is required."]}) def test_remove_trailing_nulls(self): class SplitForm(forms.Form): array = SplitArrayField( forms.CharField(required=False), size=5, remove_trailing_nulls=True ) data = { "array_0": "a", "array_1": "", "array_2": "b", "array_3": "", "array_4": "", } form = SplitForm(data) self.assertTrue(form.is_valid(), form.errors) self.assertEqual(form.cleaned_data, {"array": ["a", "", "b"]}) def test_remove_trailing_nulls_not_required(self): class SplitForm(forms.Form): array = SplitArrayField( forms.CharField(required=False), size=2, remove_trailing_nulls=True, required=False, ) data = {"array_0": "", "array_1": 
""} form = SplitForm(data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data, {"array": []}) def test_required_field(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) data = {"array_0": "a", "array_1": "b", "array_2": ""} form = SplitForm(data) self.assertFalse(form.is_valid()) self.assertEqual( form.errors, { "array": [ "Item 3 in the array did not validate: This field is required." ] }, ) def test_invalid_integer(self): msg = ( "Item 2 in the array did not validate: Ensure this value is less than or " "equal to 100." ) with self.assertRaisesMessage(exceptions.ValidationError, msg): SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101]) # To locate the widget's template. @modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"}) def test_rendering(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) self.assertHTMLEqual( str(SplitForm()), """ <div> <label for="id_array_0">Array:</label> <input id="id_array_0" name="array_0" type="text" required> <input id="id_array_1" name="array_1" type="text" required> <input id="id_array_2" name="array_2" type="text" required> </div> """, ) def test_invalid_char_length(self): field = SplitArrayField(forms.CharField(max_length=2), size=3) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(["abc", "c", "defg"]) self.assertEqual( cm.exception.messages, [ "Item 1 in the array did not validate: Ensure this value has at most 2 " "characters (it has 3).", "Item 3 in the array did not validate: Ensure this value has at most 2 " "characters (it has 4).", ], ) def test_splitarraywidget_value_omitted_from_data(self): class Form(forms.ModelForm): field = SplitArrayField(forms.IntegerField(), required=False, size=2) class Meta: model = IntegerArrayModel fields = ("field",) form = Form({"field_0": "1", "field_1": "2"}) self.assertEqual(form.errors, {}) obj = form.save(commit=False) self.assertEqual(obj.field, [1, 2]) def test_splitarrayfield_has_changed(self): class Form(forms.ModelForm): field = SplitArrayField(forms.IntegerField(), required=False, size=2) class Meta: model = IntegerArrayModel fields = ("field",) tests = [ ({}, {"field_0": "", "field_1": ""}, True), ({"field": None}, {"field_0": "", "field_1": ""}, True), ({"field": [1]}, {"field_0": "", "field_1": ""}, True), ({"field": [1]}, {"field_0": "1", "field_1": "0"}, True), ({"field": [1, 2]}, {"field_0": "1", "field_1": "2"}, False), ({"field": [1, 2]}, {"field_0": "a", "field_1": "b"}, True), ] for initial, data, expected_result in tests: with self.subTest(initial=initial, data=data): obj = IntegerArrayModel(**initial) form = Form(data, instance=obj) self.assertIs(form.has_changed(), expected_result) def test_splitarrayfield_remove_trailing_nulls_has_changed(self): class Form(forms.ModelForm): field = SplitArrayField( forms.IntegerField(), required=False, size=2, remove_trailing_nulls=True ) class Meta: model = IntegerArrayModel fields = ("field",) tests = [ ({}, {"field_0": "", "field_1": ""}, False), ({"field": None}, {"field_0": "", "field_1": ""}, False), ({"field": []}, {"field_0": "", "field_1": ""}, False), ({"field": [1]}, {"field_0": "1", "field_1": ""}, False), ] for initial, data, expected_result in tests: with self.subTest(initial=initial, data=data): obj = IntegerArrayModel(**initial) form = Form(data, instance=obj) self.assertIs(form.has_changed(), expected_result) class TestSplitFormWidget(PostgreSQLWidgetTestCase): def test_get_context(self): 
self.assertEqual( SplitArrayWidget(forms.TextInput(), size=2).get_context( "name", ["val1", "val2"] ), { "widget": { "name": "name", "is_hidden": False, "required": False, "value": "['val1', 'val2']", "attrs": {}, "template_name": "postgres/widgets/split_array.html", "subwidgets": [ { "name": "name_0", "is_hidden": False, "required": False, "value": "val1", "attrs": {}, "template_name": "django/forms/widgets/text.html", "type": "text", }, { "name": "name_1", "is_hidden": False, "required": False, "value": "val2", "attrs": {}, "template_name": "django/forms/widgets/text.html", "type": "text", }, ], } }, ) def test_checkbox_get_context_attrs(self): context = SplitArrayWidget( forms.CheckboxInput(), size=2, ).get_context("name", [True, False]) self.assertEqual(context["widget"]["value"], "[True, False]") self.assertEqual( [subwidget["attrs"] for subwidget in context["widget"]["subwidgets"]], [{"checked": True}, {}], ) def test_render(self): self.check_html( SplitArrayWidget(forms.TextInput(), size=2), "array", None, """ <input name="array_0" type="text"> <input name="array_1" type="text"> """, ) def test_render_attrs(self): self.check_html( SplitArrayWidget(forms.TextInput(), size=2), "array", ["val1", "val2"], attrs={"id": "foo"}, html=( """ <input id="foo_0" name="array_0" type="text" value="val1"> <input id="foo_1" name="array_1" type="text" value="val2"> """ ), ) def test_value_omitted_from_data(self): widget = SplitArrayWidget(forms.TextInput(), size=2) self.assertIs(widget.value_omitted_from_data({}, {}, "field"), True) self.assertIs( widget.value_omitted_from_data({"field_0": "value"}, {}, "field"), False ) self.assertIs( widget.value_omitted_from_data({"field_1": "value"}, {}, "field"), False ) self.assertIs( widget.value_omitted_from_data( {"field_0": "value", "field_1": "value"}, {}, "field" ), False, ) class TestAdminUtils(PostgreSQLTestCase): empty_value = "-empty-" def test_array_display_for_field(self): array_field = ArrayField(models.IntegerField()) display_value = display_for_field( [1, 2], array_field, self.empty_value, ) self.assertEqual(display_value, "1, 2") def test_array_with_choices_display_for_field(self): array_field = ArrayField( models.IntegerField(), choices=[ ([1, 2, 3], "1st choice"), ([1, 2], "2nd choice"), ], ) display_value = display_for_field( [1, 2], array_field, self.empty_value, ) self.assertEqual(display_value, "2nd choice") display_value = display_for_field( [99, 99], array_field, self.empty_value, ) self.assertEqual(display_value, self.empty_value)
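# Hedged recap added for illustration (not in the original module): per the
# TestAdminUtils cases above, display_for_field() joins a plain ArrayField
# value with ", ", while an ArrayField with choices is matched against the
# whole list value and falls back to the configured empty value when nothing
# matches. A standalone sketch, assuming only the public admin utility:
#
#     from django.contrib.admin.utils import display_for_field
#     from django.contrib.postgres.fields import ArrayField
#     from django.db import models
#
#     plain = ArrayField(models.IntegerField())
#     display_for_field([1, 2], plain, "-empty-")    # -> "1, 2"
#
#     chosen = ArrayField(models.IntegerField(), choices=[([1, 2], "pair")])
#     display_for_field([1, 2], chosen, "-empty-")   # -> "pair"
#     display_for_field([9, 9], chosen, "-empty-")   # -> "-empty-"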
import time import traceback from datetime import date, datetime, timedelta from threading import Thread from django.core.exceptions import FieldError from django.db import DatabaseError, IntegrityError, connection from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature from django.test.utils import CaptureQueriesContext from django.utils.functional import lazy from .models import ( Author, Book, DefaultPerson, Journalist, ManualPrimaryKeyTest, Person, Profile, Publisher, Tag, Thing, ) class GetOrCreateTests(TestCase): @classmethod def setUpTestData(cls): Person.objects.create( first_name="John", last_name="Lennon", birthday=date(1940, 10, 9) ) def test_get_or_create_method_with_get(self): created = Person.objects.get_or_create( first_name="John", last_name="Lennon", defaults={"birthday": date(1940, 10, 9)}, )[1] self.assertFalse(created) self.assertEqual(Person.objects.count(), 1) def test_get_or_create_method_with_create(self): created = Person.objects.get_or_create( first_name="George", last_name="Harrison", defaults={"birthday": date(1943, 2, 25)}, )[1] self.assertTrue(created) self.assertEqual(Person.objects.count(), 2) def test_get_or_create_redundant_instance(self): """ If we execute the exact same statement twice, the second time, it won't create a Person. """ Person.objects.get_or_create( first_name="George", last_name="Harrison", defaults={"birthday": date(1943, 2, 25)}, ) created = Person.objects.get_or_create( first_name="George", last_name="Harrison", defaults={"birthday": date(1943, 2, 25)}, )[1] self.assertFalse(created) self.assertEqual(Person.objects.count(), 2) def test_get_or_create_invalid_params(self): """ If you don't specify a value or default value for all required fields, you will get an error. """ with self.assertRaises(IntegrityError): Person.objects.get_or_create(first_name="Tom", last_name="Smith") def test_get_or_create_with_pk_property(self): """ Using the pk property of a model is allowed. """ Thing.objects.get_or_create(pk=1) def test_get_or_create_with_model_property_defaults(self): """Using a property with a setter implemented is allowed.""" t, _ = Thing.objects.get_or_create( defaults={"capitalized_name_property": "annie"}, pk=1 ) self.assertEqual(t.name, "Annie") def test_get_or_create_on_related_manager(self): p = Publisher.objects.create(name="Acme Publishing") # Create a book through the publisher. book, created = p.books.get_or_create(name="The Book of Ed & Fred") self.assertTrue(created) # The publisher should have one book. self.assertEqual(p.books.count(), 1) # Try get_or_create again, this time nothing should be created. book, created = p.books.get_or_create(name="The Book of Ed & Fred") self.assertFalse(created) # And the publisher should still have one book. self.assertEqual(p.books.count(), 1) # Add an author to the book. ed, created = book.authors.get_or_create(name="Ed") self.assertTrue(created) # The book should have one author. self.assertEqual(book.authors.count(), 1) # Try get_or_create again, this time nothing should be created. ed, created = book.authors.get_or_create(name="Ed") self.assertFalse(created) # And the book should still have one author. self.assertEqual(book.authors.count(), 1) # Add a second author to the book. fred, created = book.authors.get_or_create(name="Fred") self.assertTrue(created) # The book should have two authors now. self.assertEqual(book.authors.count(), 2) # Create an Author not tied to any books. Author.objects.create(name="Ted") # There should be three Authors in total. 
The book object should have two. self.assertEqual(Author.objects.count(), 3) self.assertEqual(book.authors.count(), 2) # Try creating a book through an author. _, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p) self.assertTrue(created) # Now Ed has two Books, Fred just one. self.assertEqual(ed.books.count(), 2) self.assertEqual(fred.books.count(), 1) # Use the publisher's primary key value instead of a model instance. _, created = ed.books.get_or_create( name="The Great Book of Ed", publisher_id=p.id ) self.assertTrue(created) # Try get_or_create again, this time nothing should be created. _, created = ed.books.get_or_create( name="The Great Book of Ed", publisher_id=p.id ) self.assertFalse(created) # The publisher should have three books. self.assertEqual(p.books.count(), 3) def test_defaults_exact(self): """ If you have a field named defaults and want to use it as an exact lookup, you need to use 'defaults__exact'. """ obj, created = Person.objects.get_or_create( first_name="George", last_name="Harrison", defaults__exact="testing", defaults={ "birthday": date(1943, 2, 25), "defaults": "testing", }, ) self.assertTrue(created) self.assertEqual(obj.defaults, "testing") obj2, created = Person.objects.get_or_create( first_name="George", last_name="Harrison", defaults__exact="testing", defaults={ "birthday": date(1943, 2, 25), "defaults": "testing", }, ) self.assertFalse(created) self.assertEqual(obj, obj2) def test_callable_defaults(self): """ Callables in `defaults` are evaluated if the instance is created. """ obj, created = Person.objects.get_or_create( first_name="George", defaults={"last_name": "Harrison", "birthday": lambda: date(1943, 2, 25)}, ) self.assertTrue(created) self.assertEqual(date(1943, 2, 25), obj.birthday) def test_callable_defaults_not_called(self): def raise_exception(): raise AssertionError obj, created = Person.objects.get_or_create( first_name="John", last_name="Lennon", defaults={"birthday": lambda: raise_exception()}, ) def test_defaults_not_evaluated_unless_needed(self): """`defaults` aren't evaluated if the instance isn't created.""" def raise_exception(): raise AssertionError obj, created = Person.objects.get_or_create( first_name="John", defaults=lazy(raise_exception, object)(), ) self.assertFalse(created) class GetOrCreateTestsWithManualPKs(TestCase): @classmethod def setUpTestData(cls): ManualPrimaryKeyTest.objects.create(id=1, data="Original") def test_create_with_duplicate_primary_key(self): """ If you specify an existing primary key, but different other fields, then you will get an error and data will not be updated. """ with self.assertRaises(IntegrityError): ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different") self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original") def test_get_or_create_raises_IntegrityError_plus_traceback(self): """ get_or_create should raise IntegrityErrors with the full traceback. This is tested by checking that a known method call is in the traceback. We cannot use assertRaises here because we need to inspect the actual traceback. Refs #16340. """ try: ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different") except IntegrityError: formatted_traceback = traceback.format_exc() self.assertIn("obj.save", formatted_traceback) def test_savepoint_rollback(self): """ The database connection is still usable after a DatabaseError in get_or_create() (#20463). 
""" Tag.objects.create(text="foo") with self.assertRaises(DatabaseError): # pk 123456789 doesn't exist, so the tag object will be created. # Saving triggers a unique constraint violation on 'text'. Tag.objects.get_or_create(pk=123456789, defaults={"text": "foo"}) # Tag objects can be created after the error. Tag.objects.create(text="bar") def test_get_or_create_empty(self): """ If all the attributes on a model have defaults, get_or_create() doesn't require any arguments. """ DefaultPerson.objects.get_or_create() class GetOrCreateTransactionTests(TransactionTestCase): available_apps = ["get_or_create"] def test_get_or_create_integrityerror(self): """ Regression test for #15117. Requires a TransactionTestCase on databases that delay integrity checks until the end of transactions, otherwise the exception is never raised. """ try: Profile.objects.get_or_create(person=Person(id=1)) except IntegrityError: pass else: self.skipTest("This backend does not support integrity checks.") class GetOrCreateThroughManyToMany(TestCase): def test_get_get_or_create(self): tag = Tag.objects.create(text="foo") a_thing = Thing.objects.create(name="a") a_thing.tags.add(tag) obj, created = a_thing.tags.get_or_create(text="foo") self.assertFalse(created) self.assertEqual(obj.pk, tag.pk) def test_create_get_or_create(self): a_thing = Thing.objects.create(name="a") obj, created = a_thing.tags.get_or_create(text="foo") self.assertTrue(created) self.assertEqual(obj.text, "foo") self.assertIn(obj, a_thing.tags.all()) def test_something(self): Tag.objects.create(text="foo") a_thing = Thing.objects.create(name="a") with self.assertRaises(IntegrityError): a_thing.tags.get_or_create(text="foo") class UpdateOrCreateTests(TestCase): def test_update(self): Person.objects.create( first_name="John", last_name="Lennon", birthday=date(1940, 10, 9) ) p, created = Person.objects.update_or_create( first_name="John", last_name="Lennon", defaults={"birthday": date(1940, 10, 10)}, ) self.assertFalse(created) self.assertEqual(p.first_name, "John") self.assertEqual(p.last_name, "Lennon") self.assertEqual(p.birthday, date(1940, 10, 10)) def test_create(self): p, created = Person.objects.update_or_create( first_name="John", last_name="Lennon", defaults={"birthday": date(1940, 10, 10)}, ) self.assertTrue(created) self.assertEqual(p.first_name, "John") self.assertEqual(p.last_name, "Lennon") self.assertEqual(p.birthday, date(1940, 10, 10)) def test_create_twice(self): params = { "first_name": "John", "last_name": "Lennon", "birthday": date(1940, 10, 10), } Person.objects.update_or_create(**params) # If we execute the exact same statement, it won't create a Person. p, created = Person.objects.update_or_create(**params) self.assertFalse(created) def test_integrity(self): """ If you don't specify a value or default value for all required fields, you will get an error. """ with self.assertRaises(IntegrityError): Person.objects.update_or_create(first_name="Tom", last_name="Smith") def test_manual_primary_key_test(self): """ If you specify an existing primary key, but different other fields, then you will get an error and data will not be updated. """ ManualPrimaryKeyTest.objects.create(id=1, data="Original") with self.assertRaises(IntegrityError): ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different") self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original") def test_with_pk_property(self): """ Using the pk property of a model is allowed. 
""" Thing.objects.update_or_create(pk=1) def test_update_or_create_with_model_property_defaults(self): """Using a property with a setter implemented is allowed.""" t, _ = Thing.objects.update_or_create( defaults={"capitalized_name_property": "annie"}, pk=1 ) self.assertEqual(t.name, "Annie") def test_error_contains_full_traceback(self): """ update_or_create should raise IntegrityErrors with the full traceback. This is tested by checking that a known method call is in the traceback. We cannot use assertRaises/assertRaises here because we need to inspect the actual traceback. Refs #16340. """ try: ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different") except IntegrityError: formatted_traceback = traceback.format_exc() self.assertIn("obj.save", formatted_traceback) def test_create_with_related_manager(self): """ Should be able to use update_or_create from the related manager to create a book. Refs #23611. """ p = Publisher.objects.create(name="Acme Publishing") book, created = p.books.update_or_create(name="The Book of Ed & Fred") self.assertTrue(created) self.assertEqual(p.books.count(), 1) def test_update_with_related_manager(self): """ Should be able to use update_or_create from the related manager to update a book. Refs #23611. """ p = Publisher.objects.create(name="Acme Publishing") book = Book.objects.create(name="The Book of Ed & Fred", publisher=p) self.assertEqual(p.books.count(), 1) name = "The Book of Django" book, created = p.books.update_or_create(defaults={"name": name}, id=book.id) self.assertFalse(created) self.assertEqual(book.name, name) self.assertEqual(p.books.count(), 1) def test_create_with_many(self): """ Should be able to use update_or_create from the m2m related manager to create a book. Refs #23611. """ p = Publisher.objects.create(name="Acme Publishing") author = Author.objects.create(name="Ted") book, created = author.books.update_or_create( name="The Book of Ed & Fred", publisher=p ) self.assertTrue(created) self.assertEqual(author.books.count(), 1) def test_update_with_many(self): """ Should be able to use update_or_create from the m2m related manager to update a book. Refs #23611. """ p = Publisher.objects.create(name="Acme Publishing") author = Author.objects.create(name="Ted") book = Book.objects.create(name="The Book of Ed & Fred", publisher=p) book.authors.add(author) self.assertEqual(author.books.count(), 1) name = "The Book of Django" book, created = author.books.update_or_create( defaults={"name": name}, id=book.id ) self.assertFalse(created) self.assertEqual(book.name, name) self.assertEqual(author.books.count(), 1) def test_defaults_exact(self): """ If you have a field named defaults and want to use it as an exact lookup, you need to use 'defaults__exact'. 
""" obj, created = Person.objects.update_or_create( first_name="George", last_name="Harrison", defaults__exact="testing", defaults={ "birthday": date(1943, 2, 25), "defaults": "testing", }, ) self.assertTrue(created) self.assertEqual(obj.defaults, "testing") obj, created = Person.objects.update_or_create( first_name="George", last_name="Harrison", defaults__exact="testing", defaults={ "birthday": date(1943, 2, 25), "defaults": "another testing", }, ) self.assertFalse(created) self.assertEqual(obj.defaults, "another testing") def test_create_callable_default(self): obj, created = Person.objects.update_or_create( first_name="George", last_name="Harrison", defaults={"birthday": lambda: date(1943, 2, 25)}, ) self.assertIs(created, True) self.assertEqual(obj.birthday, date(1943, 2, 25)) def test_update_callable_default(self): Person.objects.update_or_create( first_name="George", last_name="Harrison", birthday=date(1942, 2, 25), ) obj, created = Person.objects.update_or_create( first_name="George", defaults={"last_name": lambda: "NotHarrison"}, ) self.assertIs(created, False) self.assertEqual(obj.last_name, "NotHarrison") def test_defaults_not_evaluated_unless_needed(self): """`defaults` aren't evaluated if the instance isn't created.""" Person.objects.create( first_name="John", last_name="Lennon", birthday=date(1940, 10, 9) ) def raise_exception(): raise AssertionError obj, created = Person.objects.get_or_create( first_name="John", defaults=lazy(raise_exception, object)(), ) self.assertFalse(created) def test_mti_update_non_local_concrete_fields(self): journalist = Journalist.objects.create(name="Jane", specialty="Politics") journalist, created = Journalist.objects.update_or_create( pk=journalist.pk, defaults={"name": "John"}, ) self.assertIs(created, False) self.assertEqual(journalist.name, "John") def test_update_only_defaults_and_pre_save_fields_when_local_fields(self): publisher = Publisher.objects.create(name="Acme Publishing") book = Book.objects.create(publisher=publisher, name="The Book of Ed & Fred") for defaults in [{"publisher": publisher}, {"publisher_id": publisher}]: with self.subTest(defaults=defaults): with CaptureQueriesContext(connection) as captured_queries: book, created = Book.objects.update_or_create( pk=book.pk, defaults=defaults, ) self.assertIs(created, False) update_sqls = [ q["sql"] for q in captured_queries if q["sql"].startswith("UPDATE") ] self.assertEqual(len(update_sqls), 1) update_sql = update_sqls[0] self.assertIsNotNone(update_sql) self.assertIn( connection.ops.quote_name("publisher_id_column"), update_sql ) self.assertIn(connection.ops.quote_name("updated"), update_sql) # Name should not be updated. self.assertNotIn(connection.ops.quote_name("name"), update_sql) class UpdateOrCreateTestsWithManualPKs(TestCase): def test_create_with_duplicate_primary_key(self): """ If an existing primary key is specified with different values for other fields, then IntegrityError is raised and data isn't updated. """ ManualPrimaryKeyTest.objects.create(id=1, data="Original") with self.assertRaises(IntegrityError): ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different") self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original") class UpdateOrCreateTransactionTests(TransactionTestCase): available_apps = ["get_or_create"] @skipUnlessDBFeature("has_select_for_update") @skipUnlessDBFeature("supports_transactions") def test_updates_in_transaction(self): """ Objects are selected and updated in a transaction to avoid race conditions. 
This test forces update_or_create() to hold the lock in another thread for a relatively long time so that it can update while it holds the lock. The updated field isn't a field in 'defaults', so update_or_create() shouldn't have an effect on it. """ lock_status = {"has_grabbed_lock": False} def birthday_sleep(): lock_status["has_grabbed_lock"] = True time.sleep(0.5) return date(1940, 10, 10) def update_birthday_slowly(): Person.objects.update_or_create( first_name="John", defaults={"birthday": birthday_sleep} ) # Avoid leaking connection for Oracle connection.close() def lock_wait(): # timeout after ~0.5 seconds for i in range(20): time.sleep(0.025) if lock_status["has_grabbed_lock"]: return True return False Person.objects.create( first_name="John", last_name="Lennon", birthday=date(1940, 10, 9) ) # update_or_create in a separate thread t = Thread(target=update_birthday_slowly) before_start = datetime.now() t.start() if not lock_wait(): self.skipTest("Database took too long to lock the row") # Update during lock Person.objects.filter(first_name="John").update(last_name="NotLennon") after_update = datetime.now() # Wait for thread to finish t.join() # The update remains and it blocked. updated_person = Person.objects.get(first_name="John") self.assertGreater(after_update - before_start, timedelta(seconds=0.5)) self.assertEqual(updated_person.last_name, "NotLennon") @skipUnlessDBFeature("has_select_for_update") @skipUnlessDBFeature("supports_transactions") def test_creation_in_transaction(self): """ Objects are selected and updated in a transaction to avoid race conditions. This test checks the behavior of update_or_create() when the object doesn't already exist, but another thread creates the object before update_or_create() does and then attempts to update the object, also before update_or_create(). It forces update_or_create() to hold the lock in another thread for a relatively long time so that it can update while it holds the lock. The updated field isn't a field in 'defaults', so update_or_create() shouldn't have an effect on it. """ lock_status = {"lock_count": 0} def birthday_sleep(): lock_status["lock_count"] += 1 time.sleep(0.5) return date(1940, 10, 10) def update_birthday_slowly(): try: Person.objects.update_or_create( first_name="John", defaults={"birthday": birthday_sleep} ) finally: # Avoid leaking connection for Oracle connection.close() def lock_wait(expected_lock_count): # timeout after ~0.5 seconds for i in range(20): time.sleep(0.025) if lock_status["lock_count"] == expected_lock_count: return True self.skipTest("Database took too long to lock the row") # update_or_create in a separate thread. t = Thread(target=update_birthday_slowly) before_start = datetime.now() t.start() lock_wait(1) # Create object *after* initial attempt by update_or_create to get obj # but before creation attempt. Person.objects.create( first_name="John", last_name="Lennon", birthday=date(1940, 10, 9) ) lock_wait(2) # At this point, the thread is pausing for 0.5 seconds, so now attempt # to modify object before update_or_create() calls save(). This should # be blocked until after the save(). Person.objects.filter(first_name="John").update(last_name="NotLennon") after_update = datetime.now() # Wait for thread to finish t.join() # Check call to update_or_create() succeeded and the subsequent # (blocked) call to update(). 
updated_person = Person.objects.get(first_name="John") self.assertEqual( updated_person.birthday, date(1940, 10, 10) ) # set by update_or_create() self.assertEqual(updated_person.last_name, "NotLennon") # set by update() self.assertGreater(after_update - before_start, timedelta(seconds=1)) class InvalidCreateArgumentsTests(TransactionTestCase): available_apps = ["get_or_create"] msg = "Invalid field name(s) for model Thing: 'nonexistent'." bad_field_msg = ( "Cannot resolve keyword 'nonexistent' into field. Choices are: id, name, tags" ) def test_get_or_create_with_invalid_defaults(self): with self.assertRaisesMessage(FieldError, self.msg): Thing.objects.get_or_create(name="a", defaults={"nonexistent": "b"}) def test_get_or_create_with_invalid_kwargs(self): with self.assertRaisesMessage(FieldError, self.bad_field_msg): Thing.objects.get_or_create(name="a", nonexistent="b") def test_update_or_create_with_invalid_defaults(self): with self.assertRaisesMessage(FieldError, self.msg): Thing.objects.update_or_create(name="a", defaults={"nonexistent": "b"}) def test_update_or_create_with_invalid_kwargs(self): with self.assertRaisesMessage(FieldError, self.bad_field_msg): Thing.objects.update_or_create(name="a", nonexistent="b") def test_multiple_invalid_fields(self): with self.assertRaisesMessage(FieldError, self.bad_field_msg): Thing.objects.update_or_create( name="a", nonexistent="b", defaults={"invalid": "c"} ) def test_property_attribute_without_setter_defaults(self): with self.assertRaisesMessage( FieldError, "Invalid field name(s) for model Thing: 'name_in_all_caps'" ): Thing.objects.update_or_create( name="a", defaults={"name_in_all_caps": "FRANK"} ) def test_property_attribute_without_setter_kwargs(self): msg = ( "Cannot resolve keyword 'name_in_all_caps' into field. Choices are: id, " "name, tags" ) with self.assertRaisesMessage(FieldError, msg): Thing.objects.update_or_create( name_in_all_caps="FRANK", defaults={"name": "Frank"} )
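# Hedged recap added for illustration (not in the original module): the
# suites above pin down the get_or_create()/update_or_create() contract.
# The snippet below is a sketch reusing the Person model from these tests:
#
#     obj, created = Person.objects.update_or_create(
#         first_name="George",            # lookup kwargs select the row
#         last_name="Harrison",
#         defaults={
#             # Callables in defaults are resolved lazily, only when the
#             # row is actually created or updated.
#             "birthday": lambda: date(1943, 2, 25),
#             # A real field literally named "defaults" goes inside this
#             # dict; filter on it with defaults__exact.
#         },
#     )
#
# Unknown names in the lookup kwargs raise FieldError, while constraint
# violations surface as IntegrityError with the full traceback preserved.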
import gzip import random import re import struct from io import BytesIO from urllib.parse import quote from django.conf import settings from django.core import mail from django.core.exceptions import PermissionDenied from django.http import ( FileResponse, HttpRequest, HttpResponse, HttpResponseNotFound, HttpResponsePermanentRedirect, HttpResponseRedirect, StreamingHttpResponse, ) from django.middleware.clickjacking import XFrameOptionsMiddleware from django.middleware.common import BrokenLinkEmailsMiddleware, CommonMiddleware from django.middleware.gzip import GZipMiddleware from django.middleware.http import ConditionalGetMiddleware from django.test import RequestFactory, SimpleTestCase, override_settings int2byte = struct.Struct(">B").pack def get_response_empty(request): return HttpResponse() def get_response_404(request): return HttpResponseNotFound() @override_settings(ROOT_URLCONF="middleware.urls") class CommonMiddlewareTest(SimpleTestCase): rf = RequestFactory() @override_settings(APPEND_SLASH=True) def test_append_slash_have_slash(self): """ URLs with slashes should go unmolested. """ request = self.rf.get("/slash/") self.assertIsNone(CommonMiddleware(get_response_404).process_request(request)) self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_slashless_resource(self): """ Matches to explicit slashless URLs should go unmolested. """ def get_response(req): return HttpResponse("Here's the text of the web page.") request = self.rf.get("/noslash") self.assertIsNone(CommonMiddleware(get_response).process_request(request)) self.assertEqual( CommonMiddleware(get_response)(request).content, b"Here's the text of the web page.", ) @override_settings(APPEND_SLASH=True) def test_append_slash_slashless_unknown(self): """ APPEND_SLASH should not redirect to unknown resources. """ request = self.rf.get("/unknown") response = CommonMiddleware(get_response_404)(request) self.assertEqual(response.status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_redirect(self): """ APPEND_SLASH should redirect slashless URLs to a valid pattern. """ request = self.rf.get("/slash") r = CommonMiddleware(get_response_empty).process_request(request) self.assertIsNone(r) response = HttpResponseNotFound() r = CommonMiddleware(get_response_empty).process_response(request, response) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "/slash/") @override_settings(APPEND_SLASH=True) def test_append_slash_redirect_querystring(self): """ APPEND_SLASH should preserve querystrings when redirecting. """ request = self.rf.get("/slash?test=1") resp = CommonMiddleware(get_response_404)(request) self.assertEqual(resp.url, "/slash/?test=1") @override_settings(APPEND_SLASH=True) def test_append_slash_redirect_querystring_have_slash(self): """ APPEND_SLASH should append slash to path when redirecting a request with a querystring ending with slash. """ request = self.rf.get("/slash?test=slash/") resp = CommonMiddleware(get_response_404)(request) self.assertIsInstance(resp, HttpResponsePermanentRedirect) self.assertEqual(resp.url, "/slash/?test=slash/") @override_settings(APPEND_SLASH=True, DEBUG=True) def test_append_slash_no_redirect_on_POST_in_DEBUG(self): """ While in debug mode, an exception is raised with a warning when a failed attempt is made to POST, PUT, or PATCH to an URL which would normally be redirected to a slashed version. """ msg = "maintaining %s data. 
Change your form to point to testserver/slash/" request = self.rf.get("/slash") request.method = "POST" with self.assertRaisesMessage(RuntimeError, msg % request.method): CommonMiddleware(get_response_404)(request) request = self.rf.get("/slash") request.method = "PUT" with self.assertRaisesMessage(RuntimeError, msg % request.method): CommonMiddleware(get_response_404)(request) request = self.rf.get("/slash") request.method = "PATCH" with self.assertRaisesMessage(RuntimeError, msg % request.method): CommonMiddleware(get_response_404)(request) @override_settings(APPEND_SLASH=False) def test_append_slash_disabled(self): """ Disabling append slash functionality should leave slashless URLs alone. """ request = self.rf.get("/slash") self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_opt_out(self): """ Views marked with @no_append_slash should be left alone. """ request = self.rf.get("/sensitive_fbv") self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) request = self.rf.get("/sensitive_cbv") self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_quoted(self): """ URLs which require quoting should be redirected to their slash version. """ request = self.rf.get(quote("/needsquoting#")) r = CommonMiddleware(get_response_404)(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "/needsquoting%23/") @override_settings(APPEND_SLASH=True) def test_append_slash_leading_slashes(self): """ Paths starting with two slashes are escaped to prevent open redirects. If there's a URL pattern that allows paths to start with two slashes, a request with path //evil.com must not redirect to //evil.com/ (appended slash) which is a schemaless absolute URL. The browser would navigate to evil.com/. """ # Use 4 slashes because of RequestFactory behavior. request = self.rf.get("////evil.com/security") r = CommonMiddleware(get_response_404).process_request(request) self.assertIsNone(r) response = HttpResponseNotFound() r = CommonMiddleware(get_response_404).process_response(request, response) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "/%2Fevil.com/security/") r = CommonMiddleware(get_response_404)(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "/%2Fevil.com/security/") @override_settings(APPEND_SLASH=False, PREPEND_WWW=True) def test_prepend_www(self): request = self.rf.get("/path/") r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "http://www.testserver/path/") @override_settings(APPEND_SLASH=True, PREPEND_WWW=True) def test_prepend_www_append_slash_have_slash(self): request = self.rf.get("/slash/") r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "http://www.testserver/slash/") @override_settings(APPEND_SLASH=True, PREPEND_WWW=True) def test_prepend_www_append_slash_slashless(self): request = self.rf.get("/slash") r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "http://www.testserver/slash/") # The following tests examine expected behavior given a custom URLconf that # overrides the default one through the request object. 
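    # Hedged sketch (illustrative; not part of the original tests): setting
    # request.urlconf makes CommonMiddleware resolve its APPEND_SLASH and
    # PREPEND_WWW decisions against that module instead of ROOT_URLCONF,
    # roughly:
    #
    #     request = self.rf.get("/customurlconf/slash")
    #     request.urlconf = "middleware.extra_urls"
    #     # django.urls.is_valid_path("/customurlconf/slash/",
    #     # "middleware.extra_urls") now decides whether the 301 is emitted.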
@override_settings(APPEND_SLASH=True) def test_append_slash_have_slash_custom_urlconf(self): """ URLs with slashes should go unmolested. """ request = self.rf.get("/customurlconf/slash/") request.urlconf = "middleware.extra_urls" self.assertIsNone(CommonMiddleware(get_response_404).process_request(request)) self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_slashless_resource_custom_urlconf(self): """ Matches to explicit slashless URLs should go unmolested. """ def get_response(req): return HttpResponse("web content") request = self.rf.get("/customurlconf/noslash") request.urlconf = "middleware.extra_urls" self.assertIsNone(CommonMiddleware(get_response).process_request(request)) self.assertEqual( CommonMiddleware(get_response)(request).content, b"web content" ) @override_settings(APPEND_SLASH=True) def test_append_slash_slashless_unknown_custom_urlconf(self): """ APPEND_SLASH should not redirect to unknown resources. """ request = self.rf.get("/customurlconf/unknown") request.urlconf = "middleware.extra_urls" self.assertIsNone(CommonMiddleware(get_response_404).process_request(request)) self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_redirect_custom_urlconf(self): """ APPEND_SLASH should redirect slashless URLs to a valid pattern. """ request = self.rf.get("/customurlconf/slash") request.urlconf = "middleware.extra_urls" r = CommonMiddleware(get_response_404)(request) self.assertIsNotNone( r, "CommonMiddleware failed to return APPEND_SLASH redirect using " "request.urlconf", ) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "/customurlconf/slash/") @override_settings(APPEND_SLASH=True, DEBUG=True) def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self): """ While in debug mode, an exception is raised with a warning when a failed attempt is made to POST to an URL which would normally be redirected to a slashed version. """ request = self.rf.get("/customurlconf/slash") request.urlconf = "middleware.extra_urls" request.method = "POST" with self.assertRaisesMessage(RuntimeError, "end in a slash"): CommonMiddleware(get_response_404)(request) @override_settings(APPEND_SLASH=False) def test_append_slash_disabled_custom_urlconf(self): """ Disabling append slash functionality should leave slashless URLs alone. """ request = self.rf.get("/customurlconf/slash") request.urlconf = "middleware.extra_urls" self.assertIsNone(CommonMiddleware(get_response_404).process_request(request)) self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404) @override_settings(APPEND_SLASH=True) def test_append_slash_quoted_custom_urlconf(self): """ URLs which require quoting should be redirected to their slash version. 
""" request = self.rf.get(quote("/customurlconf/needsquoting#")) request.urlconf = "middleware.extra_urls" r = CommonMiddleware(get_response_404)(request) self.assertIsNotNone( r, "CommonMiddleware failed to return APPEND_SLASH redirect using " "request.urlconf", ) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "/customurlconf/needsquoting%23/") @override_settings(APPEND_SLASH=False, PREPEND_WWW=True) def test_prepend_www_custom_urlconf(self): request = self.rf.get("/customurlconf/path/") request.urlconf = "middleware.extra_urls" r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "http://www.testserver/customurlconf/path/") @override_settings(APPEND_SLASH=True, PREPEND_WWW=True) def test_prepend_www_append_slash_have_slash_custom_urlconf(self): request = self.rf.get("/customurlconf/slash/") request.urlconf = "middleware.extra_urls" r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "http://www.testserver/customurlconf/slash/") @override_settings(APPEND_SLASH=True, PREPEND_WWW=True) def test_prepend_www_append_slash_slashless_custom_urlconf(self): request = self.rf.get("/customurlconf/slash") request.urlconf = "middleware.extra_urls" r = CommonMiddleware(get_response_empty).process_request(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "http://www.testserver/customurlconf/slash/") # Tests for the Content-Length header def test_content_length_header_added(self): def get_response(req): response = HttpResponse("content") self.assertNotIn("Content-Length", response) return response response = CommonMiddleware(get_response)(self.rf.get("/")) self.assertEqual(int(response.headers["Content-Length"]), len(response.content)) def test_content_length_header_not_added_for_streaming_response(self): def get_response(req): response = StreamingHttpResponse("content") self.assertNotIn("Content-Length", response) return response response = CommonMiddleware(get_response)(self.rf.get("/")) self.assertNotIn("Content-Length", response) def test_content_length_header_not_changed(self): bad_content_length = 500 def get_response(req): response = HttpResponse() response.headers["Content-Length"] = bad_content_length return response response = CommonMiddleware(get_response)(self.rf.get("/")) self.assertEqual(int(response.headers["Content-Length"]), bad_content_length) # Other tests @override_settings(DISALLOWED_USER_AGENTS=[re.compile(r"foo")]) def test_disallowed_user_agents(self): request = self.rf.get("/slash") request.META["HTTP_USER_AGENT"] = "foo" with self.assertRaisesMessage(PermissionDenied, "Forbidden user agent"): CommonMiddleware(get_response_empty).process_request(request) def test_non_ascii_query_string_does_not_crash(self): """Regression test for #15152""" request = self.rf.get("/slash") request.META["QUERY_STRING"] = "drink=café" r = CommonMiddleware(get_response_empty).process_request(request) self.assertIsNone(r) response = HttpResponseNotFound() r = CommonMiddleware(get_response_empty).process_response(request, response) self.assertEqual(r.status_code, 301) def test_response_redirect_class(self): request = self.rf.get("/slash") r = CommonMiddleware(get_response_404)(request) self.assertEqual(r.status_code, 301) self.assertEqual(r.url, "/slash/") self.assertIsInstance(r, HttpResponsePermanentRedirect) def test_response_redirect_class_subclass(self): class MyCommonMiddleware(CommonMiddleware): 
response_redirect_class = HttpResponseRedirect request = self.rf.get("/slash") r = MyCommonMiddleware(get_response_404)(request) self.assertEqual(r.status_code, 302) self.assertEqual(r.url, "/slash/") self.assertIsInstance(r, HttpResponseRedirect) @override_settings( IGNORABLE_404_URLS=[re.compile(r"foo")], MANAGERS=[("PHD", "[email protected]")], ) class BrokenLinkEmailsMiddlewareTest(SimpleTestCase): rf = RequestFactory() def setUp(self): self.req = self.rf.get("/regular_url/that/does/not/exist") def get_response(self, req): return self.client.get(req.path) def test_404_error_reporting(self): self.req.META["HTTP_REFERER"] = "/another/url/" BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 1) self.assertIn("Broken", mail.outbox[0].subject) def test_404_error_reporting_no_referer(self): BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) def test_404_error_reporting_ignored_url(self): self.req.path = self.req.path_info = "foo_url/that/does/not/exist" BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) def test_custom_request_checker(self): class SubclassedMiddleware(BrokenLinkEmailsMiddleware): ignored_user_agent_patterns = ( re.compile(r"Spider.*"), re.compile(r"Robot.*"), ) def is_ignorable_request(self, request, uri, domain, referer): """Check user-agent in addition to normal checks.""" if super().is_ignorable_request(request, uri, domain, referer): return True user_agent = request.META["HTTP_USER_AGENT"] return any( pattern.search(user_agent) for pattern in self.ignored_user_agent_patterns ) self.req.META["HTTP_REFERER"] = "/another/url/" self.req.META["HTTP_USER_AGENT"] = "Spider machine 3.4" SubclassedMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) self.req.META["HTTP_USER_AGENT"] = "My user agent" SubclassedMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 1) def test_referer_equal_to_requested_url(self): """ Some bots set the referer to the current URL to avoid being blocked by a referer check (#25302). """ self.req.META["HTTP_REFERER"] = self.req.path BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) # URL with scheme and domain should also be ignored self.req.META["HTTP_REFERER"] = "http://testserver%s" % self.req.path BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) # URL with a different scheme should be ignored as well because bots # tend to use http:// in referers even when browsing HTTPS websites.
self.req.META["HTTP_X_PROTO"] = "https" self.req.META["SERVER_PORT"] = 443 with self.settings(SECURE_PROXY_SSL_HEADER=("HTTP_X_PROTO", "https")): BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) def test_referer_equal_to_requested_url_on_another_domain(self): self.req.META["HTTP_REFERER"] = "http://anotherserver%s" % self.req.path BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 1) @override_settings(APPEND_SLASH=True) def test_referer_equal_to_requested_url_without_trailing_slash_with_append_slash( self, ): self.req.path = self.req.path_info = "/regular_url/that/does/not/exist/" self.req.META["HTTP_REFERER"] = self.req.path_info[:-1] BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 0) @override_settings(APPEND_SLASH=False) def test_referer_equal_to_requested_url_without_trailing_slash_with_no_append_slash( self, ): self.req.path = self.req.path_info = "/regular_url/that/does/not/exist/" self.req.META["HTTP_REFERER"] = self.req.path_info[:-1] BrokenLinkEmailsMiddleware(self.get_response)(self.req) self.assertEqual(len(mail.outbox), 1) @override_settings(ROOT_URLCONF="middleware.cond_get_urls") class ConditionalGetMiddlewareTest(SimpleTestCase): request_factory = RequestFactory() def setUp(self): self.req = self.request_factory.get("/") self.resp_headers = {} def get_response(self, req): resp = self.client.get(req.path_info) for key, value in self.resp_headers.items(): resp[key] = value return resp # Tests for the ETag header def test_middleware_calculates_etag(self): resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) self.assertNotEqual("", resp["ETag"]) def test_middleware_wont_overwrite_etag(self): self.resp_headers["ETag"] = "eggs" resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) self.assertEqual("eggs", resp["ETag"]) def test_no_etag_streaming_response(self): def get_response(req): return StreamingHttpResponse(["content"]) self.assertFalse( ConditionalGetMiddleware(get_response)(self.req).has_header("ETag") ) def test_no_etag_response_empty_content(self): def get_response(req): return HttpResponse() self.assertFalse( ConditionalGetMiddleware(get_response)(self.req).has_header("ETag") ) def test_no_etag_no_store_cache(self): self.resp_headers["Cache-Control"] = "No-Cache, No-Store, Max-age=0" self.assertFalse( ConditionalGetMiddleware(self.get_response)(self.req).has_header("ETag") ) def test_etag_extended_cache_control(self): self.resp_headers["Cache-Control"] = 'my-directive="my-no-store"' self.assertTrue( ConditionalGetMiddleware(self.get_response)(self.req).has_header("ETag") ) def test_if_none_match_and_no_etag(self): self.req.META["HTTP_IF_NONE_MATCH"] = "spam" resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) def test_no_if_none_match_and_etag(self): self.resp_headers["ETag"] = "eggs" resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) def test_if_none_match_and_same_etag(self): self.req.META["HTTP_IF_NONE_MATCH"] = '"spam"' self.resp_headers["ETag"] = '"spam"' resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 304) def test_if_none_match_and_different_etag(self): self.req.META["HTTP_IF_NONE_MATCH"] = "spam" self.resp_headers["ETag"] = "eggs" resp = ConditionalGetMiddleware(self.get_response)(self.req) 
self.assertEqual(resp.status_code, 200) def test_if_none_match_and_redirect(self): def get_response(req): resp = self.client.get(req.path_info) resp["ETag"] = "spam" resp["Location"] = "/" resp.status_code = 301 return resp self.req.META["HTTP_IF_NONE_MATCH"] = "spam" resp = ConditionalGetMiddleware(get_response)(self.req) self.assertEqual(resp.status_code, 301) def test_if_none_match_and_client_error(self): def get_response(req): resp = self.client.get(req.path_info) resp["ETag"] = "spam" resp.status_code = 400 return resp self.req.META["HTTP_IF_NONE_MATCH"] = "spam" resp = ConditionalGetMiddleware(get_response)(self.req) self.assertEqual(resp.status_code, 400) # Tests for the Last-Modified header def test_if_modified_since_and_no_last_modified(self): self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT" resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) def test_no_if_modified_since_and_last_modified(self): self.resp_headers["Last-Modified"] = "Sat, 12 Feb 2011 17:38:44 GMT" resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 200) def test_if_modified_since_and_same_last_modified(self): self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT" self.resp_headers["Last-Modified"] = "Sat, 12 Feb 2011 17:38:44 GMT" self.resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(self.resp.status_code, 304) def test_if_modified_since_and_last_modified_in_the_past(self): self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT" self.resp_headers["Last-Modified"] = "Sat, 12 Feb 2011 17:35:44 GMT" resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(resp.status_code, 304) def test_if_modified_since_and_last_modified_in_the_future(self): self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT" self.resp_headers["Last-Modified"] = "Sat, 12 Feb 2011 17:41:44 GMT" self.resp = ConditionalGetMiddleware(self.get_response)(self.req) self.assertEqual(self.resp.status_code, 200) def test_if_modified_since_and_redirect(self): def get_response(req): resp = self.client.get(req.path_info) resp["Last-Modified"] = "Sat, 12 Feb 2011 17:35:44 GMT" resp["Location"] = "/" resp.status_code = 301 return resp self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT" resp = ConditionalGetMiddleware(get_response)(self.req) self.assertEqual(resp.status_code, 301) def test_if_modified_since_and_client_error(self): def get_response(req): resp = self.client.get(req.path_info) resp["Last-Modified"] = "Sat, 12 Feb 2011 17:35:44 GMT" resp.status_code = 400 return resp self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT" resp = ConditionalGetMiddleware(get_response)(self.req) self.assertEqual(resp.status_code, 400) def test_not_modified_headers(self): """ The 304 Not Modified response should include only the headers required by section 4.1 of RFC 7232, Last-Modified, and the cookies. 
""" def get_response(req): resp = self.client.get(req.path_info) resp["Date"] = "Sat, 12 Feb 2011 17:35:44 GMT" resp["Last-Modified"] = "Sat, 12 Feb 2011 17:35:44 GMT" resp["Expires"] = "Sun, 13 Feb 2011 17:35:44 GMT" resp["Vary"] = "Cookie" resp["Cache-Control"] = "public" resp["Content-Location"] = "/alt" resp["Content-Language"] = "en" # shouldn't be preserved resp["ETag"] = '"spam"' resp.set_cookie("key", "value") return resp self.req.META["HTTP_IF_NONE_MATCH"] = '"spam"' new_response = ConditionalGetMiddleware(get_response)(self.req) self.assertEqual(new_response.status_code, 304) base_response = get_response(self.req) for header in ( "Cache-Control", "Content-Location", "Date", "ETag", "Expires", "Last-Modified", "Vary", ): self.assertEqual( new_response.headers[header], base_response.headers[header] ) self.assertEqual(new_response.cookies, base_response.cookies) self.assertNotIn("Content-Language", new_response) def test_no_unsafe(self): """ ConditionalGetMiddleware shouldn't return a conditional response on an unsafe request. A response has already been generated by the time ConditionalGetMiddleware is called, so it's too late to return a 412 Precondition Failed. """ def get_200_response(req): return HttpResponse(status=200) response = ConditionalGetMiddleware(self.get_response)(self.req) etag = response.headers["ETag"] put_request = self.request_factory.put("/", HTTP_IF_MATCH=etag) conditional_get_response = ConditionalGetMiddleware(get_200_response)( put_request ) self.assertEqual( conditional_get_response.status_code, 200 ) # should never be a 412 def test_no_head(self): """ ConditionalGetMiddleware shouldn't compute and return an ETag on a HEAD request since it can't do so accurately without access to the response body of the corresponding GET. """ def get_200_response(req): return HttpResponse(status=200) request = self.request_factory.head("/") conditional_get_response = ConditionalGetMiddleware(get_200_response)(request) self.assertNotIn("ETag", conditional_get_response) class XFrameOptionsMiddlewareTest(SimpleTestCase): """ Tests for the X-Frame-Options clickjacking prevention middleware. """ def test_same_origin(self): """ The X_FRAME_OPTIONS setting can be set to SAMEORIGIN to have the middleware use that value for the HTTP header. """ with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"): r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN") with override_settings(X_FRAME_OPTIONS="sameorigin"): r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN") def test_deny(self): """ The X_FRAME_OPTIONS setting can be set to DENY to have the middleware use that value for the HTTP header. """ with override_settings(X_FRAME_OPTIONS="DENY"): r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers["X-Frame-Options"], "DENY") with override_settings(X_FRAME_OPTIONS="deny"): r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers["X-Frame-Options"], "DENY") def test_defaults_sameorigin(self): """ If the X_FRAME_OPTIONS setting is not set then it defaults to DENY. 
""" with override_settings(X_FRAME_OPTIONS=None): del settings.X_FRAME_OPTIONS # restored by override_settings r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers["X-Frame-Options"], "DENY") def test_dont_set_if_set(self): """ If the X-Frame-Options header is already set then the middleware does not attempt to override it. """ def same_origin_response(request): response = HttpResponse() response.headers["X-Frame-Options"] = "SAMEORIGIN" return response def deny_response(request): response = HttpResponse() response.headers["X-Frame-Options"] = "DENY" return response with override_settings(X_FRAME_OPTIONS="DENY"): r = XFrameOptionsMiddleware(same_origin_response)(HttpRequest()) self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN") with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"): r = XFrameOptionsMiddleware(deny_response)(HttpRequest()) self.assertEqual(r.headers["X-Frame-Options"], "DENY") def test_response_exempt(self): """ If the response has an xframe_options_exempt attribute set to False then it still sets the header, but if it's set to True then it doesn't. """ def xframe_exempt_response(request): response = HttpResponse() response.xframe_options_exempt = True return response def xframe_not_exempt_response(request): response = HttpResponse() response.xframe_options_exempt = False return response with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"): r = XFrameOptionsMiddleware(xframe_not_exempt_response)(HttpRequest()) self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN") r = XFrameOptionsMiddleware(xframe_exempt_response)(HttpRequest()) self.assertIsNone(r.headers.get("X-Frame-Options")) def test_is_extendable(self): """ The XFrameOptionsMiddleware method that determines the X-Frame-Options header value can be overridden based on something in the request or response. """ class OtherXFrameOptionsMiddleware(XFrameOptionsMiddleware): # This is just an example for testing purposes... def get_xframe_options_value(self, request, response): if getattr(request, "sameorigin", False): return "SAMEORIGIN" if getattr(response, "sameorigin", False): return "SAMEORIGIN" return "DENY" def same_origin_response(request): response = HttpResponse() response.sameorigin = True return response with override_settings(X_FRAME_OPTIONS="DENY"): r = OtherXFrameOptionsMiddleware(same_origin_response)(HttpRequest()) self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN") request = HttpRequest() request.sameorigin = True r = OtherXFrameOptionsMiddleware(get_response_empty)(request) self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN") with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"): r = OtherXFrameOptionsMiddleware(get_response_empty)(HttpRequest()) self.assertEqual(r.headers["X-Frame-Options"], "DENY") class GZipMiddlewareTest(SimpleTestCase): """ Tests the GZipMiddleware. """ short_string = b"This string is too short to be worth compressing." 
compressible_string = b"a" * 500 incompressible_string = b"".join( int2byte(random.randint(0, 255)) for _ in range(500) ) sequence = [b"a" * 500, b"b" * 200, b"a" * 300] sequence_unicode = ["a" * 500, "é" * 200, "a" * 300] request_factory = RequestFactory() def setUp(self): self.req = self.request_factory.get("/") self.req.META["HTTP_ACCEPT_ENCODING"] = "gzip, deflate" self.req.META[ "HTTP_USER_AGENT" ] = "Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1" self.resp = HttpResponse() self.resp.status_code = 200 self.resp.content = self.compressible_string self.resp["Content-Type"] = "text/html; charset=UTF-8" def get_response(self, request): return self.resp @staticmethod def decompress(gzipped_string): with gzip.GzipFile(mode="rb", fileobj=BytesIO(gzipped_string)) as f: return f.read() @staticmethod def get_mtime(gzipped_string): with gzip.GzipFile(mode="rb", fileobj=BytesIO(gzipped_string)) as f: f.read() # must read the data before accessing the header return f.mtime def test_compress_response(self): """ Compression is performed on responses with compressible content. """ r = GZipMiddleware(self.get_response)(self.req) self.assertEqual(self.decompress(r.content), self.compressible_string) self.assertEqual(r.get("Content-Encoding"), "gzip") self.assertEqual(r.get("Content-Length"), str(len(r.content))) def test_compress_streaming_response(self): """ Compression is performed on responses with streaming content. """ def get_stream_response(request): resp = StreamingHttpResponse(self.sequence) resp["Content-Type"] = "text/html; charset=UTF-8" return resp r = GZipMiddleware(get_stream_response)(self.req) self.assertEqual(self.decompress(b"".join(r)), b"".join(self.sequence)) self.assertEqual(r.get("Content-Encoding"), "gzip") self.assertFalse(r.has_header("Content-Length")) def test_compress_streaming_response_unicode(self): """ Compression is performed on responses with streaming Unicode content. """ def get_stream_response_unicode(request): resp = StreamingHttpResponse(self.sequence_unicode) resp["Content-Type"] = "text/html; charset=UTF-8" return resp r = GZipMiddleware(get_stream_response_unicode)(self.req) self.assertEqual( self.decompress(b"".join(r)), b"".join(x.encode() for x in self.sequence_unicode), ) self.assertEqual(r.get("Content-Encoding"), "gzip") self.assertFalse(r.has_header("Content-Length")) def test_compress_file_response(self): """ Compression is performed on FileResponse. """ with open(__file__, "rb") as file1: def get_response(req): file_resp = FileResponse(file1) file_resp["Content-Type"] = "text/html; charset=UTF-8" return file_resp r = GZipMiddleware(get_response)(self.req) with open(__file__, "rb") as file2: self.assertEqual(self.decompress(b"".join(r)), file2.read()) self.assertEqual(r.get("Content-Encoding"), "gzip") self.assertIsNot(r.file_to_stream, file1) def test_compress_non_200_response(self): """ Compression is performed on responses with a status other than 200 (#10762). """ self.resp.status_code = 404 r = GZipMiddleware(self.get_response)(self.req) self.assertEqual(self.decompress(r.content), self.compressible_string) self.assertEqual(r.get("Content-Encoding"), "gzip") def test_no_compress_short_response(self): """ Compression isn't performed on responses with short content. 
""" self.resp.content = self.short_string r = GZipMiddleware(self.get_response)(self.req) self.assertEqual(r.content, self.short_string) self.assertIsNone(r.get("Content-Encoding")) def test_no_compress_compressed_response(self): """ Compression isn't performed on responses that are already compressed. """ self.resp["Content-Encoding"] = "deflate" r = GZipMiddleware(self.get_response)(self.req) self.assertEqual(r.content, self.compressible_string) self.assertEqual(r.get("Content-Encoding"), "deflate") def test_no_compress_incompressible_response(self): """ Compression isn't performed on responses with incompressible content. """ self.resp.content = self.incompressible_string r = GZipMiddleware(self.get_response)(self.req) self.assertEqual(r.content, self.incompressible_string) self.assertIsNone(r.get("Content-Encoding")) def test_compress_deterministic(self): """ Compression results are the same for the same content and don't include a modification time (since that would make the results of compression non-deterministic and prevent ConditionalGetMiddleware from recognizing conditional matches on gzipped content). """ r1 = GZipMiddleware(self.get_response)(self.req) r2 = GZipMiddleware(self.get_response)(self.req) self.assertEqual(r1.content, r2.content) self.assertEqual(self.get_mtime(r1.content), 0) self.assertEqual(self.get_mtime(r2.content), 0) class ETagGZipMiddlewareTest(SimpleTestCase): """ ETags are handled properly by GZipMiddleware. """ rf = RequestFactory() compressible_string = b"a" * 500 def test_strong_etag_modified(self): """ GZipMiddleware makes a strong ETag weak. """ def get_response(req): response = HttpResponse(self.compressible_string) response.headers["ETag"] = '"eggs"' return response request = self.rf.get("/", HTTP_ACCEPT_ENCODING="gzip, deflate") gzip_response = GZipMiddleware(get_response)(request) self.assertEqual(gzip_response.headers["ETag"], 'W/"eggs"') def test_weak_etag_not_modified(self): """ GZipMiddleware doesn't modify a weak ETag. """ def get_response(req): response = HttpResponse(self.compressible_string) response.headers["ETag"] = 'W/"eggs"' return response request = self.rf.get("/", HTTP_ACCEPT_ENCODING="gzip, deflate") gzip_response = GZipMiddleware(get_response)(request) self.assertEqual(gzip_response.headers["ETag"], 'W/"eggs"') def test_etag_match(self): """ GZipMiddleware allows 304 Not Modified responses. """ def get_response(req): return HttpResponse(self.compressible_string) def get_cond_response(req): return ConditionalGetMiddleware(get_response)(req) request = self.rf.get("/", HTTP_ACCEPT_ENCODING="gzip, deflate") response = GZipMiddleware(get_cond_response)(request) gzip_etag = response.headers["ETag"] next_request = self.rf.get( "/", HTTP_ACCEPT_ENCODING="gzip, deflate", HTTP_IF_NONE_MATCH=gzip_etag ) next_response = ConditionalGetMiddleware(get_response)(next_request) self.assertEqual(next_response.status_code, 304)
import unittest

from django.core.exceptions import FieldError
from django.db import IntegrityError, connection, transaction
from django.db.models import Case, CharField, Count, F, IntegerField, Max, When
from django.db.models.functions import Abs, Concat, Lower
from django.test import TestCase
from django.test.utils import register_lookup

from .models import (
    A,
    B,
    Bar,
    D,
    DataPoint,
    Foo,
    RelatedPoint,
    UniqueNumber,
    UniqueNumberChild,
)


class SimpleTest(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.a1 = A.objects.create()
        cls.a2 = A.objects.create()
        for x in range(20):
            B.objects.create(a=cls.a1)
            D.objects.create(a=cls.a1)

    def test_nonempty_update(self):
        """
        Update changes the right number of rows for a nonempty queryset
        """
        num_updated = self.a1.b_set.update(y=100)
        self.assertEqual(num_updated, 20)
        cnt = B.objects.filter(y=100).count()
        self.assertEqual(cnt, 20)

    def test_empty_update(self):
        """
        Update changes the right number of rows for an empty queryset
        """
        num_updated = self.a2.b_set.update(y=100)
        self.assertEqual(num_updated, 0)
        cnt = B.objects.filter(y=100).count()
        self.assertEqual(cnt, 0)

    def test_nonempty_update_with_inheritance(self):
        """
        Update changes the right number of rows for a nonempty queryset
        when the update affects only a base table
        """
        num_updated = self.a1.d_set.update(y=100)
        self.assertEqual(num_updated, 20)
        cnt = D.objects.filter(y=100).count()
        self.assertEqual(cnt, 20)

    def test_empty_update_with_inheritance(self):
        """
        Update changes the right number of rows for an empty queryset
        when the update affects only a base table
        """
        num_updated = self.a2.d_set.update(y=100)
        self.assertEqual(num_updated, 0)
        cnt = D.objects.filter(y=100).count()
        self.assertEqual(cnt, 0)

    def test_foreign_key_update_with_id(self):
        """
        Update works using <field>_id for foreign keys
        """
        num_updated = self.a1.d_set.update(a_id=self.a2)
        self.assertEqual(num_updated, 20)
        self.assertEqual(self.a2.d_set.count(), 20)


class AdvancedTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.d0 = DataPoint.objects.create(name="d0", value="apple")
        cls.d2 = DataPoint.objects.create(name="d2", value="banana")
        cls.d3 = DataPoint.objects.create(name="d3", value="banana", is_active=False)
        cls.r1 = RelatedPoint.objects.create(name="r1", data=cls.d3)

    def test_update(self):
        """
        Objects are updated by first filtering the candidates into a queryset
        and then calling the update() method. It executes immediately and
        returns the number of rows matched.
        """
        resp = DataPoint.objects.filter(value="apple").update(name="d1")
        self.assertEqual(resp, 1)
        resp = DataPoint.objects.filter(value="apple")
        self.assertEqual(list(resp), [self.d0])

    def test_update_multiple_objects(self):
        """
        We can update multiple objects at once.
        """
        resp = DataPoint.objects.filter(value="banana").update(value="pineapple")
        self.assertEqual(resp, 2)
        self.assertEqual(DataPoint.objects.get(name="d2").value, "pineapple")

    def test_update_fk(self):
        """
        Foreign key fields can also be updated, although you can only update
        the object referred to, not anything inside the related object.
""" resp = RelatedPoint.objects.filter(name="r1").update(data=self.d0) self.assertEqual(resp, 1) resp = RelatedPoint.objects.filter(data__name="d0") self.assertEqual(list(resp), [self.r1]) def test_update_multiple_fields(self): """ Multiple fields can be updated at once """ resp = DataPoint.objects.filter(value="apple").update( value="fruit", another_value="peach" ) self.assertEqual(resp, 1) d = DataPoint.objects.get(name="d0") self.assertEqual(d.value, "fruit") self.assertEqual(d.another_value, "peach") def test_update_all(self): """ In the rare case you want to update every instance of a model, update() is also a manager method. """ self.assertEqual(DataPoint.objects.update(value="thing"), 3) resp = DataPoint.objects.values("value").distinct() self.assertEqual(list(resp), [{"value": "thing"}]) def test_update_slice_fail(self): """ We do not support update on already sliced query sets. """ method = DataPoint.objects.all()[:2].update msg = "Cannot update a query once a slice has been taken." with self.assertRaisesMessage(TypeError, msg): method(another_value="another thing") def test_update_respects_to_field(self): """ Update of an FK field which specifies a to_field works. """ a_foo = Foo.objects.create(target="aaa") b_foo = Foo.objects.create(target="bbb") bar = Bar.objects.create(foo=a_foo) self.assertEqual(bar.foo_id, a_foo.target) bar_qs = Bar.objects.filter(pk=bar.pk) self.assertEqual(bar_qs[0].foo_id, a_foo.target) bar_qs.update(foo=b_foo) self.assertEqual(bar_qs[0].foo_id, b_foo.target) def test_update_m2m_field(self): msg = ( "Cannot update model field " "<django.db.models.fields.related.ManyToManyField: m2m_foo> " "(only non-relations and foreign keys permitted)." ) with self.assertRaisesMessage(FieldError, msg): Bar.objects.update(m2m_foo="whatever") def test_update_transformed_field(self): A.objects.create(x=5) A.objects.create(x=-6) with register_lookup(IntegerField, Abs): A.objects.update(x=F("x__abs")) self.assertCountEqual(A.objects.values_list("x", flat=True), [5, 6]) def test_update_annotated_queryset(self): """ Update of a queryset that's been annotated. """ # Trivial annotated update qs = DataPoint.objects.annotate(alias=F("value")) self.assertEqual(qs.update(another_value="foo"), 3) # Update where annotation is used for filtering qs = DataPoint.objects.annotate(alias=F("value")).filter(alias="apple") self.assertEqual(qs.update(another_value="foo"), 1) # Update where annotation is used in update parameters qs = DataPoint.objects.annotate(alias=F("value")) self.assertEqual(qs.update(another_value=F("alias")), 3) # Update where aggregation annotation is used in update parameters qs = DataPoint.objects.annotate(max=Max("value")) msg = ( "Aggregate functions are not allowed in this query " "(another_value=Max(Col(update_datapoint, update.DataPoint.value)))." ) with self.assertRaisesMessage(FieldError, msg): qs.update(another_value=F("max")) def test_update_annotated_multi_table_queryset(self): """ Update of a queryset that's been annotated and involves multiple tables. 
""" # Trivial annotated update qs = DataPoint.objects.annotate(related_count=Count("relatedpoint")) self.assertEqual(qs.update(value="Foo"), 3) # Update where annotation is used for filtering qs = DataPoint.objects.annotate(related_count=Count("relatedpoint")) self.assertEqual(qs.filter(related_count=1).update(value="Foo"), 1) # Update where aggregation annotation is used in update parameters qs = RelatedPoint.objects.annotate(max=Max("data__value")) msg = "Joined field references are not permitted in this query" with self.assertRaisesMessage(FieldError, msg): qs.update(name=F("max")) def test_update_with_joined_field_annotation(self): msg = "Joined field references are not permitted in this query" with register_lookup(CharField, Lower): for annotation in ( F("data__name"), F("data__name__lower"), Lower("data__name"), Concat("data__name", "data__value"), ): with self.subTest(annotation=annotation): with self.assertRaisesMessage(FieldError, msg): RelatedPoint.objects.annotate( new_name=annotation, ).update(name=F("new_name")) def test_update_ordered_by_m2m_aggregation_annotation(self): msg = ( "Cannot update when ordering by an aggregate: " "Count(Col(update_bar_m2m_foo, update.Bar_m2m_foo.foo))" ) with self.assertRaisesMessage(FieldError, msg): Bar.objects.annotate(m2m_count=Count("m2m_foo")).order_by( "m2m_count" ).update(x=2) def test_update_ordered_by_inline_m2m_annotation(self): foo = Foo.objects.create(target="test") Bar.objects.create(foo=foo) Bar.objects.order_by(Abs("m2m_foo")).update(x=2) self.assertEqual(Bar.objects.get().x, 2) def test_update_ordered_by_m2m_annotation(self): foo = Foo.objects.create(target="test") Bar.objects.create(foo=foo) Bar.objects.annotate(abs_id=Abs("m2m_foo")).order_by("abs_id").update(x=3) self.assertEqual(Bar.objects.get().x, 3) def test_update_negated_f(self): DataPoint.objects.update(is_active=~F("is_active")) self.assertCountEqual( DataPoint.objects.values_list("name", "is_active"), [("d0", False), ("d2", False), ("d3", True)], ) DataPoint.objects.update(is_active=~F("is_active")) self.assertCountEqual( DataPoint.objects.values_list("name", "is_active"), [("d0", True), ("d2", True), ("d3", False)], ) def test_update_negated_f_conditional_annotation(self): DataPoint.objects.annotate( is_d2=Case(When(name="d2", then=True), default=False) ).update(is_active=~F("is_d2")) self.assertCountEqual( DataPoint.objects.values_list("name", "is_active"), [("d0", True), ("d2", False), ("d3", True)], ) def test_updating_non_conditional_field(self): msg = "Cannot negate non-conditional expressions." 
with self.assertRaisesMessage(TypeError, msg): DataPoint.objects.update(is_active=~F("name")) @unittest.skipUnless( connection.vendor == "mysql", "UPDATE...ORDER BY syntax is supported on MySQL/MariaDB", ) class MySQLUpdateOrderByTest(TestCase): """Update field with a unique constraint using an ordered queryset.""" @classmethod def setUpTestData(cls): UniqueNumber.objects.create(number=1) UniqueNumber.objects.create(number=2) def test_order_by_update_on_unique_constraint(self): tests = [ ("-number", "id"), (F("number").desc(), "id"), (F("number") * -1, "id"), ] for ordering in tests: with self.subTest(ordering=ordering), transaction.atomic(): updated = UniqueNumber.objects.order_by(*ordering).update( number=F("number") + 1, ) self.assertEqual(updated, 2) def test_order_by_update_on_unique_constraint_annotation(self): updated = ( UniqueNumber.objects.annotate(number_inverse=F("number").desc()) .order_by("number_inverse") .update(number=F("number") + 1) ) self.assertEqual(updated, 2) def test_order_by_update_on_parent_unique_constraint(self): # Ordering by inherited fields is omitted because joined fields cannot # be used in the ORDER BY clause. UniqueNumberChild.objects.create(number=3) UniqueNumberChild.objects.create(number=4) with self.assertRaises(IntegrityError): UniqueNumberChild.objects.order_by("number").update( number=F("number") + 1, ) def test_order_by_update_on_related_field(self): # Ordering by related fields is omitted because joined fields cannot be # used in the ORDER BY clause. data = DataPoint.objects.create(name="d0", value="apple") related = RelatedPoint.objects.create(name="r0", data=data) with self.assertNumQueries(1) as ctx: updated = RelatedPoint.objects.order_by("data__name").update(name="new") sql = ctx.captured_queries[0]["sql"] self.assertNotIn("ORDER BY", sql) self.assertEqual(updated, 1) related.refresh_from_db() self.assertEqual(related.name, "new")
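# QuerySet.update() compiles to a single UPDATE statement: F() references such
# as F("number") + 1 are evaluated per row by the database, not in Python, and
# the call returns the number of rows matched. The MySQL-only class above
# exists because adding ORDER BY to that UPDATE is a MySQL/MariaDB extension
# that lets rows be renumbered without tripping the unique constraint mid-statement.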
""" Tests for the update() queryset method that allows in-place, multi-object updates. """ from django.db import models class DataPoint(models.Model): name = models.CharField(max_length=20) value = models.CharField(max_length=20) another_value = models.CharField(max_length=20, blank=True) is_active = models.BooleanField(default=True) class RelatedPoint(models.Model): name = models.CharField(max_length=20) data = models.ForeignKey(DataPoint, models.CASCADE) class A(models.Model): x = models.IntegerField(default=10) class B(models.Model): a = models.ForeignKey(A, models.CASCADE) y = models.IntegerField(default=10) class C(models.Model): y = models.IntegerField(default=10) class D(C): a = models.ForeignKey(A, models.CASCADE) class Foo(models.Model): target = models.CharField(max_length=10, unique=True) class Bar(models.Model): foo = models.ForeignKey(Foo, models.CASCADE, to_field="target") m2m_foo = models.ManyToManyField(Foo, related_name="m2m_foo") x = models.IntegerField(default=0) class UniqueNumber(models.Model): number = models.IntegerField(unique=True) class UniqueNumberChild(UniqueNumber): pass
""" Tests for django test runner """ import collections.abc import multiprocessing import os import sys import unittest from unittest import mock from admin_scripts.tests import AdminScriptTestCase from django import db from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.management import call_command from django.core.management.base import SystemCheckError from django.test import SimpleTestCase, TransactionTestCase, skipUnlessDBFeature from django.test.runner import ( DiscoverRunner, Shuffler, _init_worker, reorder_test_bin, reorder_tests, shuffle_tests, ) from django.test.testcases import connections_support_transactions from django.test.utils import ( captured_stderr, dependency_ordered, get_unique_databases_and_mirrors, iter_test_cases, ) from django.utils.deprecation import RemovedInDjango50Warning from .models import B, Person, Through class MySuite: def __init__(self): self.tests = [] def addTest(self, test): self.tests.append(test) def __iter__(self): yield from self.tests class TestSuiteTests(SimpleTestCase): def build_test_suite(self, test_classes, suite=None, suite_class=None): if suite_class is None: suite_class = unittest.TestSuite if suite is None: suite = suite_class() loader = unittest.defaultTestLoader for test_class in test_classes: tests = loader.loadTestsFromTestCase(test_class) subsuite = suite_class() # Only use addTest() to simplify testing a custom TestSuite. for test in tests: subsuite.addTest(test) suite.addTest(subsuite) return suite def make_test_suite(self, suite=None, suite_class=None): class Tests1(unittest.TestCase): def test1(self): pass def test2(self): pass class Tests2(unittest.TestCase): def test1(self): pass def test2(self): pass return self.build_test_suite( (Tests1, Tests2), suite=suite, suite_class=suite_class, ) def assertTestNames(self, tests, expected): # Each test.id() has a form like the following: # "test_runner.tests.IterTestCasesTests.test_iter_test_cases.<locals>.Tests1.test1". # It suffices to check only the last two parts. names = [".".join(test.id().split(".")[-2:]) for test in tests] self.assertEqual(names, expected) def test_iter_test_cases_basic(self): suite = self.make_test_suite() tests = iter_test_cases(suite) self.assertTestNames( tests, expected=[ "Tests1.test1", "Tests1.test2", "Tests2.test1", "Tests2.test2", ], ) def test_iter_test_cases_string_input(self): msg = ( "Test 'a' must be a test case or test suite not string (was found " "in 'abc')." 
) with self.assertRaisesMessage(TypeError, msg): list(iter_test_cases("abc")) def test_iter_test_cases_iterable_of_tests(self): class Tests(unittest.TestCase): def test1(self): pass def test2(self): pass tests = list(unittest.defaultTestLoader.loadTestsFromTestCase(Tests)) actual_tests = iter_test_cases(tests) self.assertTestNames( actual_tests, expected=[ "Tests.test1", "Tests.test2", ], ) def test_iter_test_cases_custom_test_suite_class(self): suite = self.make_test_suite(suite_class=MySuite) tests = iter_test_cases(suite) self.assertTestNames( tests, expected=[ "Tests1.test1", "Tests1.test2", "Tests2.test1", "Tests2.test2", ], ) def test_iter_test_cases_mixed_test_suite_classes(self): suite = self.make_test_suite(suite=MySuite()) child_suite = list(suite)[0] self.assertNotIsInstance(child_suite, MySuite) tests = list(iter_test_cases(suite)) self.assertEqual(len(tests), 4) self.assertNotIsInstance(tests[0], unittest.TestSuite) def make_tests(self): """Return an iterable of tests.""" suite = self.make_test_suite() return list(iter_test_cases(suite)) def test_shuffle_tests(self): tests = self.make_tests() # Choose a seed that shuffles both the classes and methods. shuffler = Shuffler(seed=9) shuffled_tests = shuffle_tests(tests, shuffler) self.assertIsInstance(shuffled_tests, collections.abc.Iterator) self.assertTestNames( shuffled_tests, expected=[ "Tests2.test1", "Tests2.test2", "Tests1.test2", "Tests1.test1", ], ) def test_reorder_test_bin_no_arguments(self): tests = self.make_tests() reordered_tests = reorder_test_bin(tests) self.assertIsInstance(reordered_tests, collections.abc.Iterator) self.assertTestNames( reordered_tests, expected=[ "Tests1.test1", "Tests1.test2", "Tests2.test1", "Tests2.test2", ], ) def test_reorder_test_bin_reverse(self): tests = self.make_tests() reordered_tests = reorder_test_bin(tests, reverse=True) self.assertIsInstance(reordered_tests, collections.abc.Iterator) self.assertTestNames( reordered_tests, expected=[ "Tests2.test2", "Tests2.test1", "Tests1.test2", "Tests1.test1", ], ) def test_reorder_test_bin_random(self): tests = self.make_tests() # Choose a seed that shuffles both the classes and methods. shuffler = Shuffler(seed=9) reordered_tests = reorder_test_bin(tests, shuffler=shuffler) self.assertIsInstance(reordered_tests, collections.abc.Iterator) self.assertTestNames( reordered_tests, expected=[ "Tests2.test1", "Tests2.test2", "Tests1.test2", "Tests1.test1", ], ) def test_reorder_test_bin_random_and_reverse(self): tests = self.make_tests() # Choose a seed that shuffles both the classes and methods. shuffler = Shuffler(seed=9) reordered_tests = reorder_test_bin(tests, shuffler=shuffler, reverse=True) self.assertIsInstance(reordered_tests, collections.abc.Iterator) self.assertTestNames( reordered_tests, expected=[ "Tests1.test1", "Tests1.test2", "Tests2.test2", "Tests2.test1", ], ) def test_reorder_tests_same_type_consecutive(self): """Tests of the same type are made consecutive.""" tests = self.make_tests() # Move the last item to the front. tests.insert(0, tests.pop()) self.assertTestNames( tests, expected=[ "Tests2.test2", "Tests1.test1", "Tests1.test2", "Tests2.test1", ], ) reordered_tests = reorder_tests(tests, classes=[]) self.assertTestNames( reordered_tests, expected=[ "Tests2.test2", "Tests2.test1", "Tests1.test1", "Tests1.test2", ], ) def test_reorder_tests_random(self): tests = self.make_tests() # Choose a seed that shuffles both the classes and methods. 
shuffler = Shuffler(seed=9) reordered_tests = reorder_tests(tests, classes=[], shuffler=shuffler) self.assertIsInstance(reordered_tests, collections.abc.Iterator) self.assertTestNames( reordered_tests, expected=[ "Tests2.test1", "Tests2.test2", "Tests1.test2", "Tests1.test1", ], ) def test_reorder_tests_random_mixed_classes(self): tests = self.make_tests() # Move the last item to the front. tests.insert(0, tests.pop()) shuffler = Shuffler(seed=9) self.assertTestNames( tests, expected=[ "Tests2.test2", "Tests1.test1", "Tests1.test2", "Tests2.test1", ], ) reordered_tests = reorder_tests(tests, classes=[], shuffler=shuffler) self.assertTestNames( reordered_tests, expected=[ "Tests2.test1", "Tests2.test2", "Tests1.test2", "Tests1.test1", ], ) def test_reorder_tests_reverse_with_duplicates(self): class Tests1(unittest.TestCase): def test1(self): pass class Tests2(unittest.TestCase): def test2(self): pass def test3(self): pass suite = self.build_test_suite((Tests1, Tests2)) subsuite = list(suite)[0] suite.addTest(subsuite) tests = list(iter_test_cases(suite)) self.assertTestNames( tests, expected=[ "Tests1.test1", "Tests2.test2", "Tests2.test3", "Tests1.test1", ], ) reordered_tests = reorder_tests(tests, classes=[]) self.assertTestNames( reordered_tests, expected=[ "Tests1.test1", "Tests2.test2", "Tests2.test3", ], ) reordered_tests = reorder_tests(tests, classes=[], reverse=True) self.assertTestNames( reordered_tests, expected=[ "Tests2.test3", "Tests2.test2", "Tests1.test1", ], ) class DependencyOrderingTests(unittest.TestCase): def test_simple_dependencies(self): raw = [ ("s1", ("s1_db", ["alpha"])), ("s2", ("s2_db", ["bravo"])), ("s3", ("s3_db", ["charlie"])), ] dependencies = { "alpha": ["charlie"], "bravo": ["charlie"], } ordered = dependency_ordered(raw, dependencies=dependencies) ordered_sigs = [sig for sig, value in ordered] self.assertIn("s1", ordered_sigs) self.assertIn("s2", ordered_sigs) self.assertIn("s3", ordered_sigs) self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s1")) self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s2")) def test_chained_dependencies(self): raw = [ ("s1", ("s1_db", ["alpha"])), ("s2", ("s2_db", ["bravo"])), ("s3", ("s3_db", ["charlie"])), ] dependencies = { "alpha": ["bravo"], "bravo": ["charlie"], } ordered = dependency_ordered(raw, dependencies=dependencies) ordered_sigs = [sig for sig, value in ordered] self.assertIn("s1", ordered_sigs) self.assertIn("s2", ordered_sigs) self.assertIn("s3", ordered_sigs) # Explicit dependencies self.assertLess(ordered_sigs.index("s2"), ordered_sigs.index("s1")) self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s2")) # Implied dependencies self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s1")) def test_multiple_dependencies(self): raw = [ ("s1", ("s1_db", ["alpha"])), ("s2", ("s2_db", ["bravo"])), ("s3", ("s3_db", ["charlie"])), ("s4", ("s4_db", ["delta"])), ] dependencies = { "alpha": ["bravo", "delta"], "bravo": ["charlie"], "delta": ["charlie"], } ordered = dependency_ordered(raw, dependencies=dependencies) ordered_sigs = [sig for sig, aliases in ordered] self.assertIn("s1", ordered_sigs) self.assertIn("s2", ordered_sigs) self.assertIn("s3", ordered_sigs) self.assertIn("s4", ordered_sigs) # Explicit dependencies self.assertLess(ordered_sigs.index("s2"), ordered_sigs.index("s1")) self.assertLess(ordered_sigs.index("s4"), ordered_sigs.index("s1")) self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s2")) self.assertLess(ordered_sigs.index("s3"), 
ordered_sigs.index("s4")) # Implicit dependencies self.assertLess(ordered_sigs.index("s3"), ordered_sigs.index("s1")) def test_circular_dependencies(self): raw = [ ("s1", ("s1_db", ["alpha"])), ("s2", ("s2_db", ["bravo"])), ] dependencies = { "bravo": ["alpha"], "alpha": ["bravo"], } with self.assertRaises(ImproperlyConfigured): dependency_ordered(raw, dependencies=dependencies) def test_own_alias_dependency(self): raw = [("s1", ("s1_db", ["alpha", "bravo"]))] dependencies = {"alpha": ["bravo"]} with self.assertRaises(ImproperlyConfigured): dependency_ordered(raw, dependencies=dependencies) # reordering aliases shouldn't matter raw = [("s1", ("s1_db", ["bravo", "alpha"]))] with self.assertRaises(ImproperlyConfigured): dependency_ordered(raw, dependencies=dependencies) class MockTestRunner: def __init__(self, *args, **kwargs): if parallel := kwargs.get("parallel"): sys.stderr.write(f"parallel={parallel}") MockTestRunner.run_tests = mock.Mock(return_value=[]) class ManageCommandTests(unittest.TestCase): def test_custom_test_runner(self): call_command("test", "sites", testrunner="test_runner.tests.MockTestRunner") MockTestRunner.run_tests.assert_called_with(("sites",)) def test_bad_test_runner(self): with self.assertRaises(AttributeError): call_command("test", "sites", testrunner="test_runner.NonexistentRunner") def test_time_recorded(self): with captured_stderr() as stderr: call_command( "test", "--timing", "sites", testrunner="test_runner.tests.MockTestRunner", ) self.assertIn("Total run took", stderr.getvalue()) # Isolate from the real environment. @mock.patch.dict(os.environ, {}, clear=True) @mock.patch.object(multiprocessing, "cpu_count", return_value=12) class ManageCommandParallelTests(SimpleTestCase): def test_parallel_default(self, *mocked_objects): with captured_stderr() as stderr: call_command( "test", "--parallel", testrunner="test_runner.tests.MockTestRunner", ) self.assertIn("parallel=12", stderr.getvalue()) def test_parallel_auto(self, *mocked_objects): with captured_stderr() as stderr: call_command( "test", "--parallel=auto", testrunner="test_runner.tests.MockTestRunner", ) self.assertIn("parallel=12", stderr.getvalue()) def test_no_parallel(self, *mocked_objects): with captured_stderr() as stderr: call_command("test", testrunner="test_runner.tests.MockTestRunner") # Parallel is disabled by default. 
self.assertEqual(stderr.getvalue(), "") @mock.patch.object(multiprocessing, "get_start_method", return_value="spawn") def test_parallel_spawn(self, *mocked_objects): with captured_stderr() as stderr: call_command( "test", "--parallel=auto", testrunner="test_runner.tests.MockTestRunner", ) self.assertIn("parallel=1", stderr.getvalue()) @mock.patch.object(multiprocessing, "get_start_method", return_value="spawn") def test_no_parallel_spawn(self, *mocked_objects): with captured_stderr() as stderr: call_command( "test", testrunner="test_runner.tests.MockTestRunner", ) self.assertEqual(stderr.getvalue(), "") @mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "7"}) def test_no_parallel_django_test_processes_env(self, *mocked_objects): with captured_stderr() as stderr: call_command("test", testrunner="test_runner.tests.MockTestRunner") self.assertEqual(stderr.getvalue(), "") @mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "invalid"}) def test_django_test_processes_env_non_int(self, *mocked_objects): with self.assertRaises(ValueError): call_command( "test", "--parallel", testrunner="test_runner.tests.MockTestRunner", ) @mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "7"}) def test_django_test_processes_parallel_default(self, *mocked_objects): for parallel in ["--parallel", "--parallel=auto"]: with self.subTest(parallel=parallel): with captured_stderr() as stderr: call_command( "test", parallel, testrunner="test_runner.tests.MockTestRunner", ) self.assertIn("parallel=7", stderr.getvalue()) class CustomTestRunnerOptionsSettingsTests(AdminScriptTestCase): """ Custom runners can add command line arguments. The runner is specified through a settings file. """ def setUp(self): super().setUp() settings = { "TEST_RUNNER": "'test_runner.runner.CustomOptionsTestRunner'", } self.write_settings("settings.py", sdict=settings) def test_default_options(self): args = ["test", "--settings=test_project.settings"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "1:2:3") def test_default_and_given_options(self): args = ["test", "--settings=test_project.settings", "--option_b=foo"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "1:foo:3") def test_option_name_and_value_separated(self): args = ["test", "--settings=test_project.settings", "--option_b", "foo"] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "1:foo:3") def test_all_options_given(self): args = [ "test", "--settings=test_project.settings", "--option_a=bar", "--option_b=foo", "--option_c=31337", ] out, err = self.run_django_admin(args) self.assertNoOutput(err) self.assertOutput(out, "bar:foo:31337") class CustomTestRunnerOptionsCmdlineTests(AdminScriptTestCase): """ Custom runners can add command line arguments when the runner is specified using --testrunner. 
""" def setUp(self): super().setUp() self.write_settings("settings.py") def test_testrunner_option(self): args = [ "test", "--testrunner", "test_runner.runner.CustomOptionsTestRunner", "--option_a=bar", "--option_b=foo", "--option_c=31337", ] out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(err) self.assertOutput(out, "bar:foo:31337") def test_testrunner_equals(self): args = [ "test", "--testrunner=test_runner.runner.CustomOptionsTestRunner", "--option_a=bar", "--option_b=foo", "--option_c=31337", ] out, err = self.run_django_admin(args, "test_project.settings") self.assertNoOutput(err) self.assertOutput(out, "bar:foo:31337") def test_no_testrunner(self): args = ["test", "--testrunner"] out, err = self.run_django_admin(args, "test_project.settings") self.assertIn("usage", err) self.assertNotIn("Traceback", err) self.assertNoOutput(out) class NoInitializeSuiteTestRunnerTests(SimpleTestCase): @mock.patch.object(multiprocessing, "get_start_method", return_value="spawn") @mock.patch( "django.test.runner.ParallelTestSuite.initialize_suite", side_effect=Exception("initialize_suite() is called."), ) def test_no_initialize_suite_test_runner(self, *mocked_objects): """ The test suite's initialize_suite() method must always be called when using spawn. It cannot rely on a test runner implementation. """ class NoInitializeSuiteTestRunner(DiscoverRunner): def setup_test_environment(self, **kwargs): return def setup_databases(self, **kwargs): return def run_checks(self, databases): return def teardown_databases(self, old_config, **kwargs): return def teardown_test_environment(self, **kwargs): return def run_suite(self, suite, **kwargs): kwargs = self.get_test_runner_kwargs() runner = self.test_runner(**kwargs) return runner.run(suite) with self.assertRaisesMessage(Exception, "initialize_suite() is called."): runner = NoInitializeSuiteTestRunner( verbosity=0, interactive=False, parallel=2 ) runner.run_tests( [ "test_runner_apps.sample.tests_sample.TestDjangoTestCase", "test_runner_apps.simple.tests", ] ) class TestRunnerInitializerTests(SimpleTestCase): # Raise an exception to don't actually run tests. @mock.patch.object( multiprocessing, "Pool", side_effect=Exception("multiprocessing.Pool()") ) def test_no_initialize_suite_test_runner(self, mocked_pool): class StubTestRunner(DiscoverRunner): def setup_test_environment(self, **kwargs): return def setup_databases(self, **kwargs): return def run_checks(self, databases): return def teardown_databases(self, old_config, **kwargs): return def teardown_test_environment(self, **kwargs): return def run_suite(self, suite, **kwargs): kwargs = self.get_test_runner_kwargs() runner = self.test_runner(**kwargs) return runner.run(suite) runner = StubTestRunner( verbosity=0, interactive=False, parallel=2, debug_mode=True ) with self.assertRaisesMessage(Exception, "multiprocessing.Pool()"): runner.run_tests( [ "test_runner_apps.sample.tests_sample.TestDjangoTestCase", "test_runner_apps.simple.tests", ] ) # Initializer must be a function. 
self.assertIs(mocked_pool.call_args.kwargs["initializer"], _init_worker) initargs = mocked_pool.call_args.kwargs["initargs"] self.assertEqual(len(initargs), 6) self.assertEqual(initargs[5], True) # debug_mode class Ticket17477RegressionTests(AdminScriptTestCase): def setUp(self): super().setUp() self.write_settings("settings.py") def test_ticket_17477(self): """'manage.py help test' works after r16352.""" args = ["help", "test"] out, err = self.run_manage(args) self.assertNoOutput(err) class SQLiteInMemoryTestDbs(TransactionTestCase): available_apps = ["test_runner"] databases = {"default", "other"} @unittest.skipUnless( all(db.connections[conn].vendor == "sqlite" for conn in db.connections), "This is an sqlite-specific issue", ) def test_transaction_support(self): # Assert connections mocking is appropriately applied by preventing # any attempts at calling create_test_db on the global connection # objects. for connection in db.connections.all(): create_test_db = mock.patch.object( connection.creation, "create_test_db", side_effect=AssertionError( "Global connection object shouldn't be manipulated." ), ) create_test_db.start() self.addCleanup(create_test_db.stop) for option_key, option_value in ( ("NAME", ":memory:"), ("TEST", {"NAME": ":memory:"}), ): tested_connections = db.ConnectionHandler( { "default": { "ENGINE": "django.db.backends.sqlite3", option_key: option_value, }, "other": { "ENGINE": "django.db.backends.sqlite3", option_key: option_value, }, } ) with mock.patch("django.test.utils.connections", new=tested_connections): other = tested_connections["other"] DiscoverRunner(verbosity=0).setup_databases() msg = ( "DATABASES setting '%s' option set to sqlite3's ':memory:' value " "shouldn't interfere with transaction support detection." % option_key ) # Transaction support is properly initialized for the 'other' DB. self.assertTrue(other.features.supports_transactions, msg) # And all the DBs report that they support transactions. self.assertTrue(connections_support_transactions(), msg) class DummyBackendTest(unittest.TestCase): def test_setup_databases(self): """ setup_databases() doesn't fail with dummy database backend. 
""" tested_connections = db.ConnectionHandler({}) with mock.patch("django.test.utils.connections", new=tested_connections): runner_instance = DiscoverRunner(verbosity=0) old_config = runner_instance.setup_databases() runner_instance.teardown_databases(old_config) class AliasedDefaultTestSetupTest(unittest.TestCase): def test_setup_aliased_default_database(self): """ setup_databases() doesn't fail when 'default' is aliased """ tested_connections = db.ConnectionHandler( {"default": {"NAME": "dummy"}, "aliased": {"NAME": "dummy"}} ) with mock.patch("django.test.utils.connections", new=tested_connections): runner_instance = DiscoverRunner(verbosity=0) old_config = runner_instance.setup_databases() runner_instance.teardown_databases(old_config) class SetupDatabasesTests(SimpleTestCase): def setUp(self): self.runner_instance = DiscoverRunner(verbosity=0) def test_setup_aliased_databases(self): tested_connections = db.ConnectionHandler( { "default": { "ENGINE": "django.db.backends.dummy", "NAME": "dbname", }, "other": { "ENGINE": "django.db.backends.dummy", "NAME": "dbname", }, } ) with mock.patch( "django.db.backends.dummy.base.DatabaseWrapper.creation_class" ) as mocked_db_creation: with mock.patch("django.test.utils.connections", new=tested_connections): old_config = self.runner_instance.setup_databases() self.runner_instance.teardown_databases(old_config) mocked_db_creation.return_value.destroy_test_db.assert_called_once_with( "dbname", 0, False ) def test_setup_test_database_aliases(self): """ The default database must be the first because data migrations use the default alias by default. """ tested_connections = db.ConnectionHandler( { "other": { "ENGINE": "django.db.backends.dummy", "NAME": "dbname", }, "default": { "ENGINE": "django.db.backends.dummy", "NAME": "dbname", }, } ) with mock.patch("django.test.utils.connections", new=tested_connections): test_databases, _ = get_unique_databases_and_mirrors() self.assertEqual( test_databases, { ("", "", "django.db.backends.dummy", "test_dbname"): ( "dbname", ["default", "other"], ), }, ) def test_destroy_test_db_restores_db_name(self): tested_connections = db.ConnectionHandler( { "default": { "ENGINE": settings.DATABASES[db.DEFAULT_DB_ALIAS]["ENGINE"], "NAME": "xxx_test_database", }, } ) # Using the real current name as old_name to not mess with the test suite. old_name = settings.DATABASES[db.DEFAULT_DB_ALIAS]["NAME"] with mock.patch("django.db.connections", new=tested_connections): tested_connections["default"].creation.destroy_test_db( old_name, verbosity=0, keepdb=True ) self.assertEqual( tested_connections["default"].settings_dict["NAME"], old_name ) def test_serialization(self): tested_connections = db.ConnectionHandler( { "default": { "ENGINE": "django.db.backends.dummy", }, } ) with mock.patch( "django.db.backends.dummy.base.DatabaseWrapper.creation_class" ) as mocked_db_creation: with mock.patch("django.test.utils.connections", new=tested_connections): self.runner_instance.setup_databases() mocked_db_creation.return_value.create_test_db.assert_called_once_with( verbosity=0, autoclobber=False, serialize=True, keepdb=False ) def test_serialized_off(self): tested_connections = db.ConnectionHandler( { "default": { "ENGINE": "django.db.backends.dummy", "TEST": {"SERIALIZE": False}, }, } ) msg = ( "The SERIALIZE test database setting is deprecated as it can be " "inferred from the TestCase/TransactionTestCase.databases that " "enable the serialized_rollback feature." 
        )
        with mock.patch(
            "django.db.backends.dummy.base.DatabaseWrapper.creation_class"
        ) as mocked_db_creation:
            with mock.patch("django.test.utils.connections", new=tested_connections):
                with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
                    self.runner_instance.setup_databases()
        mocked_db_creation.return_value.create_test_db.assert_called_once_with(
            verbosity=0, autoclobber=False, serialize=False, keepdb=False
        )


@skipUnlessDBFeature("supports_sequence_reset")
class AutoIncrementResetTest(TransactionTestCase):
    """
    Models created in different test methods receive the same PK values since
    the sequences are reset before each test method.
    """

    available_apps = ["test_runner"]

    reset_sequences = True

    def _test(self):
        # Regular model
        p = Person.objects.create(first_name="Jack", last_name="Smith")
        self.assertEqual(p.pk, 1)
        # Auto-created many-to-many through model
        p.friends.add(Person.objects.create(first_name="Jacky", last_name="Smith"))
        self.assertEqual(p.friends.through.objects.first().pk, 1)
        # Many-to-many through model
        b = B.objects.create()
        t = Through.objects.create(person=p, b=b)
        self.assertEqual(t.pk, 1)

    def test_autoincrement_reset1(self):
        self._test()

    def test_autoincrement_reset2(self):
        self._test()


class EmptyDefaultDatabaseTest(unittest.TestCase):
    def test_empty_default_database(self):
        """
        An empty default database in settings does not raise an
        ImproperlyConfigured error when running a unit test that does not use
        a database.
        """
        tested_connections = db.ConnectionHandler({"default": {}})
        with mock.patch("django.db.connections", new=tested_connections):
            connection = tested_connections[db.utils.DEFAULT_DB_ALIAS]
            self.assertEqual(
                connection.settings_dict["ENGINE"], "django.db.backends.dummy"
            )
            connections_support_transactions()


class RunTestsExceptionHandlingTests(unittest.TestCase):
    def test_run_checks_raises(self):
        """
        Teardown functions are run when run_checks() raises SystemCheckError.
        """
        with mock.patch(
            "django.test.runner.DiscoverRunner.setup_test_environment"
        ), mock.patch("django.test.runner.DiscoverRunner.setup_databases"), mock.patch(
            "django.test.runner.DiscoverRunner.build_suite"
        ), mock.patch(
            "django.test.runner.DiscoverRunner.run_checks", side_effect=SystemCheckError
        ), mock.patch(
            "django.test.runner.DiscoverRunner.teardown_databases"
        ) as teardown_databases, mock.patch(
            "django.test.runner.DiscoverRunner.teardown_test_environment"
        ) as teardown_test_environment:
            runner = DiscoverRunner(verbosity=0, interactive=False)
            with self.assertRaises(SystemCheckError):
                runner.run_tests(
                    ["test_runner_apps.sample.tests_sample.TestDjangoTestCase"]
                )
            self.assertTrue(teardown_databases.called)
            self.assertTrue(teardown_test_environment.called)

    def test_run_checks_raises_and_teardown_raises(self):
        """
        SystemCheckError is surfaced when run_checks() raises SystemCheckError
        and teardown_databases() raises ValueError.
""" with mock.patch( "django.test.runner.DiscoverRunner.setup_test_environment" ), mock.patch("django.test.runner.DiscoverRunner.setup_databases"), mock.patch( "django.test.runner.DiscoverRunner.build_suite" ), mock.patch( "django.test.runner.DiscoverRunner.run_checks", side_effect=SystemCheckError ), mock.patch( "django.test.runner.DiscoverRunner.teardown_databases", side_effect=ValueError, ) as teardown_databases, mock.patch( "django.test.runner.DiscoverRunner.teardown_test_environment" ) as teardown_test_environment: runner = DiscoverRunner(verbosity=0, interactive=False) with self.assertRaises(SystemCheckError): runner.run_tests( ["test_runner_apps.sample.tests_sample.TestDjangoTestCase"] ) self.assertTrue(teardown_databases.called) self.assertFalse(teardown_test_environment.called) def test_run_checks_passes_and_teardown_raises(self): """ Exceptions on teardown are surfaced if no exceptions happen during run_checks(). """ with mock.patch( "django.test.runner.DiscoverRunner.setup_test_environment" ), mock.patch("django.test.runner.DiscoverRunner.setup_databases"), mock.patch( "django.test.runner.DiscoverRunner.build_suite" ), mock.patch( "django.test.runner.DiscoverRunner.run_checks" ), mock.patch( "django.test.runner.DiscoverRunner.teardown_databases", side_effect=ValueError, ) as teardown_databases, mock.patch( "django.test.runner.DiscoverRunner.teardown_test_environment" ) as teardown_test_environment: runner = DiscoverRunner(verbosity=0, interactive=False) with self.assertRaises(ValueError): # Suppress the output when running TestDjangoTestCase. with mock.patch("sys.stderr"): runner.run_tests( ["test_runner_apps.sample.tests_sample.TestDjangoTestCase"] ) self.assertTrue(teardown_databases.called) self.assertFalse(teardown_test_environment.called) # RemovedInDjango50Warning class NoOpTestRunner(DiscoverRunner): def setup_test_environment(self, **kwargs): return def setup_databases(self, **kwargs): return def run_checks(self, databases): return def teardown_databases(self, old_config, **kwargs): return def teardown_test_environment(self, **kwargs): return class DiscoverRunnerExtraTestsDeprecationTests(SimpleTestCase): msg = "The extra_tests argument is deprecated." def get_runner(self): return NoOpTestRunner(verbosity=0, interactive=False) def test_extra_tests_build_suite(self): runner = self.get_runner() with self.assertWarnsMessage(RemovedInDjango50Warning, self.msg): runner.build_suite(extra_tests=[]) def test_extra_tests_run_tests(self): runner = self.get_runner() with captured_stderr(): with self.assertWarnsMessage(RemovedInDjango50Warning, self.msg): runner.run_tests( test_labels=["test_runner_apps.sample.tests_sample.EmptyTestCase"], extra_tests=[], )
from django.core.exceptions import FieldError
from django.db.models import (
    BooleanField,
    Exists,
    ExpressionWrapper,
    F,
    OuterRef,
    Q,
    Value,
)
from django.db.models.expressions import NegatedExpression, RawSQL
from django.db.models.functions import Lower
from django.db.models.sql.where import NothingNode
from django.test import SimpleTestCase, TestCase

from .models import Tag


class QTests(SimpleTestCase):
    def test_combine_and_empty(self):
        q = Q(x=1)
        self.assertEqual(q & Q(), q)
        self.assertEqual(Q() & q, q)

        q = Q(x__in={}.keys())
        self.assertEqual(q & Q(), q)
        self.assertEqual(Q() & q, q)

    def test_combine_and_both_empty(self):
        self.assertEqual(Q() & Q(), Q())

    def test_combine_or_empty(self):
        q = Q(x=1)
        self.assertEqual(q | Q(), q)
        self.assertEqual(Q() | q, q)

        q = Q(x__in={}.keys())
        self.assertEqual(q | Q(), q)
        self.assertEqual(Q() | q, q)

    def test_combine_xor_empty(self):
        q = Q(x=1)
        self.assertEqual(q ^ Q(), q)
        self.assertEqual(Q() ^ q, q)

        q = Q(x__in={}.keys())
        self.assertEqual(q ^ Q(), q)
        self.assertEqual(Q() ^ q, q)

    def test_combine_empty_copy(self):
        base_q = Q(x=1)
        tests = [
            base_q | Q(),
            Q() | base_q,
            base_q & Q(),
            Q() & base_q,
            base_q ^ Q(),
            Q() ^ base_q,
        ]
        for i, q in enumerate(tests):
            with self.subTest(i=i):
                self.assertEqual(q, base_q)
                self.assertIsNot(q, base_q)

    def test_combine_or_both_empty(self):
        self.assertEqual(Q() | Q(), Q())

    def test_combine_xor_both_empty(self):
        self.assertEqual(Q() ^ Q(), Q())

    def test_combine_not_q_object(self):
        obj = object()
        q = Q(x=1)
        with self.assertRaisesMessage(TypeError, str(obj)):
            q | obj
        with self.assertRaisesMessage(TypeError, str(obj)):
            q & obj
        with self.assertRaisesMessage(TypeError, str(obj)):
            q ^ obj

    def test_combine_negated_boolean_expression(self):
        tagged = Tag.objects.filter(category=OuterRef("pk"))
        tests = [
            Q() & ~Exists(tagged),
            Q() | ~Exists(tagged),
            Q() ^ ~Exists(tagged),
        ]
        for q in tests:
            with self.subTest(q=q):
                self.assertIsInstance(q, NegatedExpression)

    def test_deconstruct(self):
        q = Q(price__gt=F("discounted_price"))
        path, args, kwargs = q.deconstruct()
        self.assertEqual(path, "django.db.models.Q")
        self.assertEqual(args, (("price__gt", F("discounted_price")),))
        self.assertEqual(kwargs, {})

    def test_deconstruct_negated(self):
        q = ~Q(price__gt=F("discounted_price"))
        path, args, kwargs = q.deconstruct()
        self.assertEqual(args, (("price__gt", F("discounted_price")),))
        self.assertEqual(kwargs, {"_negated": True})

    def test_deconstruct_or(self):
        q1 = Q(price__gt=F("discounted_price"))
        q2 = Q(price=F("discounted_price"))
        q = q1 | q2
        path, args, kwargs = q.deconstruct()
        self.assertEqual(
            args,
            (
                ("price__gt", F("discounted_price")),
                ("price", F("discounted_price")),
            ),
        )
        self.assertEqual(kwargs, {"_connector": Q.OR})

    def test_deconstruct_xor(self):
        q1 = Q(price__gt=F("discounted_price"))
        q2 = Q(price=F("discounted_price"))
        q = q1 ^ q2
        path, args, kwargs = q.deconstruct()
        self.assertEqual(
            args,
            (
                ("price__gt", F("discounted_price")),
                ("price", F("discounted_price")),
            ),
        )
        self.assertEqual(kwargs, {"_connector": Q.XOR})

    def test_deconstruct_and(self):
        q1 = Q(price__gt=F("discounted_price"))
        q2 = Q(price=F("discounted_price"))
        q = q1 & q2
        path, args, kwargs = q.deconstruct()
        self.assertEqual(
            args,
            (
                ("price__gt", F("discounted_price")),
                ("price", F("discounted_price")),
            ),
        )
        self.assertEqual(kwargs, {})

    def test_deconstruct_multiple_kwargs(self):
        q = Q(price__gt=F("discounted_price"), price=F("discounted_price"))
        path, args, kwargs = q.deconstruct()
        self.assertEqual(
            args,
            (
                ("price", F("discounted_price")),
                ("price__gt", F("discounted_price")),
            ),
        )
        self.assertEqual(kwargs, {})

    def test_deconstruct_nested(self):
        q = Q(Q(price__gt=F("discounted_price")))
        path, args, kwargs = q.deconstruct()
        self.assertEqual(args, (Q(price__gt=F("discounted_price")),))
        self.assertEqual(kwargs, {})

    def test_deconstruct_boolean_expression(self):
        expr = RawSQL("1 = 1", BooleanField())
        q = Q(expr)
        _, args, kwargs = q.deconstruct()
        self.assertEqual(args, (expr,))
        self.assertEqual(kwargs, {})

    def test_reconstruct(self):
        q = Q(price__gt=F("discounted_price"))
        path, args, kwargs = q.deconstruct()
        self.assertEqual(Q(*args, **kwargs), q)

    def test_reconstruct_negated(self):
        q = ~Q(price__gt=F("discounted_price"))
        path, args, kwargs = q.deconstruct()
        self.assertEqual(Q(*args, **kwargs), q)

    def test_reconstruct_or(self):
        q1 = Q(price__gt=F("discounted_price"))
        q2 = Q(price=F("discounted_price"))
        q = q1 | q2
        path, args, kwargs = q.deconstruct()
        self.assertEqual(Q(*args, **kwargs), q)

    def test_reconstruct_xor(self):
        q1 = Q(price__gt=F("discounted_price"))
        q2 = Q(price=F("discounted_price"))
        q = q1 ^ q2
        path, args, kwargs = q.deconstruct()
        self.assertEqual(Q(*args, **kwargs), q)

    def test_reconstruct_and(self):
        q1 = Q(price__gt=F("discounted_price"))
        q2 = Q(price=F("discounted_price"))
        q = q1 & q2
        path, args, kwargs = q.deconstruct()
        self.assertEqual(Q(*args, **kwargs), q)

    def test_flatten(self):
        q = Q()
        self.assertEqual(list(q.flatten()), [q])
        q = Q(NothingNode())
        self.assertEqual(list(q.flatten()), [q, q.children[0]])
        q = Q(
            ExpressionWrapper(
                Q(RawSQL("id = 0", params=(), output_field=BooleanField()))
                | Q(price=Value("4.55"))
                | Q(name=Lower("category")),
                output_field=BooleanField(),
            )
        )
        flatten = list(q.flatten())
        self.assertEqual(len(flatten), 7)

    def test_create_helper(self):
        items = [("a", 1), ("b", 2), ("c", 3)]
        for connector in [Q.AND, Q.OR, Q.XOR]:
            with self.subTest(connector=connector):
                self.assertEqual(
                    Q.create(items, connector=connector),
                    Q(*items, _connector=connector),
                )


class QCheckTests(TestCase):
    def test_basic(self):
        q = Q(price__gt=20)
        self.assertIs(q.check({"price": 30}), True)
        self.assertIs(q.check({"price": 10}), False)

    def test_expression(self):
        q = Q(name="test")
        self.assertIs(q.check({"name": Lower(Value("TeSt"))}), True)
        self.assertIs(q.check({"name": Value("other")}), False)

    def test_missing_field(self):
        q = Q(description__startswith="prefix")
        msg = "Cannot resolve keyword 'description' into field."
        with self.assertRaisesMessage(FieldError, msg):
            q.check({"name": "test"})

    def test_boolean_expression(self):
        q = Q(ExpressionWrapper(Q(price__gt=20), output_field=BooleanField()))
        self.assertIs(q.check({"price": 25}), True)
        self.assertIs(q.check({"price": Value(10)}), False)

    def test_rawsql(self):
        """
        RawSQL expressions cause a database error because "price" cannot be
        replaced by its value. In this case, Q.check() logs a warning and
        returns True.
        """
        q = Q(RawSQL("price > %s", params=(20,), output_field=BooleanField()))
        with self.assertLogs("django.db.models", "WARNING") as cm:
            self.assertIs(q.check({"price": 10}), True)
        self.assertIn(
            f"Got a database error calling check() on {q!r}: ",
            cm.records[0].getMessage(),
        )
97d4e3e76e50bb206d6f5973537363a3834f6a0c3dd24e72a6cc3e43d83cae23
import operator

from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import Exists, F, IntegerField, OuterRef, Subquery, Value
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext

from .models import Author, Celebrity, ExtraInfo, Number, ReservedName


@skipUnlessDBFeature("supports_select_union")
class QuerySetSetOperationTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        Number.objects.bulk_create(Number(num=i, other_num=10 - i) for i in range(10))

    def assertNumbersEqual(self, queryset, expected_numbers, ordered=True):
        self.assertQuerySetEqual(
            queryset, expected_numbers, operator.attrgetter("num"), ordered
        )

    def test_simple_union(self):
        qs1 = Number.objects.filter(num__lte=1)
        qs2 = Number.objects.filter(num__gte=8)
        qs3 = Number.objects.filter(num=5)
        self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False)

    @skipUnlessDBFeature("supports_select_intersection")
    def test_simple_intersection(self):
        qs1 = Number.objects.filter(num__lte=5)
        qs2 = Number.objects.filter(num__gte=5)
        qs3 = Number.objects.filter(num__gte=4, num__lte=6)
        self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False)

    @skipUnlessDBFeature("supports_select_intersection")
    def test_intersection_with_values(self):
        ReservedName.objects.create(name="a", order=2)
        qs1 = ReservedName.objects.all()
        reserved_name = qs1.intersection(qs1).values("name", "order", "id").get()
        self.assertEqual(reserved_name["name"], "a")
        self.assertEqual(reserved_name["order"], 2)
        reserved_name = qs1.intersection(qs1).values_list("name", "order", "id").get()
        self.assertEqual(reserved_name[:2], ("a", 2))

    @skipUnlessDBFeature("supports_select_difference")
    def test_simple_difference(self):
        qs1 = Number.objects.filter(num__lte=5)
        qs2 = Number.objects.filter(num__lte=4)
        self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False)

    def test_union_distinct(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.all()
        self.assertEqual(len(list(qs1.union(qs2, all=True))), 20)
        self.assertEqual(len(list(qs1.union(qs2))), 10)

    def test_union_none(self):
        qs1 = Number.objects.filter(num__lte=1)
        qs2 = Number.objects.filter(num__gte=8)
        qs3 = qs1.union(qs2)
        self.assertSequenceEqual(qs3.none(), [])
        self.assertNumbersEqual(qs3, [0, 1, 8, 9], ordered=False)

    def test_union_none_slice(self):
        qs1 = Number.objects.filter(num__lte=0)
        qs2 = Number.objects.none()
        qs3 = qs1.union(qs2)
        self.assertNumbersEqual(qs3[:1], [0])

    def test_union_empty_filter_slice(self):
        qs1 = Number.objects.filter(num__lte=0)
        qs2 = Number.objects.filter(pk__in=[])
        qs3 = qs1.union(qs2)
        self.assertNumbersEqual(qs3[:1], [0])

    @skipUnlessDBFeature("supports_slicing_ordering_in_compound")
    def test_union_slice_compound_empty(self):
        qs1 = Number.objects.filter(num__lte=0)[:1]
        qs2 = Number.objects.none()
        qs3 = qs1.union(qs2)
        self.assertNumbersEqual(qs3[:1], [0])

    @skipUnlessDBFeature("supports_slicing_ordering_in_compound")
    def test_union_combined_slice_compound_empty(self):
        qs1 = Number.objects.filter(num__lte=2)[:3]
        qs2 = Number.objects.none()
        qs3 = qs1.union(qs2)
        self.assertNumbersEqual(qs3.order_by("num")[2:3], [2])

    def test_union_order_with_null_first_last(self):
        Number.objects.filter(other_num=5).update(other_num=None)
        qs1 = Number.objects.filter(num__lte=1)
        qs2 = Number.objects.filter(num__gte=2)
        qs3 = qs1.union(qs2)
        self.assertSequenceEqual(
            qs3.order_by(
                F("other_num").asc(nulls_first=True),
            ).values_list("other_num", flat=True),
            [None, 1, 2, 3, 4, 6, 7, 8, 9, 10],
        )
        self.assertSequenceEqual(
            qs3.order_by(
                F("other_num").asc(nulls_last=True),
            ).values_list("other_num", flat=True),
            [1, 2, 3, 4, 6, 7, 8, 9, 10, None],
        )

    @skipUnlessDBFeature("supports_select_intersection")
    def test_intersection_with_empty_qs(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.none()
        qs3 = Number.objects.filter(pk__in=[])
        self.assertEqual(len(qs1.intersection(qs2)), 0)
        self.assertEqual(len(qs1.intersection(qs3)), 0)
        self.assertEqual(len(qs2.intersection(qs1)), 0)
        self.assertEqual(len(qs3.intersection(qs1)), 0)
        self.assertEqual(len(qs2.intersection(qs2)), 0)
        self.assertEqual(len(qs3.intersection(qs3)), 0)

    @skipUnlessDBFeature("supports_select_difference")
    def test_difference_with_empty_qs(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.none()
        qs3 = Number.objects.filter(pk__in=[])
        self.assertEqual(len(qs1.difference(qs2)), 10)
        self.assertEqual(len(qs1.difference(qs3)), 10)
        self.assertEqual(len(qs2.difference(qs1)), 0)
        self.assertEqual(len(qs3.difference(qs1)), 0)
        self.assertEqual(len(qs2.difference(qs2)), 0)
        self.assertEqual(len(qs3.difference(qs3)), 0)

    @skipUnlessDBFeature("supports_select_difference")
    def test_difference_with_values(self):
        ReservedName.objects.create(name="a", order=2)
        qs1 = ReservedName.objects.all()
        qs2 = ReservedName.objects.none()
        reserved_name = qs1.difference(qs2).values("name", "order", "id").get()
        self.assertEqual(reserved_name["name"], "a")
        self.assertEqual(reserved_name["order"], 2)
        reserved_name = qs1.difference(qs2).values_list("name", "order", "id").get()
        self.assertEqual(reserved_name[:2], ("a", 2))

    def test_union_with_empty_qs(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.none()
        qs3 = Number.objects.filter(pk__in=[])
        self.assertEqual(len(qs1.union(qs2)), 10)
        self.assertEqual(len(qs2.union(qs1)), 10)
        self.assertEqual(len(qs1.union(qs3)), 10)
        self.assertEqual(len(qs3.union(qs1)), 10)
        self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10)
        self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20)
        self.assertEqual(len(qs2.union(qs2)), 0)
        self.assertEqual(len(qs3.union(qs3)), 0)

    def test_empty_qs_union_with_ordered_qs(self):
        qs1 = Number.objects.order_by("num")
        qs2 = Number.objects.none().union(qs1).order_by("num")
        self.assertEqual(list(qs1), list(qs2))

    def test_limits(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.all()
        self.assertEqual(len(list(qs1.union(qs2)[:2])), 2)

    def test_ordering(self):
        qs1 = Number.objects.filter(num__lte=1)
        qs2 = Number.objects.filter(num__gte=2, num__lte=3)
        self.assertNumbersEqual(qs1.union(qs2).order_by("-num"), [3, 2, 1, 0])

    def test_ordering_by_alias(self):
        qs1 = Number.objects.filter(num__lte=1).values(alias=F("num"))
        qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F("num"))
        self.assertQuerySetEqual(
            qs1.union(qs2).order_by("-alias"),
            [3, 2, 1, 0],
            operator.itemgetter("alias"),
        )

    def test_ordering_by_f_expression(self):
        qs1 = Number.objects.filter(num__lte=1)
        qs2 = Number.objects.filter(num__gte=2, num__lte=3)
        self.assertNumbersEqual(qs1.union(qs2).order_by(F("num").desc()), [3, 2, 1, 0])

    def test_ordering_by_f_expression_and_alias(self):
        qs1 = Number.objects.filter(num__lte=1).values(alias=F("other_num"))
        qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F("other_num"))
        self.assertQuerySetEqual(
            qs1.union(qs2).order_by(F("alias").desc()),
            [10, 9, 8, 7],
            operator.itemgetter("alias"),
        )
        Number.objects.create(num=-1)
        self.assertQuerySetEqual(
            qs1.union(qs2).order_by(F("alias").desc(nulls_last=True)),
            [10, 9, 8, 7, None],
            operator.itemgetter("alias"),
        )

    def test_union_with_values(self):
        ReservedName.objects.create(name="a", order=2)
        qs1 = ReservedName.objects.all()
        reserved_name = qs1.union(qs1).values("name", "order", "id").get()
        self.assertEqual(reserved_name["name"], "a")
        self.assertEqual(reserved_name["order"], 2)
        reserved_name = qs1.union(qs1).values_list("name", "order", "id").get()
        self.assertEqual(reserved_name[:2], ("a", 2))
        # List of columns can be changed.
        reserved_name = qs1.union(qs1).values_list("order").get()
        self.assertEqual(reserved_name, (2,))

    def test_union_with_two_annotated_values_list(self):
        qs1 = (
            Number.objects.filter(num=1)
            .annotate(
                count=Value(0, IntegerField()),
            )
            .values_list("num", "count")
        )
        qs2 = (
            Number.objects.filter(num=2)
            .values("pk")
            .annotate(
                count=F("num"),
            )
            .annotate(
                num=Value(1, IntegerField()),
            )
            .values_list("num", "count")
        )
        self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])

    def test_union_with_extra_and_values_list(self):
        qs1 = (
            Number.objects.filter(num=1)
            .extra(
                select={"count": 0},
            )
            .values_list("num", "count")
        )
        qs2 = Number.objects.filter(num=2).extra(select={"count": 1})
        self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])

    def test_union_with_values_list_on_annotated_and_unannotated(self):
        ReservedName.objects.create(name="rn1", order=1)
        qs1 = Number.objects.annotate(
            has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef("num")))
        ).filter(has_reserved_name=True)
        qs2 = Number.objects.filter(num=9)
        self.assertCountEqual(qs1.union(qs2).values_list("num", flat=True), [1, 9])

    def test_union_with_values_list_and_order(self):
        ReservedName.objects.bulk_create(
            [
                ReservedName(name="rn1", order=7),
                ReservedName(name="rn2", order=5),
                ReservedName(name="rn0", order=6),
                ReservedName(name="rn9", order=-1),
            ]
        )
        qs1 = ReservedName.objects.filter(order__gte=6)
        qs2 = ReservedName.objects.filter(order__lte=5)
        union_qs = qs1.union(qs2)
        for qs, expected_result in (
            # Order by a single column.
            (union_qs.order_by("-pk").values_list("order", flat=True), [-1, 6, 5, 7]),
            (union_qs.order_by("pk").values_list("order", flat=True), [7, 5, 6, -1]),
            (union_qs.values_list("order", flat=True).order_by("-pk"), [-1, 6, 5, 7]),
            (union_qs.values_list("order", flat=True).order_by("pk"), [7, 5, 6, -1]),
            # Order by multiple columns.
            (
                union_qs.order_by("-name", "pk").values_list("order", flat=True),
                [-1, 5, 7, 6],
            ),
            (
                union_qs.values_list("order", flat=True).order_by("-name", "pk"),
                [-1, 5, 7, 6],
            ),
        ):
            with self.subTest(qs=qs):
                self.assertEqual(list(qs), expected_result)

    def test_union_with_values_list_and_order_on_annotation(self):
        qs1 = Number.objects.annotate(
            annotation=Value(-1),
            multiplier=F("annotation"),
        ).filter(num__gte=6)
        qs2 = Number.objects.annotate(
            annotation=Value(2),
            multiplier=F("annotation"),
        ).filter(num__lte=5)
        self.assertSequenceEqual(
            qs1.union(qs2).order_by("annotation", "num").values_list("num", flat=True),
            [6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
        )
        self.assertQuerySetEqual(
            qs1.union(qs2)
            .order_by(
                F("annotation") * F("multiplier"),
                "num",
            )
            .values("num"),
            [6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
            operator.itemgetter("num"),
        )

    def test_union_multiple_models_with_values_list_and_order(self):
        reserved_name = ReservedName.objects.create(name="rn1", order=0)
        qs1 = Celebrity.objects.all()
        qs2 = ReservedName.objects.all()
        self.assertSequenceEqual(
            qs1.union(qs2).order_by("name").values_list("pk", flat=True),
            [reserved_name.pk],
        )

    def test_union_multiple_models_with_values_list_and_order_by_extra_select(self):
        reserved_name = ReservedName.objects.create(name="rn1", order=0)
        qs1 = Celebrity.objects.extra(select={"extra_name": "name"})
        qs2 = ReservedName.objects.extra(select={"extra_name": "name"})
        self.assertSequenceEqual(
            qs1.union(qs2).order_by("extra_name").values_list("pk", flat=True),
            [reserved_name.pk],
        )

    def test_union_in_subquery(self):
        ReservedName.objects.bulk_create(
            [
                ReservedName(name="rn1", order=8),
                ReservedName(name="rn2", order=1),
                ReservedName(name="rn3", order=5),
            ]
        )
        qs1 = Number.objects.filter(num__gt=7, num=OuterRef("order"))
        qs2 = Number.objects.filter(num__lt=2, num=OuterRef("order"))
        self.assertCountEqual(
            ReservedName.objects.annotate(
                number=Subquery(qs1.union(qs2).values("num")),
            )
            .filter(number__isnull=False)
            .values_list("order", flat=True),
            [8, 1],
        )

    def test_union_in_subquery_related_outerref(self):
        e1 = ExtraInfo.objects.create(value=7, info="e3")
        e2 = ExtraInfo.objects.create(value=5, info="e2")
        e3 = ExtraInfo.objects.create(value=1, info="e1")
        Author.objects.bulk_create(
            [
                Author(name="a1", num=1, extra=e1),
                Author(name="a2", num=3, extra=e2),
                Author(name="a3", num=2, extra=e3),
            ]
        )
        qs1 = ExtraInfo.objects.order_by().filter(value=OuterRef("num"))
        qs2 = ExtraInfo.objects.order_by().filter(value__lt=OuterRef("extra__value"))
        qs = (
            Author.objects.annotate(
                info=Subquery(qs1.union(qs2).values("info")[:1]),
            )
            .filter(info__isnull=False)
            .values_list("name", flat=True)
        )
        self.assertCountEqual(qs, ["a1", "a2"])
        # Combined queries don't mutate.
        self.assertCountEqual(qs, ["a1", "a2"])

    @skipUnlessDBFeature("supports_slicing_ordering_in_compound")
    def test_union_in_with_ordering(self):
        qs1 = Number.objects.filter(num__gt=7).order_by("num")
        qs2 = Number.objects.filter(num__lt=2).order_by("num")
        self.assertNumbersEqual(
            Number.objects.exclude(id__in=qs1.union(qs2).values("id")),
            [2, 3, 4, 5, 6, 7],
            ordered=False,
        )

    @skipUnlessDBFeature(
        "supports_slicing_ordering_in_compound", "allow_sliced_subqueries_with_in"
    )
    def test_union_in_with_ordering_and_slice(self):
        qs1 = Number.objects.filter(num__gt=7).order_by("num")[:1]
        qs2 = Number.objects.filter(num__lt=2).order_by("-num")[:1]
        self.assertNumbersEqual(
            Number.objects.exclude(id__in=qs1.union(qs2).values("id")),
            [0, 2, 3, 4, 5, 6, 7, 9],
            ordered=False,
        )

    def test_count_union(self):
        qs1 = Number.objects.filter(num__lte=1).values("num")
        qs2 = Number.objects.filter(num__gte=2, num__lte=3).values("num")
        self.assertEqual(qs1.union(qs2).count(), 4)

    def test_count_union_empty_result(self):
        qs = Number.objects.filter(pk__in=[])
        self.assertEqual(qs.union(qs).count(), 0)

    @skipUnlessDBFeature("supports_select_difference")
    def test_count_difference(self):
        qs1 = Number.objects.filter(num__lt=10)
        qs2 = Number.objects.filter(num__lt=9)
        self.assertEqual(qs1.difference(qs2).count(), 1)

    @skipUnlessDBFeature("supports_select_intersection")
    def test_count_intersection(self):
        qs1 = Number.objects.filter(num__gte=5)
        qs2 = Number.objects.filter(num__lte=5)
        self.assertEqual(qs1.intersection(qs2).count(), 1)

    def test_exists_union(self):
        qs1 = Number.objects.filter(num__gte=5)
        qs2 = Number.objects.filter(num__lte=5)
        with CaptureQueriesContext(connection) as context:
            self.assertIs(qs1.union(qs2).exists(), True)
        captured_queries = context.captured_queries
        self.assertEqual(len(captured_queries), 1)
        captured_sql = captured_queries[0]["sql"]
        self.assertNotIn(
            connection.ops.quote_name(Number._meta.pk.column),
            captured_sql,
        )
        self.assertEqual(
            captured_sql.count(connection.ops.limit_offset_sql(None, 1)),
            3 if connection.features.supports_slicing_ordering_in_compound else 1,
        )

    def test_exists_union_empty_result(self):
        qs = Number.objects.filter(pk__in=[])
        self.assertIs(qs.union(qs).exists(), False)

    @skipUnlessDBFeature("supports_select_intersection")
    def test_exists_intersection(self):
        qs1 = Number.objects.filter(num__gt=5)
        qs2 = Number.objects.filter(num__lt=5)
        self.assertIs(qs1.intersection(qs1).exists(), True)
        self.assertIs(qs1.intersection(qs2).exists(), False)

    @skipUnlessDBFeature("supports_select_difference")
    def test_exists_difference(self):
        qs1 = Number.objects.filter(num__gte=5)
        qs2 = Number.objects.filter(num__gte=3)
        self.assertIs(qs1.difference(qs2).exists(), False)
        self.assertIs(qs2.difference(qs1).exists(), True)

    def test_get_union(self):
        qs = Number.objects.filter(num=2)
        self.assertEqual(qs.union(qs).get().num, 2)

    @skipUnlessDBFeature("supports_select_difference")
    def test_get_difference(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.exclude(num=2)
        self.assertEqual(qs1.difference(qs2).get().num, 2)

    @skipUnlessDBFeature("supports_select_intersection")
    def test_get_intersection(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.filter(num=2)
        self.assertEqual(qs1.intersection(qs2).get().num, 2)

    @skipUnlessDBFeature("supports_slicing_ordering_in_compound")
    def test_ordering_subqueries(self):
        qs1 = Number.objects.order_by("num")[:2]
        qs2 = Number.objects.order_by("-num")[:2]
        self.assertNumbersEqual(qs1.union(qs2).order_by("-num")[:4], [9, 8, 1, 0])

    @skipIfDBFeature("supports_slicing_ordering_in_compound")
    def test_unsupported_ordering_slicing_raises_db_error(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.all()
        qs3 = Number.objects.all()
        msg = "LIMIT/OFFSET not allowed in subqueries of compound statements"
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2[:10]))
        msg = "ORDER BY not allowed in subqueries of compound statements"
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.order_by("id").union(qs2))
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2).order_by("id").union(qs3))

    @skipIfDBFeature("supports_select_intersection")
    def test_unsupported_intersection_raises_db_error(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.all()
        msg = "intersection is not supported on this database backend"
        with self.assertRaisesMessage(NotSupportedError, msg):
            list(qs1.intersection(qs2))

    def test_combining_multiple_models(self):
        ReservedName.objects.create(name="99 little bugs", order=99)
        qs1 = Number.objects.filter(num=1).values_list("num", flat=True)
        qs2 = ReservedName.objects.values_list("order")
        self.assertEqual(list(qs1.union(qs2).order_by("num")), [1, 99])

    def test_order_raises_on_non_selected_column(self):
        qs1 = (
            Number.objects.filter()
            .annotate(
                annotation=Value(1, IntegerField()),
            )
            .values("annotation", num2=F("num"))
        )
        qs2 = Number.objects.filter().values("id", "num")
        # Should not raise
        list(qs1.union(qs2).order_by("annotation"))
        list(qs1.union(qs2).order_by("num2"))
        msg = "ORDER BY term does not match any column in the result set"
        # 'id' is not part of the select
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2).order_by("id"))
        # 'num' got re-aliased to num2
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2).order_by("num"))
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2).order_by(F("num")))
        with self.assertRaisesMessage(DatabaseError, msg):
            list(qs1.union(qs2).order_by(F("num").desc()))
        # switched order, now 'exists' again:
        list(qs2.union(qs1).order_by("num"))

    @skipUnlessDBFeature("supports_select_difference", "supports_select_intersection")
    def test_qs_with_subcompound_qs(self):
        qs1 = Number.objects.all()
        qs2 = Number.objects.intersection(Number.objects.filter(num__gt=1))
        self.assertEqual(qs1.difference(qs2).count(), 2)

    def test_order_by_same_type(self):
        qs = Number.objects.all()
        union = qs.union(qs)
        numbers = list(range(10))
        self.assertNumbersEqual(union.order_by("num"), numbers)
        self.assertNumbersEqual(union.order_by("other_num"), reversed(numbers))

    def test_unsupported_operations_on_combined_qs(self):
        qs = Number.objects.all()
        msg = "Calling QuerySet.%s() after %s() is not supported."
        combinators = ["union"]
        if connection.features.supports_select_difference:
            combinators.append("difference")
        if connection.features.supports_select_intersection:
            combinators.append("intersection")
        for combinator in combinators:
            for operation in (
                "alias",
                "annotate",
                "defer",
                "delete",
                "distinct",
                "exclude",
                "extra",
                "filter",
                "only",
                "prefetch_related",
                "select_related",
                "update",
            ):
                with self.subTest(combinator=combinator, operation=operation):
                    with self.assertRaisesMessage(
                        NotSupportedError,
                        msg % (operation, combinator),
                    ):
                        getattr(getattr(qs, combinator)(qs), operation)()
            with self.assertRaisesMessage(
                NotSupportedError,
                msg % ("contains", combinator),
            ):
                obj = Number.objects.first()
                getattr(qs, combinator)(qs).contains(obj)

    def test_get_with_filters_unsupported_on_combined_qs(self):
        qs = Number.objects.all()
        msg = "Calling QuerySet.get(...) with filters after %s() is not supported."
        combinators = ["union"]
        if connection.features.supports_select_difference:
            combinators.append("difference")
        if connection.features.supports_select_intersection:
            combinators.append("intersection")
        for combinator in combinators:
            with self.subTest(combinator=combinator):
                with self.assertRaisesMessage(NotSupportedError, msg % combinator):
                    getattr(qs, combinator)(qs).get(num=2)

    def test_operator_on_combined_qs_error(self):
        qs = Number.objects.all()
        msg = "Cannot use %s operator with combined queryset."
        combinators = ["union"]
        if connection.features.supports_select_difference:
            combinators.append("difference")
        if connection.features.supports_select_intersection:
            combinators.append("intersection")
        operators = [
            ("|", operator.or_),
            ("&", operator.and_),
            ("^", operator.xor),
        ]
        for combinator in combinators:
            combined_qs = getattr(qs, combinator)(qs)
            for operator_, operator_func in operators:
                with self.subTest(combinator=combinator):
                    with self.assertRaisesMessage(TypeError, msg % operator_):
                        operator_func(qs, combined_qs)
                    with self.assertRaisesMessage(TypeError, msg % operator_):
                        operator_func(combined_qs, qs)
7f9570891c6f45db6242ea4bc04b29248ed31cfed48bcecc043bd88daed6d51e
from unittest import TestCase

from django.db.models.utils import AltersData
from django.template import Context, Engine


class CallableVariablesTests(TestCase):
    @classmethod
    def setUpClass(cls):
        cls.engine = Engine()
        super().setUpClass()

    def test_callable(self):
        class Doodad:
            def __init__(self, value):
                self.num_calls = 0
                self.value = value

            def __call__(self):
                self.num_calls += 1
                return {"the_value": self.value}

        my_doodad = Doodad(42)
        c = Context({"my_doodad": my_doodad})

        # We can't access ``my_doodad.value`` in the template, because
        # ``my_doodad.__call__`` will be invoked first, yielding a dictionary
        # without a key ``value``.
        t = self.engine.from_string("{{ my_doodad.value }}")
        self.assertEqual(t.render(c), "")

        # We can confirm that the doodad has been called
        self.assertEqual(my_doodad.num_calls, 1)

        # But we can access keys on the dict that's returned
        # by ``__call__``, instead.
        t = self.engine.from_string("{{ my_doodad.the_value }}")
        self.assertEqual(t.render(c), "42")
        self.assertEqual(my_doodad.num_calls, 2)

    def test_alters_data(self):
        class Doodad:
            alters_data = True

            def __init__(self, value):
                self.num_calls = 0
                self.value = value

            def __call__(self):
                self.num_calls += 1
                return {"the_value": self.value}

        my_doodad = Doodad(42)
        c = Context({"my_doodad": my_doodad})

        # Since ``my_doodad.alters_data`` is True, the template system will not
        # try to call our doodad but will use string_if_invalid
        t = self.engine.from_string("{{ my_doodad.value }}")
        self.assertEqual(t.render(c), "")
        t = self.engine.from_string("{{ my_doodad.the_value }}")
        self.assertEqual(t.render(c), "")

        # Double-check that the object was really never called during the
        # template rendering.
        self.assertEqual(my_doodad.num_calls, 0)

    def test_alters_data_propagation(self):
        class GrandParentLeft(AltersData):
            def my_method(self):
                return 42

            my_method.alters_data = True

        class ParentLeft(GrandParentLeft):
            def change_alters_data_method(self):
                return 63

            change_alters_data_method.alters_data = True

            def sub_non_callable_method(self):
                return 64

            sub_non_callable_method.alters_data = True

        class ParentRight(AltersData):
            def other_method(self):
                return 52

            other_method.alters_data = True

        class Child(ParentLeft, ParentRight):
            def my_method(self):
                return 101

            def other_method(self):
                return 102

            def change_alters_data_method(self):
                return 103

            change_alters_data_method.alters_data = False

            sub_non_callable_method = 104

        class GrandChild(Child):
            pass

        child = Child()
        self.assertIs(child.my_method.alters_data, True)
        self.assertIs(child.other_method.alters_data, True)
        self.assertIs(child.change_alters_data_method.alters_data, False)

        grand_child = GrandChild()
        self.assertIs(grand_child.my_method.alters_data, True)
        self.assertIs(grand_child.other_method.alters_data, True)
        self.assertIs(grand_child.change_alters_data_method.alters_data, False)

        c = Context({"element": grand_child})

        t = self.engine.from_string("{{ element.my_method }}")
        self.assertEqual(t.render(c), "")
        t = self.engine.from_string("{{ element.other_method }}")
        self.assertEqual(t.render(c), "")
        t = self.engine.from_string("{{ element.change_alters_data_method }}")
        self.assertEqual(t.render(c), "103")
        t = self.engine.from_string("{{ element.sub_non_callable_method }}")
        self.assertEqual(t.render(c), "104")

    def test_do_not_call(self):
        class Doodad:
            do_not_call_in_templates = True

            def __init__(self, value):
                self.num_calls = 0
                self.value = value

            def __call__(self):
                self.num_calls += 1
                return {"the_value": self.value}

        my_doodad = Doodad(42)
        c = Context({"my_doodad": my_doodad})

        # Since ``my_doodad.do_not_call_in_templates`` is True, the template
        # system will not try to call our doodad. We can access its attributes
        # as normal, and we don't have access to the dict that it returns when
        # called.
        t = self.engine.from_string("{{ my_doodad.value }}")
        self.assertEqual(t.render(c), "42")
        t = self.engine.from_string("{{ my_doodad.the_value }}")
        self.assertEqual(t.render(c), "")

        # Double-check that the object was really never called during the
        # template rendering.
        self.assertEqual(my_doodad.num_calls, 0)

    def test_do_not_call_and_alters_data(self):
        # If we combine ``alters_data`` and ``do_not_call_in_templates``, the
        # ``alters_data`` attribute will not make any difference in the
        # template system's behavior.

        class Doodad:
            do_not_call_in_templates = True
            alters_data = True

            def __init__(self, value):
                self.num_calls = 0
                self.value = value

            def __call__(self):
                self.num_calls += 1
                return {"the_value": self.value}

        my_doodad = Doodad(42)
        c = Context({"my_doodad": my_doodad})

        t = self.engine.from_string("{{ my_doodad.value }}")
        self.assertEqual(t.render(c), "42")
        t = self.engine.from_string("{{ my_doodad.the_value }}")
        self.assertEqual(t.render(c), "")

        # Double-check that the object was really never called during the
        # template rendering.
        self.assertEqual(my_doodad.num_calls, 0)
53d2eb367f99b73dca1a10d5b6afe841bb9a305e0eb1a276e0b2471aef35fd25
import datetime import itertools import unittest from copy import copy from unittest import mock from django.core.exceptions import FieldError from django.core.management.color import no_style from django.db import ( DatabaseError, DataError, IntegrityError, OperationalError, connection, ) from django.db.models import ( CASCADE, PROTECT, AutoField, BigAutoField, BigIntegerField, BinaryField, BooleanField, CharField, CheckConstraint, DateField, DateTimeField, DecimalField, DurationField, F, FloatField, ForeignKey, ForeignObject, Index, IntegerField, JSONField, ManyToManyField, Model, OneToOneField, OrderBy, PositiveIntegerField, Q, SlugField, SmallAutoField, SmallIntegerField, TextField, TimeField, UniqueConstraint, UUIDField, Value, ) from django.db.models.fields.json import KeyTextTransform from django.db.models.functions import Abs, Cast, Collate, Lower, Random, Upper from django.db.models.indexes import IndexExpression from django.db.transaction import TransactionManagementError, atomic from django.test import ( TransactionTestCase, ignore_warnings, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import CaptureQueriesContext, isolate_apps, register_lookup from django.utils.deprecation import RemovedInDjango51Warning from .fields import CustomManyToManyField, InheritedManyToManyField, MediumBlobField from .models import ( Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, AuthorWithIndexedName, AuthorWithUniqueName, AuthorWithUniqueNameAndBirthday, Book, BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor, BookWithSlug, IntegerPK, Node, Note, NoteRename, Tag, TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps, ) class SchemaTests(TransactionTestCase): """ Tests for the schema-alteration code. Be aware that these tests are more liable than most to false results, as sometimes the code to check if a test has worked is almost as complex as the code it is testing. """ available_apps = [] models = [ Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Node, Note, Tag, TagM2MTest, TagUniqueRename, Thing, UniqueTest, ] # Utility functions def setUp(self): # local_models should contain test dependent model classes that will be # automatically removed from the app cache on test tear down. self.local_models = [] # isolated_local_models contains models that are in test methods # decorated with @isolate_apps. 
self.isolated_local_models = [] def tearDown(self): # Delete any tables made for our models self.delete_tables() new_apps.clear_cache() for model in new_apps.get_models(): model._meta._expire_cache() if "schema" in new_apps.all_models: for model in self.local_models: for many_to_many in model._meta.many_to_many: through = many_to_many.remote_field.through if through and through._meta.auto_created: del new_apps.all_models["schema"][through._meta.model_name] del new_apps.all_models["schema"][model._meta.model_name] if self.isolated_local_models: with connection.schema_editor() as editor: for model in self.isolated_local_models: editor.delete_model(model) def delete_tables(self): "Deletes all model tables for our models for a clean test environment" converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: connection.disable_constraint_checking() table_names = connection.introspection.table_names() if connection.features.ignores_table_name_case: table_names = [table_name.lower() for table_name in table_names] for model in itertools.chain(SchemaTests.models, self.local_models): tbl = converter(model._meta.db_table) if connection.features.ignores_table_name_case: tbl = tbl.lower() if tbl in table_names: editor.delete_model(model) table_names.remove(tbl) connection.enable_constraint_checking() def column_classes(self, model): with connection.cursor() as cursor: columns = { d[0]: (connection.introspection.get_field_type(d[1], d), d) for d in connection.introspection.get_table_description( cursor, model._meta.db_table, ) } # SQLite has a different format for field_type for name, (type, desc) in columns.items(): if isinstance(type, tuple): columns[name] = (type[0], desc) return columns def get_primary_key(self, table): with connection.cursor() as cursor: return connection.introspection.get_primary_key_column(cursor, table) def get_indexes(self, table): """ Get the indexes on the table using a new cursor. """ with connection.cursor() as cursor: return [ c["columns"][0] for c in connection.introspection.get_constraints( cursor, table ).values() if c["index"] and len(c["columns"]) == 1 ] def get_uniques(self, table): with connection.cursor() as cursor: return [ c["columns"][0] for c in connection.introspection.get_constraints( cursor, table ).values() if c["unique"] and len(c["columns"]) == 1 ] def get_constraints(self, table): """ Get the constraints on a table using a new cursor. """ with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def get_constraints_for_column(self, model, column_name): constraints = self.get_constraints(model._meta.db_table) constraints_for_column = [] for name, details in constraints.items(): if details["columns"] == [column_name]: constraints_for_column.append(name) return sorted(constraints_for_column) def check_added_field_default( self, schema_editor, model, field, field_name, expected_default, cast_function=None, ): with connection.cursor() as cursor: schema_editor.add_field(model, field) cursor.execute( "SELECT {} FROM {};".format(field_name, model._meta.db_table) ) database_default = cursor.fetchall()[0][0] if cast_function and type(database_default) != type(expected_default): database_default = cast_function(database_default) self.assertEqual(database_default, expected_default) def get_constraints_count(self, table, column, fk_to): """ Return a dict with keys 'fks', 'uniques, and 'indexes' indicating the number of foreign keys, unique constraints, and indexes on `table`.`column`. 
The `fk_to` argument is a 2-tuple specifying the expected foreign key relationship's (table, column). """ with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, table) counts = {"fks": 0, "uniques": 0, "indexes": 0} for c in constraints.values(): if c["columns"] == [column]: if c["foreign_key"] == fk_to: counts["fks"] += 1 if c["unique"]: counts["uniques"] += 1 elif c["index"]: counts["indexes"] += 1 return counts def get_column_collation(self, table, column): with connection.cursor() as cursor: return next( f.collation for f in connection.introspection.get_table_description(cursor, table) if f.name == column ) def assertIndexOrder(self, table, index, order): constraints = self.get_constraints(table) self.assertIn(index, constraints) index_orders = constraints[index]["orders"] self.assertTrue( all(val == expected for val, expected in zip(index_orders, order)) ) def assertForeignKeyExists(self, model, column, expected_fk_table, field="id"): """ Fail if the FK constraint on `model.Meta.db_table`.`column` to `expected_fk_table`.id doesn't exist. """ if not connection.features.can_introspect_foreign_keys: return constraints = self.get_constraints(model._meta.db_table) constraint_fk = None for details in constraints.values(): if details["columns"] == [column] and details["foreign_key"]: constraint_fk = details["foreign_key"] break self.assertEqual(constraint_fk, (expected_fk_table, field)) def assertForeignKeyNotExists(self, model, column, expected_fk_table): if not connection.features.can_introspect_foreign_keys: return with self.assertRaises(AssertionError): self.assertForeignKeyExists(model, column, expected_fk_table) # Tests def test_creation_deletion(self): """ Tries creating a model's table, and then deleting it. """ with connection.schema_editor() as editor: # Create the table editor.create_model(Author) # The table is there list(Author.objects.all()) # Clean up that table editor.delete_model(Author) # No deferred SQL should be left over. self.assertEqual(editor.deferred_sql, []) # The table is gone with self.assertRaises(DatabaseError): list(Author.objects.all()) @skipUnlessDBFeature("supports_foreign_keys") def test_fk(self): "Creating tables out of FK order, then repointing, works" # Create the table with connection.schema_editor() as editor: editor.create_model(Book) editor.create_model(Author) editor.create_model(Tag) # Initial tables are there list(Author.objects.all()) list(Book.objects.all()) # Make sure the FK constraint is present with self.assertRaises(IntegrityError): Book.objects.create( author_id=1, title="Much Ado About Foreign Keys", pub_date=datetime.datetime.now(), ) # Repoint the FK constraint old_field = Book._meta.get_field("author") new_field = ForeignKey(Tag, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) self.assertForeignKeyExists(Book, "author_id", "schema_tag") @skipUnlessDBFeature("can_create_inline_fk") def test_inline_fk(self): # Create some tables. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) editor.create_model(Note) self.assertForeignKeyNotExists(Note, "book_id", "schema_book") # Add a foreign key from one to the other. 
with connection.schema_editor() as editor: new_field = ForeignKey(Book, CASCADE) new_field.set_attributes_from_name("book") editor.add_field(Note, new_field) self.assertForeignKeyExists(Note, "book_id", "schema_book") # Creating a FK field with a constraint uses a single statement without # a deferred ALTER TABLE. self.assertFalse( [ sql for sql in (str(statement) for statement in editor.deferred_sql) if sql.startswith("ALTER TABLE") and "ADD CONSTRAINT" in sql ] ) @skipUnlessDBFeature("can_create_inline_fk") def test_add_inline_fk_update_data(self): with connection.schema_editor() as editor: editor.create_model(Node) # Add an inline foreign key and update data in the same transaction. new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True) new_field.set_attributes_from_name("new_parent_fk") parent = Node.objects.create() with connection.schema_editor() as editor: editor.add_field(Node, new_field) editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk]) assertIndex = ( self.assertIn if connection.features.indexes_foreign_keys else self.assertNotIn ) assertIndex("new_parent_fk_id", self.get_indexes(Node._meta.db_table)) @skipUnlessDBFeature( "can_create_inline_fk", "allows_multiple_constraints_on_same_fields", ) @isolate_apps("schema") def test_add_inline_fk_index_update_data(self): class Node(Model): class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Node) # Add an inline foreign key, update data, and an index in the same # transaction. new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True) new_field.set_attributes_from_name("new_parent_fk") parent = Node.objects.create() with connection.schema_editor() as editor: editor.add_field(Node, new_field) Node._meta.add_field(new_field) editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk]) editor.add_index( Node, Index(fields=["new_parent_fk"], name="new_parent_inline_fk_idx") ) self.assertIn("new_parent_fk_id", self.get_indexes(Node._meta.db_table)) @skipUnlessDBFeature("supports_foreign_keys") def test_char_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorCharFieldWithIndex) # Change CharField to FK old_field = AuthorCharFieldWithIndex._meta.get_field("char_field") new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name("char_field") with connection.schema_editor() as editor: editor.alter_field( AuthorCharFieldWithIndex, old_field, new_field, strict=True ) self.assertForeignKeyExists( AuthorCharFieldWithIndex, "char_field_id", "schema_author" ) @skipUnlessDBFeature("supports_foreign_keys") @skipUnlessDBFeature("supports_index_on_text_field") def test_text_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorTextFieldWithIndex) # Change TextField to FK old_field = AuthorTextFieldWithIndex._meta.get_field("text_field") new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name("text_field") with connection.schema_editor() as editor: editor.alter_field( AuthorTextFieldWithIndex, old_field, new_field, strict=True ) self.assertForeignKeyExists( AuthorTextFieldWithIndex, "text_field_id", "schema_author" ) @isolate_apps("schema") def test_char_field_pk_to_auto_field(self): class Foo(Model): id = CharField(max_length=255, primary_key=True) class Meta: app_label = "schema" with 
connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field("id") new_field = AutoField(primary_key=True) new_field.set_attributes_from_name("id") new_field.model = Foo with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) @skipUnlessDBFeature("supports_foreign_keys") def test_fk_to_proxy(self): "Creating a FK to a proxy model creates database constraints." class AuthorProxy(Author): class Meta: app_label = "schema" apps = new_apps proxy = True class AuthorRef(Model): author = ForeignKey(AuthorProxy, on_delete=CASCADE) class Meta: app_label = "schema" apps = new_apps self.local_models = [AuthorProxy, AuthorRef] # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorRef) self.assertForeignKeyExists(AuthorRef, "author_id", "schema_author") @skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys") def test_fk_db_constraint(self): "The db_constraint parameter is respected" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(Author) editor.create_model(BookWeak) # Initial tables are there list(Author.objects.all()) list(Tag.objects.all()) list(BookWeak.objects.all()) self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author") # Make a db_constraint=False FK new_field = ForeignKey(Tag, CASCADE, db_constraint=False) new_field.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.add_field(Author, new_field) self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag") # Alter to one with a constraint new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) self.assertForeignKeyExists(Author, "tag_id", "schema_tag") # Alter to one without a constraint again new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field2, new_field, strict=True) self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag") @isolate_apps("schema") def test_no_db_constraint_added_during_primary_key_change(self): """ When a primary key that's pointed to by a ForeignKey with db_constraint=False is altered, a foreign key constraint isn't added. """ class Author(Model): class Meta: app_label = "schema" class BookWeak(Model): author = ForeignKey(Author, CASCADE, db_constraint=False) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWeak) self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author") old_field = Author._meta.get_field("id") new_field = BigAutoField(primary_key=True) new_field.model = Author new_field.set_attributes_from_name("id") # @isolate_apps() and inner models are needed to have the model # relations populated, otherwise this doesn't act as a regression test. 
self.assertEqual(len(new_field.model._meta.related_objects), 1) with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author") def _test_m2m_db_constraint(self, M2MFieldClass): class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(LocalAuthorWithM2M) # Initial tables are there list(LocalAuthorWithM2M.objects.all()) list(Tag.objects.all()) # Make a db_constraint=False FK new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False) new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) self.assertForeignKeyNotExists( new_field.remote_field.through, "tag_id", "schema_tag" ) @skipUnlessDBFeature("supports_foreign_keys") def test_m2m_db_constraint(self): self._test_m2m_db_constraint(ManyToManyField) @skipUnlessDBFeature("supports_foreign_keys") def test_m2m_db_constraint_custom(self): self._test_m2m_db_constraint(CustomManyToManyField) @skipUnlessDBFeature("supports_foreign_keys") def test_m2m_db_constraint_inherited(self): self._test_m2m_db_constraint(InheritedManyToManyField) def test_add_field(self): """ Tests adding fields to models """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add the new field new_field = IntegerField(null=True) new_field.set_attributes_from_name("age") with CaptureQueriesContext( connection ) as ctx, connection.schema_editor() as editor: editor.add_field(Author, new_field) drop_default_sql = editor.sql_alter_column_no_default % { "column": editor.quote_name(new_field.name), } self.assertFalse( any(drop_default_sql in query["sql"] for query in ctx.captured_queries) ) # Table is not rebuilt. self.assertIs( any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries), False ) self.assertIs( any("DROP TABLE" in query["sql"] for query in ctx.captured_queries), False ) columns = self.column_classes(Author) self.assertEqual( columns["age"][0], connection.features.introspected_field_types["IntegerField"], ) self.assertTrue(columns["age"][1][6]) def test_add_field_remove_field(self): """ Adding a field and removing it removes all deferred sql referring to it. """ with connection.schema_editor() as editor: # Create a table with a unique constraint on the slug field. editor.create_model(Tag) # Remove the slug column. 
editor.remove_field(Tag, Tag._meta.get_field("slug")) self.assertEqual(editor.deferred_sql, []) def test_add_field_temp_default(self): """ Tests adding fields to models with a temporary default """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = CharField(max_length=30, default="Godwin") new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) self.assertEqual( columns["surname"][0], connection.features.introspected_field_types["CharField"], ) self.assertEqual( columns["surname"][1][6], connection.features.interprets_empty_strings_as_nulls, ) def test_add_field_temp_default_boolean(self): """ Tests adding fields to models with a temporary default where the default is False. (#21783) """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = BooleanField(default=False) new_field.set_attributes_from_name("awesome") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # BooleanField are stored as TINYINT(1) on MySQL. field_type = columns["awesome"][0] self.assertEqual( field_type, connection.features.introspected_field_types["BooleanField"] ) def test_add_field_default_transform(self): """ Tests adding fields to models with a default that is not directly valid in the database (#22581) """ class TestTransformField(IntegerField): # Weird field that saves the count of items in its value def get_default(self): return self.default def get_prep_value(self, value): if value is None: return 0 return len(value) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add the field with a default it needs to cast (to string in this case) new_field = TestTransformField(default={1: 2}) new_field.set_attributes_from_name("thing") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is there columns = self.column_classes(Author) field_type, field_info = columns["thing"] self.assertEqual( field_type, connection.features.introspected_field_types["IntegerField"] ) # Make sure the values were transformed correctly self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2) def test_add_field_o2o_nullable(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Note) new_field = OneToOneField(Note, CASCADE, null=True) new_field.set_attributes_from_name("note") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) self.assertIn("note_id", columns) self.assertTrue(columns["note_id"][1][6]) def test_add_field_binary(self): """ Tests binary fields get a sane default (#22851) """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field 
new_field = BinaryField(blank=True) new_field.set_attributes_from_name("bits") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # MySQL annoyingly uses the same backend, so it'll come back as one of # these two types. self.assertIn(columns["bits"][0], ("BinaryField", "TextField")) def test_add_field_durationfield_with_default(self): with connection.schema_editor() as editor: editor.create_model(Author) new_field = DurationField(default=datetime.timedelta(minutes=10)) new_field.set_attributes_from_name("duration") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) self.assertEqual( columns["duration"][0], connection.features.introspected_field_types["DurationField"], ) @unittest.skipUnless(connection.vendor == "mysql", "MySQL specific") def test_add_binaryfield_mediumblob(self): """ Test adding a custom-sized binary field on MySQL (#24846). """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field with default new_field = MediumBlobField(blank=True, default=b"123") new_field.set_attributes_from_name("bits") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # Introspection treats BLOBs as TextFields self.assertEqual(columns["bits"][0], "TextField") @isolate_apps("schema") def test_add_auto_field(self): class AddAutoFieldModel(Model): name = CharField(max_length=255, primary_key=True) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(AddAutoFieldModel) self.isolated_local_models = [AddAutoFieldModel] old_field = AddAutoFieldModel._meta.get_field("name") new_field = CharField(max_length=255) new_field.set_attributes_from_name("name") new_field.model = AddAutoFieldModel with connection.schema_editor() as editor: editor.alter_field(AddAutoFieldModel, old_field, new_field) new_auto_field = AutoField(primary_key=True) new_auto_field.set_attributes_from_name("id") new_auto_field.model = AddAutoFieldModel() with connection.schema_editor() as editor: editor.add_field(AddAutoFieldModel, new_auto_field) # Crashes on PostgreSQL when the GENERATED BY suffix is missing. AddAutoFieldModel.objects.create(name="test") def test_remove_field(self): with connection.schema_editor() as editor: editor.create_model(Author) with CaptureQueriesContext(connection) as ctx: editor.remove_field(Author, Author._meta.get_field("name")) columns = self.column_classes(Author) self.assertNotIn("name", columns) if getattr(connection.features, "can_alter_table_drop_column", True): # Table is not rebuilt. 
self.assertIs( any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries), False, ) self.assertIs( any("DROP TABLE" in query["sql"] for query in ctx.captured_queries), False, ) def test_remove_indexed_field(self): with connection.schema_editor() as editor: editor.create_model(AuthorCharFieldWithIndex) with connection.schema_editor() as editor: editor.remove_field( AuthorCharFieldWithIndex, AuthorCharFieldWithIndex._meta.get_field("char_field"), ) columns = self.column_classes(AuthorCharFieldWithIndex) self.assertNotIn("char_field", columns) def test_alter(self): """ Tests simple altering of fields """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertEqual( columns["name"][0], connection.features.introspected_field_types["CharField"], ) self.assertEqual( bool(columns["name"][1][6]), bool(connection.features.interprets_empty_strings_as_nulls), ) # Alter the name field to a TextField old_field = Author._meta.get_field("name") new_field = TextField(null=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) columns = self.column_classes(Author) self.assertEqual(columns["name"][0], "TextField") self.assertTrue(columns["name"][1][6]) # Change nullability again new_field2 = TextField(null=False) new_field2.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) columns = self.column_classes(Author) self.assertEqual(columns["name"][0], "TextField") self.assertEqual( bool(columns["name"][1][6]), bool(connection.features.interprets_empty_strings_as_nulls), ) def test_alter_auto_field_to_integer_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to IntegerField old_field = Author._meta.get_field("id") new_field = IntegerField(primary_key=True) new_field.set_attributes_from_name("id") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Now that ID is an IntegerField, the database raises an error if it # isn't provided. if not connection.features.supports_unspecified_pk: with self.assertRaises(DatabaseError): Author.objects.create() def test_alter_auto_field_to_char_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to CharField old_field = Author._meta.get_field("id") new_field = CharField(primary_key=True, max_length=50) new_field.set_attributes_from_name("id") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) @isolate_apps("schema") def test_alter_auto_field_quoted_db_column(self): class Foo(Model): id = AutoField(primary_key=True, db_column='"quoted_id"') class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field("id") new_field = BigAutoField(primary_key=True) new_field.model = Foo new_field.db_column = '"quoted_id"' new_field.set_attributes_from_name("id") with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) Foo.objects.create() def test_alter_not_unique_field_to_primary_key(self): # Create the table. 
    def test_alter_not_unique_field_to_primary_key(self):
        # Create the table.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Change UUIDField to primary key.
        old_field = Author._meta.get_field("uuid")
        new_field = UUIDField(primary_key=True)
        new_field.set_attributes_from_name("uuid")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.remove_field(Author, Author._meta.get_field("id"))
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Redundant unique constraint is not added.
        count = self.get_constraints_count(
            Author._meta.db_table,
            Author._meta.get_field("uuid").column,
            None,
        )
        self.assertLessEqual(count["uniques"], 1)

    @isolate_apps("schema")
    def test_alter_primary_key_quoted_db_table(self):
        class Foo(Model):
            class Meta:
                app_label = "schema"
                db_table = '"foo"'

        with connection.schema_editor() as editor:
            editor.create_model(Foo)
        self.isolated_local_models = [Foo]
        old_field = Foo._meta.get_field("id")
        new_field = BigAutoField(primary_key=True)
        new_field.model = Foo
        new_field.set_attributes_from_name("id")
        with connection.schema_editor() as editor:
            editor.alter_field(Foo, old_field, new_field, strict=True)
        Foo.objects.create()

    def test_alter_text_field(self):
        # Regression for "BLOB/TEXT column 'info' can't have a default value"
        # on MySQL.
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        old_field = Note._meta.get_field("info")
        new_field = TextField(blank=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)

    def test_alter_text_field_to_not_null_with_default_value(self):
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        old_field = Note._meta.get_field("address")
        new_field = TextField(blank=True, default="", null=False)
        new_field.set_attributes_from_name("address")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)

    @skipUnlessDBFeature("can_defer_constraint_checks", "can_rollback_ddl")
    def test_alter_fk_checks_deferred_constraints(self):
        """
        #25492 - Altering a foreign key's structure and data in the same
        transaction.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Node)
        old_field = Node._meta.get_field("parent")
        new_field = ForeignKey(Node, CASCADE)
        new_field.set_attributes_from_name("parent")
        parent = Node.objects.create()
        with connection.schema_editor() as editor:
            # Update the parent FK to create a deferred constraint check.
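            # With deferrable constraints, the FK check runs at commit time,
            # so the data update and the DDL can share one transaction.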
            Node.objects.update(parent=parent)
            editor.alter_field(Node, old_field, new_field, strict=True)

    @isolate_apps("schema")
    def test_alter_null_with_default_value_deferred_constraints(self):
        class Publisher(Model):
            class Meta:
                app_label = "schema"

        class Article(Model):
            publisher = ForeignKey(Publisher, CASCADE)
            title = CharField(max_length=50, null=True)
            description = CharField(max_length=100, null=True)

            class Meta:
                app_label = "schema"

        with connection.schema_editor() as editor:
            editor.create_model(Publisher)
            editor.create_model(Article)
        self.isolated_local_models = [Article, Publisher]
        publisher = Publisher.objects.create()
        Article.objects.create(publisher=publisher)
        old_title = Article._meta.get_field("title")
        new_title = CharField(max_length=50, null=False, default="")
        new_title.set_attributes_from_name("title")
        old_description = Article._meta.get_field("description")
        new_description = CharField(max_length=100, null=False, default="")
        new_description.set_attributes_from_name("description")
        with connection.schema_editor() as editor:
            editor.alter_field(Article, old_title, new_title, strict=True)
            editor.alter_field(Article, old_description, new_description, strict=True)

    def test_alter_text_field_to_date_field(self):
        """
        #25002 - Test conversion of text field to date field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        Note.objects.create(info="1988-05-05")
        old_field = Note._meta.get_field("info")
        new_field = DateField(blank=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns["info"][1][6])

    def test_alter_text_field_to_datetime_field(self):
        """
        #25002 - Test conversion of text field to datetime field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        Note.objects.create(info="1988-05-05 3:16:17.4567")
        old_field = Note._meta.get_field("info")
        new_field = DateTimeField(blank=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns["info"][1][6])

    def test_alter_text_field_to_time_field(self):
        """
        #25002 - Test conversion of text field to time field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        Note.objects.create(info="3:16:17.4567")
        old_field = Note._meta.get_field("info")
        new_field = TimeField(blank=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns["info"][1][6])
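    # The three #25002 conversions above rely on the backend being able to
    # cast the existing text data to the new column type (on PostgreSQL,
    # Django emits a USING clause for this), which is why each test seeds a
    # parseable value first.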
""" with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = CharField(max_length=50) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): Note.objects.create(info=None) @skipUnlessDBFeature("interprets_empty_strings_as_nulls") def test_alter_textual_field_not_null_to_null(self): """ Nullability for textual fields is preserved on databases that interpret empty strings as NULLs. """ with connection.schema_editor() as editor: editor.create_model(Author) columns = self.column_classes(Author) # Field is nullable. self.assertTrue(columns["uuid"][1][6]) # Change to NOT NULL. old_field = Author._meta.get_field("uuid") new_field = SlugField(null=False, blank=True) new_field.set_attributes_from_name("uuid") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) columns = self.column_classes(Author) # Nullability is preserved. self.assertTrue(columns["uuid"][1][6]) def test_alter_numeric_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(UniqueTest) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug="aaa") old_field = UniqueTest._meta.get_field("year") new_field = BigIntegerField() new_field.set_attributes_from_name("year") with connection.schema_editor() as editor: editor.alter_field(UniqueTest, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug="bbb") def test_alter_null_to_not_null(self): """ #23609 - Tests handling of default values when altering from NULL to NOT NULL. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertTrue(columns["height"][1][6]) # Create some test data Author.objects.create(name="Not null author", height=12) Author.objects.create(name="Null author") # Verify null value self.assertEqual(Author.objects.get(name="Not null author").height, 12) self.assertIsNone(Author.objects.get(name="Null author").height) # Alter the height field to NOT NULL with default old_field = Author._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) columns = self.column_classes(Author) self.assertFalse(columns["height"][1][6]) # Verify default value self.assertEqual(Author.objects.get(name="Not null author").height, 12) self.assertEqual(Author.objects.get(name="Null author").height, 42) def test_alter_charfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_nulls when changing a CharField to null. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change the CharField to null old_field = Author._meta.get_field("name") new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_char_field_decrease_length(self): # Create the table. with connection.schema_editor() as editor: editor.create_model(Author) Author.objects.create(name="x" * 255) # Change max_length of CharField. old_field = Author._meta.get_field("name") new_field = CharField(max_length=254) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: msg = "value too long for type character varying(254)" with self.assertRaisesMessage(DataError, msg): editor.alter_field(Author, old_field, new_field, strict=True) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_with_custom_db_type(self): from django.contrib.postgres.fields import ArrayField class Foo(Model): field = ArrayField(CharField(max_length=255)) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field("field") new_field = ArrayField(CharField(max_length=16)) new_field.set_attributes_from_name("field") new_field.model = Foo with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) @isolate_apps("schema") @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_array_field_decrease_base_field_length(self): from django.contrib.postgres.fields import ArrayField class ArrayModel(Model): field = ArrayField(CharField(max_length=16)) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(ArrayModel) self.isolated_local_models = [ArrayModel] ArrayModel.objects.create(field=["x" * 16]) old_field = ArrayModel._meta.get_field("field") new_field = ArrayField(CharField(max_length=15)) new_field.set_attributes_from_name("field") new_field.model = ArrayModel with connection.schema_editor() as editor: msg = "value too long for type character varying(15)" with self.assertRaisesMessage(DataError, msg): editor.alter_field(ArrayModel, old_field, new_field, strict=True) @isolate_apps("schema") @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_array_field_decrease_nested_base_field_length(self): from django.contrib.postgres.fields import ArrayField class ArrayModel(Model): field = ArrayField(ArrayField(CharField(max_length=16))) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(ArrayModel) self.isolated_local_models = [ArrayModel] ArrayModel.objects.create(field=[["x" * 16]]) old_field = ArrayModel._meta.get_field("field") new_field = ArrayField(ArrayField(CharField(max_length=15))) new_field.set_attributes_from_name("field") new_field.model = ArrayModel with connection.schema_editor() as editor: msg = "value too long for type character varying(15)" with self.assertRaisesMessage(DataError, msg): editor.alter_field(ArrayModel, old_field, new_field, strict=True) @isolate_apps("schema") @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") @skipUnlessDBFeature( "supports_collation_on_charfield", "supports_non_deterministic_collations", ) def 
    @isolate_apps("schema")
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    @skipUnlessDBFeature(
        "supports_collation_on_charfield",
        "supports_non_deterministic_collations",
    )
    def test_db_collation_arrayfield(self):
        from django.contrib.postgres.fields import ArrayField

        ci_collation = "case_insensitive"
        cs_collation = "en-x-icu"

        def drop_collation():
            with connection.cursor() as cursor:
                cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")

        with connection.cursor() as cursor:
            cursor.execute(
                f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider = icu, "
                f"locale = 'und-u-ks-level2', deterministic = false)"
            )
        self.addCleanup(drop_collation)

        class ArrayModel(Model):
            field = ArrayField(CharField(max_length=16, db_collation=ci_collation))

            class Meta:
                app_label = "schema"

        # Create the table.
        with connection.schema_editor() as editor:
            editor.create_model(ArrayModel)
        self.isolated_local_models = [ArrayModel]
        self.assertEqual(
            self.get_column_collation(ArrayModel._meta.db_table, "field"),
            ci_collation,
        )
        # Alter collation.
        old_field = ArrayModel._meta.get_field("field")
        new_field_cs = ArrayField(CharField(max_length=16, db_collation=cs_collation))
        new_field_cs.set_attributes_from_name("field")
        # Assign the model class, not the field class.
        new_field_cs.model = ArrayModel
        with connection.schema_editor() as editor:
            editor.alter_field(ArrayModel, old_field, new_field_cs, strict=True)
        self.assertEqual(
            self.get_column_collation(ArrayModel._meta.db_table, "field"),
            cs_collation,
        )

    @isolate_apps("schema")
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    @skipUnlessDBFeature(
        "supports_collation_on_charfield",
        "supports_non_deterministic_collations",
    )
    def test_unique_with_collation_charfield(self):
        ci_collation = "case_insensitive"

        def drop_collation():
            with connection.cursor() as cursor:
                cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")

        with connection.cursor() as cursor:
            cursor.execute(
                f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider = icu, "
                f"locale = 'und-u-ks-level2', deterministic = false)"
            )
        self.addCleanup(drop_collation)

        class CiCharModel(Model):
            field = CharField(max_length=16, db_collation=ci_collation, unique=True)

            class Meta:
                app_label = "schema"

        # Create the table.
        with connection.schema_editor() as editor:
            editor.create_model(CiCharModel)
        self.isolated_local_models = [CiCharModel]
        self.assertEqual(
            self.get_column_collation(CiCharModel._meta.db_table, "field"),
            ci_collation,
        )
        self.assertIn("field", self.get_uniques(CiCharModel._meta.db_table))

    def test_alter_textfield_to_null(self):
        """
        #24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_nulls when changing a TextField to null.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Change the TextField to null
        old_field = Note._meta.get_field("info")
        new_field = copy(old_field)
        new_field.null = True
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
""" # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) # Ensure the field is right to begin with columns = self.column_classes(AuthorWithDefaultHeight) self.assertTrue(columns["height"][1][6]) # Alter the height field to NOT NULL keeping the previous default old_field = AuthorWithDefaultHeight._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field( AuthorWithDefaultHeight, old_field, new_field, strict=True ) columns = self.column_classes(AuthorWithDefaultHeight) self.assertFalse(columns["height"][1][6]) @skipUnlessDBFeature("supports_foreign_keys") def test_alter_fk(self): """ Tests altering of FKs """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual( columns["author_id"][0], connection.features.introspected_field_types["IntegerField"], ) self.assertForeignKeyExists(Book, "author_id", "schema_author") # Alter the FK old_field = Book._meta.get_field("author") new_field = ForeignKey(Author, CASCADE, editable=False) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) columns = self.column_classes(Book) self.assertEqual( columns["author_id"][0], connection.features.introspected_field_types["IntegerField"], ) self.assertForeignKeyExists(Book, "author_id", "schema_author") @skipUnlessDBFeature("supports_foreign_keys") def test_alter_to_fk(self): """ #24447 - Tests adding a FK constraint for an existing column """ class LocalBook(Model): author = IntegerField() title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalBook] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalBook) # Ensure no FK constraint exists constraints = self.get_constraints(LocalBook._meta.db_table) for details in constraints.values(): if details["foreign_key"]: self.fail( "Found an unexpected FK constraint to %s" % details["columns"] ) old_field = LocalBook._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(LocalBook, old_field, new_field, strict=True) self.assertForeignKeyExists(LocalBook, "author_id", "schema_author") @skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys") def test_alter_o2o_to_fk(self): """ #24163 - Tests altering of OneToOneField to ForeignKey """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) # Ensure the field is right to begin with columns = self.column_classes(BookWithO2O) self.assertEqual( columns["author_id"][0], connection.features.introspected_field_types["IntegerField"], ) # Ensure the field is unique author = Author.objects.create(name="Joe") BookWithO2O.objects.create( author=author, title="Django 1", pub_date=datetime.datetime.now() ) with self.assertRaises(IntegrityError): BookWithO2O.objects.create( author=author, title="Django 2", pub_date=datetime.datetime.now() ) BookWithO2O.objects.all().delete() self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author") # Alter the 
        # Alter the OneToOneField to ForeignKey
        old_field = BookWithO2O._meta.get_field("author")
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        columns = self.column_classes(Book)
        self.assertEqual(
            columns["author_id"][0],
            connection.features.introspected_field_types["IntegerField"],
        )
        # Ensure the field is not unique anymore
        Book.objects.create(
            author=author, title="Django 1", pub_date=datetime.datetime.now()
        )
        Book.objects.create(
            author=author, title="Django 2", pub_date=datetime.datetime.now()
        )
        self.assertForeignKeyExists(Book, "author_id", "schema_author")

    @skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
    def test_alter_fk_to_o2o(self):
        """
        #24163 - Tests altering of ForeignKey to OneToOneField
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the field is right to begin with
        columns = self.column_classes(Book)
        self.assertEqual(
            columns["author_id"][0],
            connection.features.introspected_field_types["IntegerField"],
        )
        # Ensure the field is not unique
        author = Author.objects.create(name="Joe")
        Book.objects.create(
            author=author, title="Django 1", pub_date=datetime.datetime.now()
        )
        Book.objects.create(
            author=author, title="Django 2", pub_date=datetime.datetime.now()
        )
        Book.objects.all().delete()
        self.assertForeignKeyExists(Book, "author_id", "schema_author")
        # Alter the ForeignKey to OneToOneField
        old_field = Book._meta.get_field("author")
        new_field = OneToOneField(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        columns = self.column_classes(BookWithO2O)
        self.assertEqual(
            columns["author_id"][0],
            connection.features.introspected_field_types["IntegerField"],
        )
        # Ensure the field is unique now
        BookWithO2O.objects.create(
            author=author, title="Django 1", pub_date=datetime.datetime.now()
        )
        with self.assertRaises(IntegrityError):
            BookWithO2O.objects.create(
                author=author, title="Django 2", pub_date=datetime.datetime.now()
            )
        self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")

    def test_alter_field_fk_to_o2o(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        expected_fks = (
            1
            if connection.features.supports_foreign_keys
            and connection.features.can_introspect_foreign_keys
            else 0
        )
        expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
        # Check the index is right to begin with.
        counts = self.get_constraints_count(
            Book._meta.db_table,
            Book._meta.get_field("author").column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(
            counts,
            {"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
        )
        old_field = Book._meta.get_field("author")
        new_field = OneToOneField(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field)
        counts = self.get_constraints_count(
            Book._meta.db_table,
            Book._meta.get_field("author").column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The index on ForeignKey is replaced with a unique constraint for
        # OneToOneField.
        self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})

    def test_autofield_to_o2o(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Note)
        # Rename the field.
        old_field = Author._meta.get_field("id")
        new_field = AutoField(primary_key=True)
        new_field.set_attributes_from_name("note_ptr")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Alter AutoField to OneToOneField.
        new_field_o2o = OneToOneField(Note, CASCADE)
        new_field_o2o.set_attributes_from_name("note_ptr")
        new_field_o2o.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field_o2o, strict=True)
        columns = self.column_classes(Author)
        field_type, _ = columns["note_ptr_id"]
        self.assertEqual(
            field_type, connection.features.introspected_field_types["IntegerField"]
        )

    def test_alter_field_fk_keeps_index(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        expected_fks = (
            1
            if connection.features.supports_foreign_keys
            and connection.features.can_introspect_foreign_keys
            else 0
        )
        expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
        # Check the index is right to begin with.
        counts = self.get_constraints_count(
            Book._meta.db_table,
            Book._meta.get_field("author").column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(
            counts,
            {"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
        )
        old_field = Book._meta.get_field("author")
        # on_delete changed from CASCADE.
        new_field = ForeignKey(Author, PROTECT)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        counts = self.get_constraints_count(
            Book._meta.db_table,
            Book._meta.get_field("author").column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The index remains.
        self.assertEqual(
            counts,
            {"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
        )

    def test_alter_field_o2o_to_fk(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        expected_fks = (
            1
            if connection.features.supports_foreign_keys
            and connection.features.can_introspect_foreign_keys
            else 0
        )
        # Check the unique constraint is right to begin with.
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field("author").column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
        old_field = BookWithO2O._meta.get_field("author")
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field)
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field("author").column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The unique constraint on OneToOneField is replaced with an index for
        # ForeignKey.
        self.assertEqual(counts, {"fks": expected_fks, "uniques": 0, "indexes": 1})

    def test_alter_field_o2o_keeps_unique(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        expected_fks = (
            1
            if connection.features.supports_foreign_keys
            and connection.features.can_introspect_foreign_keys
            else 0
        )
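        # on_delete is enforced by Django in Python, not by the database, so
        # changing it alone should not require touching any constraints.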
        # Check the unique constraint is right to begin with.
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field("author").column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
        old_field = BookWithO2O._meta.get_field("author")
        # on_delete changed from CASCADE.
        new_field = OneToOneField(Author, PROTECT)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        counts = self.get_constraints_count(
            BookWithO2O._meta.db_table,
            BookWithO2O._meta.get_field("author").column,
            (Author._meta.db_table, Author._meta.pk.column),
        )
        # The unique constraint remains.
        self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})

    @skipUnlessDBFeature("ignores_table_name_case")
    def test_alter_db_table_case(self):
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Alter the case of the table
        old_table_name = Author._meta.db_table
        with connection.schema_editor() as editor:
            editor.alter_db_table(Author, old_table_name, old_table_name.upper())

    def test_alter_implicit_id_to_explicit(self):
        """
        Should be able to convert an implicit "id" field to an explicit "id"
        primary key field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        old_field = Author._meta.get_field("id")
        new_field = AutoField(primary_key=True)
        new_field.set_attributes_from_name("id")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # This will fail if DROP DEFAULT is inadvertently executed on this
        # field which drops the id sequence, at least on PostgreSQL.
        Author.objects.create(name="Foo")
        Author.objects.create(name="Bar")

    def test_alter_autofield_pk_to_bigautofield_pk(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        old_field = Author._meta.get_field("id")
        new_field = BigAutoField(primary_key=True)
        new_field.set_attributes_from_name("id")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        Author.objects.create(name="Foo", pk=1)
        with connection.cursor() as cursor:
            sequence_reset_sqls = connection.ops.sequence_reset_sql(
                no_style(), [Author]
            )
            if sequence_reset_sqls:
                cursor.execute(sequence_reset_sqls[0])
        self.assertIsNotNone(Author.objects.create(name="Bar"))

    def test_alter_autofield_pk_to_smallautofield_pk(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        old_field = Author._meta.get_field("id")
        new_field = SmallAutoField(primary_key=True)
        new_field.set_attributes_from_name("id")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        Author.objects.create(name="Foo", pk=1)
        with connection.cursor() as cursor:
            sequence_reset_sqls = connection.ops.sequence_reset_sql(
                no_style(), [Author]
            )
            if sequence_reset_sqls:
                cursor.execute(sequence_reset_sqls[0])
        self.assertIsNotNone(Author.objects.create(name="Bar"))
""" with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field("i") new_field = AutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name("i") with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # A model representing the updated model. class IntegerPKToAutoField(Model): i = AutoField(primary_key=True) j = IntegerField(unique=True) class Meta: app_label = "schema" apps = new_apps db_table = IntegerPK._meta.db_table # An id (i) is generated by the database. obj = IntegerPKToAutoField.objects.create(j=1) self.assertIsNotNone(obj.i) def test_alter_int_pk_to_bigautofield_pk(self): """ Should be able to rename an IntegerField(primary_key=True) to BigAutoField(primary_key=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field("i") new_field = BigAutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name("i") with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # A model representing the updated model. class IntegerPKToBigAutoField(Model): i = BigAutoField(primary_key=True) j = IntegerField(unique=True) class Meta: app_label = "schema" apps = new_apps db_table = IntegerPK._meta.db_table # An id (i) is generated by the database. obj = IntegerPKToBigAutoField.objects.create(j=1) self.assertIsNotNone(obj.i) @isolate_apps("schema") def test_alter_smallint_pk_to_smallautofield_pk(self): """ Should be able to rename an SmallIntegerField(primary_key=True) to SmallAutoField(primary_key=True). """ class SmallIntegerPK(Model): i = SmallIntegerField(primary_key=True) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(SmallIntegerPK) self.isolated_local_models = [SmallIntegerPK] old_field = SmallIntegerPK._meta.get_field("i") new_field = SmallAutoField(primary_key=True) new_field.model = SmallIntegerPK new_field.set_attributes_from_name("i") with connection.schema_editor() as editor: editor.alter_field(SmallIntegerPK, old_field, new_field, strict=True) @isolate_apps("schema") @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_serial_auto_field_to_bigautofield(self): class SerialAutoField(Model): id = SmallAutoField(primary_key=True) class Meta: app_label = "schema" table = SerialAutoField._meta.db_table column = SerialAutoField._meta.get_field("id").column with connection.cursor() as cursor: cursor.execute( f'CREATE TABLE "{table}" ' f'("{column}" smallserial NOT NULL PRIMARY KEY)' ) try: old_field = SerialAutoField._meta.get_field("id") new_field = BigAutoField(primary_key=True) new_field.model = SerialAutoField new_field.set_attributes_from_name("id") with connection.schema_editor() as editor: editor.alter_field(SerialAutoField, old_field, new_field, strict=True) sequence_name = f"{table}_{column}_seq" with connection.cursor() as cursor: cursor.execute( "SELECT data_type FROM pg_sequences WHERE sequencename = %s", [sequence_name], ) row = cursor.fetchone() sequence_data_type = row[0] if row and row[0] else None self.assertEqual(sequence_data_type, "bigint") # Rename the column. 
            # Rename the column.
            old_field = new_field
            new_field = AutoField(primary_key=True)
            new_field.model = SerialAutoField
            new_field.set_attributes_from_name("renamed_id")
            with connection.schema_editor() as editor:
                editor.alter_field(SerialAutoField, old_field, new_field, strict=True)
            with connection.cursor() as cursor:
                cursor.execute(
                    "SELECT data_type FROM pg_sequences WHERE sequencename = %s",
                    [sequence_name],
                )
                row = cursor.fetchone()
                sequence_data_type = row[0] if row else None
            self.assertEqual(sequence_data_type, "integer")
        finally:
            with connection.cursor() as cursor:
                cursor.execute(f'DROP TABLE "{table}"')

    def test_alter_int_pk_to_int_unique(self):
        """
        Should be able to alter an IntegerField(primary_key=True) to
        IntegerField(unique=True).
        """
        with connection.schema_editor() as editor:
            editor.create_model(IntegerPK)
        # Delete the old PK
        old_field = IntegerPK._meta.get_field("i")
        new_field = IntegerField(unique=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name("i")
        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)
        # The primary key constraint is gone. Result depends on database:
        # 'id' for SQLite, None for others (must not be 'i').
        self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ("id", None))

        # Set up a model class as it currently stands. The original IntegerPK
        # class is now out of date and some backends make use of the whole
        # model class when modifying a field (such as sqlite3 when remaking a
        # table) so an outdated model class leads to incorrect results.
        class Transitional(Model):
            i = IntegerField(unique=True)
            j = IntegerField(unique=True)

            class Meta:
                app_label = "schema"
                apps = new_apps
                db_table = "INTEGERPK"

        # model requires a new PK
        old_field = Transitional._meta.get_field("j")
        new_field = IntegerField(primary_key=True)
        new_field.model = Transitional
        new_field.set_attributes_from_name("j")
        with connection.schema_editor() as editor:
            editor.alter_field(Transitional, old_field, new_field, strict=True)

        # Create a model class representing the updated model.
        class IntegerUnique(Model):
            i = IntegerField(unique=True)
            j = IntegerField(primary_key=True)

            class Meta:
                app_label = "schema"
                apps = new_apps
                db_table = "INTEGERPK"
        # Ensure unique constraint works.
        IntegerUnique.objects.create(i=1, j=1)
        with self.assertRaises(IntegrityError):
            IntegerUnique.objects.create(i=1, j=2)

    def test_rename(self):
        """
        Tests simple renaming of fields
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertEqual(
            columns["name"][0],
            connection.features.introspected_field_types["CharField"],
        )
        self.assertNotIn("display_name", columns)
        # Alter the name field's name
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=254)
        new_field.set_attributes_from_name("display_name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        columns = self.column_classes(Author)
        self.assertEqual(
            columns["display_name"][0],
            connection.features.introspected_field_types["CharField"],
        )
        self.assertNotIn("name", columns)

    @isolate_apps("schema")
    def test_rename_referenced_field(self):
        class Author(Model):
            name = CharField(max_length=255, unique=True)

            class Meta:
                app_label = "schema"

        class Book(Model):
            author = ForeignKey(Author, CASCADE, to_field="name")

            class Meta:
                app_label = "schema"

        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        new_field = CharField(max_length=255, unique=True)
        new_field.set_attributes_from_name("renamed")
        with connection.schema_editor(
            atomic=connection.features.supports_atomic_references_rename
        ) as editor:
            editor.alter_field(Author, Author._meta.get_field("name"), new_field)
        # Ensure the foreign key reference was updated.
        self.assertForeignKeyExists(Book, "author_id", "schema_author", "renamed")
""" with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = TextField() new_field.set_attributes_from_name("detail_info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) columns = self.column_classes(Note) self.assertEqual(columns["detail_info"][0], "TextField") self.assertNotIn("info", columns) with self.assertRaises(IntegrityError): NoteRename.objects.create(detail_info=None) def _test_m2m_create(self, M2MFieldClass): """ Tests M2M fields on models during creation """ class LocalBookWithM2M(Model): author = ForeignKey(Author, CASCADE) title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() tags = M2MFieldClass("TagM2MTest", related_name="books") class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalBookWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2M) # Ensure there is now an m2m table there columns = self.column_classes( LocalBookWithM2M._meta.get_field("tags").remote_field.through ) self.assertEqual( columns["tagm2mtest_id"][0], connection.features.introspected_field_types["IntegerField"], ) def test_m2m_create(self): self._test_m2m_create(ManyToManyField) def test_m2m_create_custom(self): self._test_m2m_create(CustomManyToManyField) def test_m2m_create_inherited(self): self._test_m2m_create(InheritedManyToManyField) def _test_m2m_create_through(self, M2MFieldClass): """ Tests M2M fields on models during creation with through models """ class LocalTagThrough(Model): book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = "schema" apps = new_apps class LocalBookWithM2MThrough(Model): tags = M2MFieldClass( "TagM2MTest", related_name="books", through=LocalTagThrough ) class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalTagThrough, LocalBookWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalTagThrough) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2MThrough) # Ensure there is now an m2m table there columns = self.column_classes(LocalTagThrough) self.assertEqual( columns["book_id"][0], connection.features.introspected_field_types["IntegerField"], ) self.assertEqual( columns["tag_id"][0], connection.features.introspected_field_types["IntegerField"], ) def test_m2m_create_through(self): self._test_m2m_create_through(ManyToManyField) def test_m2m_create_through_custom(self): self._test_m2m_create_through(CustomManyToManyField) def test_m2m_create_through_inherited(self): self._test_m2m_create_through(InheritedManyToManyField) def test_m2m_through_remove(self): class LocalAuthorNoteThrough(Model): book = ForeignKey("schema.Author", CASCADE) tag = ForeignKey("self", CASCADE) class Meta: app_label = "schema" apps = new_apps class LocalNoteWithM2MThrough(Model): authors = ManyToManyField("schema.Author", through=LocalAuthorNoteThrough) class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalAuthorNoteThrough, LocalNoteWithM2MThrough] # Create the tables. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalAuthorNoteThrough) editor.create_model(LocalNoteWithM2MThrough) # Remove the through parameter. 
        old_field = LocalNoteWithM2MThrough._meta.get_field("authors")
        new_field = ManyToManyField("Author")
        new_field.set_attributes_from_name("authors")
        msg = (
            f"Cannot alter field {old_field} into {new_field} - they are not "
            f"compatible types (you cannot alter to or from M2M fields, or add or "
            f"remove through= on M2M fields)"
        )
        with connection.schema_editor() as editor:
            with self.assertRaisesMessage(ValueError, msg):
                editor.alter_field(LocalNoteWithM2MThrough, old_field, new_field)

    def _test_m2m(self, M2MFieldClass):
        """
        Tests adding/removing M2M fields on models
        """

        class LocalAuthorWithM2M(Model):
            name = CharField(max_length=255)

            class Meta:
                app_label = "schema"
                apps = new_apps

        self.local_models = [LocalAuthorWithM2M]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(LocalAuthorWithM2M)
            editor.create_model(TagM2MTest)
        # Create an M2M field
        new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
        new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
        # Ensure there's no m2m table there
        with self.assertRaises(DatabaseError):
            self.column_classes(new_field.remote_field.through)
        # Add the field
        with CaptureQueriesContext(
            connection
        ) as ctx, connection.schema_editor() as editor:
            editor.add_field(LocalAuthorWithM2M, new_field)
        # Only the M2M through table is created; the model's own table is not
        # rebuilt.
        self.assertEqual(
            len(
                [
                    query["sql"]
                    for query in ctx.captured_queries
                    if "CREATE TABLE" in query["sql"]
                ]
            ),
            1,
        )
        self.assertIs(
            any("DROP TABLE" in query["sql"] for query in ctx.captured_queries),
            False,
        )
        # Ensure there is now an m2m table there
        columns = self.column_classes(new_field.remote_field.through)
        self.assertEqual(
            columns["tagm2mtest_id"][0],
            connection.features.introspected_field_types["IntegerField"],
        )

        # "Alter" the field. This should not rename the DB table to itself.
        with connection.schema_editor() as editor:
            editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True)

        # Remove the M2M table again
        with connection.schema_editor() as editor:
            editor.remove_field(LocalAuthorWithM2M, new_field)
        # Ensure there's no m2m table there
        with self.assertRaises(DatabaseError):
            self.column_classes(new_field.remote_field.through)

        # Make sure the model state is coherent with the table one now that
        # we've removed the tags field.
        opts = LocalAuthorWithM2M._meta
        opts.local_many_to_many.remove(new_field)
        del new_apps.all_models["schema"][
            new_field.remote_field.through._meta.model_name
        ]
        opts._expire_cache()

    def test_m2m(self):
        self._test_m2m(ManyToManyField)

    def test_m2m_custom(self):
        self._test_m2m(CustomManyToManyField)

    def test_m2m_inherited(self):
        self._test_m2m(InheritedManyToManyField)

    def _test_m2m_through_alter(self, M2MFieldClass):
        """
        Tests altering M2Ms with explicit through models (should no-op)
        """

        class LocalAuthorTag(Model):
            author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
            tag = ForeignKey("schema.TagM2MTest", CASCADE)

            class Meta:
                app_label = "schema"
                apps = new_apps

        class LocalAuthorWithM2MThrough(Model):
            name = CharField(max_length=255)
            tags = M2MFieldClass(
                "schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
            )

            class Meta:
                app_label = "schema"
                apps = new_apps

        self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(LocalAuthorTag)
            editor.create_model(LocalAuthorWithM2MThrough)
            editor.create_model(TagM2MTest)
        # Ensure the m2m table is there
        self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
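        # With an explicit through model, the intermediate table belongs to
        # that model, so altering the M2M field must leave it untouched.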
        # "Alter" the field's blankness. This should not actually do anything.
        old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
        new_field = M2MFieldClass(
            "schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
        )
        new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
        with connection.schema_editor() as editor:
            editor.alter_field(
                LocalAuthorWithM2MThrough, old_field, new_field, strict=True
            )
        # Ensure the m2m table is still there
        self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)

    def test_m2m_through_alter(self):
        self._test_m2m_through_alter(ManyToManyField)

    def test_m2m_through_alter_custom(self):
        self._test_m2m_through_alter(CustomManyToManyField)

    def test_m2m_through_alter_inherited(self):
        self._test_m2m_through_alter(InheritedManyToManyField)

    def _test_m2m_repoint(self, M2MFieldClass):
        """
        Tests repointing M2M fields
        """

        class LocalBookWithM2M(Model):
            author = ForeignKey(Author, CASCADE)
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()
            tags = M2MFieldClass("TagM2MTest", related_name="books")

            class Meta:
                app_label = "schema"
                apps = new_apps

        self.local_models = [LocalBookWithM2M]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(LocalBookWithM2M)
            editor.create_model(TagM2MTest)
            editor.create_model(UniqueTest)
        # Ensure the M2M exists and points to TagM2MTest
        if connection.features.supports_foreign_keys:
            self.assertForeignKeyExists(
                LocalBookWithM2M._meta.get_field("tags").remote_field.through,
                "tagm2mtest_id",
                "schema_tagm2mtest",
            )
        # Repoint the M2M
        old_field = LocalBookWithM2M._meta.get_field("tags")
        new_field = M2MFieldClass(UniqueTest)
        new_field.contribute_to_class(LocalBookWithM2M, "uniques")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
        # Ensure old M2M is gone
        with self.assertRaises(DatabaseError):
            self.column_classes(
                LocalBookWithM2M._meta.get_field("tags").remote_field.through
            )

        # This model looks like the new model and is used for teardown.
        opts = LocalBookWithM2M._meta
        opts.local_many_to_many.remove(old_field)
        # Ensure the new M2M exists and points to UniqueTest
        if connection.features.supports_foreign_keys:
            self.assertForeignKeyExists(
                new_field.remote_field.through, "uniquetest_id", "schema_uniquetest"
            )

    def test_m2m_repoint(self):
        self._test_m2m_repoint(ManyToManyField)

    def test_m2m_repoint_custom(self):
        self._test_m2m_repoint(CustomManyToManyField)

    def test_m2m_repoint_inherited(self):
        self._test_m2m_repoint(InheritedManyToManyField)

    @isolate_apps("schema")
    def test_m2m_rename_field_in_target_model(self):
        class LocalTagM2MTest(Model):
            title = CharField(max_length=255)

            class Meta:
                app_label = "schema"

        class LocalM2M(Model):
            tags = ManyToManyField(LocalTagM2MTest)

            class Meta:
                app_label = "schema"

        # Create the tables.
        with connection.schema_editor() as editor:
            editor.create_model(LocalM2M)
            editor.create_model(LocalTagM2MTest)
        self.isolated_local_models = [LocalM2M, LocalTagM2MTest]
        # Ensure the m2m table is there.
        self.assertEqual(len(self.column_classes(LocalM2M)), 1)
        # Alter a field in LocalTagM2MTest.
        old_field = LocalTagM2MTest._meta.get_field("title")
        new_field = CharField(max_length=254)
        new_field.contribute_to_class(LocalTagM2MTest, "title1")
        # @isolate_apps() and inner models are needed to have the model
        # relations populated, otherwise this doesn't act as a regression test.
        self.assertEqual(len(new_field.model._meta.related_objects), 1)
        with connection.schema_editor() as editor:
            editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True)
        # Ensure the m2m table is still there.
        self.assertEqual(len(self.column_classes(LocalM2M)), 1)

    @skipUnlessDBFeature(
        "supports_column_check_constraints", "can_introspect_check_constraints"
    )
    def test_check_constraints(self):
        """
        Tests creating/deleting CHECK constraints
        """
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the constraint exists
        constraints = self.get_constraints(Author._meta.db_table)
        if not any(
            details["columns"] == ["height"] and details["check"]
            for details in constraints.values()
        ):
            self.fail("No check constraint for height found")
        # Alter the column to remove it
        old_field = Author._meta.get_field("height")
        new_field = IntegerField(null=True, blank=True)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        for details in constraints.values():
            if details["columns"] == ["height"] and details["check"]:
                self.fail("Check constraint for height found")
        # Alter the column to re-add it
        new_field2 = Author._meta.get_field("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        if not any(
            details["columns"] == ["height"] and details["check"]
            for details in constraints.values()
        ):
            self.fail("No check constraint for height found")

    @skipUnlessDBFeature(
        "supports_column_check_constraints", "can_introspect_check_constraints"
    )
    @isolate_apps("schema")
    def test_check_constraint_timedelta_param(self):
        class DurationModel(Model):
            duration = DurationField()

            class Meta:
                app_label = "schema"

        with connection.schema_editor() as editor:
            editor.create_model(DurationModel)
        self.isolated_local_models = [DurationModel]
        constraint_name = "duration_gte_5_minutes"
        constraint = CheckConstraint(
            check=Q(duration__gt=datetime.timedelta(minutes=5)),
            name=constraint_name,
        )
        DurationModel._meta.constraints = [constraint]
        with connection.schema_editor() as editor:
            editor.add_constraint(DurationModel, constraint)
        constraints = self.get_constraints(DurationModel._meta.db_table)
        self.assertIn(constraint_name, constraints)
        with self.assertRaises(IntegrityError), atomic():
            DurationModel.objects.create(duration=datetime.timedelta(minutes=4))
        DurationModel.objects.create(duration=datetime.timedelta(minutes=10))

    @skipUnlessDBFeature(
        "supports_column_check_constraints", "can_introspect_check_constraints"
    )
    def test_remove_field_check_does_not_remove_meta_constraints(self):
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add the custom check constraint
        constraint = CheckConstraint(
            check=Q(height__gte=0), name="author_height_gte_0_check"
        )
        custom_constraint_name = constraint.name
        Author._meta.constraints = [constraint]
        with connection.schema_editor() as editor:
            editor.add_constraint(Author, constraint)
        # Ensure the constraints exist
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name
            for name, details in constraints.items()
            if details["columns"] == ["height"]
            and details["check"]
            and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
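        # The field-level check (from PositiveIntegerField) and the Meta
        # CheckConstraint are separate database objects; only the former may
        # be dropped when the field type changes.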
        # Alter the column to remove field check
        old_field = Author._meta.get_field("height")
        new_field = IntegerField(null=True, blank=True)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name
            for name, details in constraints.items()
            if details["columns"] == ["height"]
            and details["check"]
            and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 0)
        # Alter the column to re-add field check
        new_field2 = Author._meta.get_field("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name
            for name, details in constraints.items()
            if details["columns"] == ["height"]
            and details["check"]
            and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Drop the check constraint
        with connection.schema_editor() as editor:
            Author._meta.constraints = []
            editor.remove_constraint(Author, constraint)

    def test_unique(self):
        """
        Tests removing and adding unique constraints to a single column.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure the field is unique to begin with
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be non-unique
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(unique=False)
        new_field.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, old_field, new_field, strict=True)
        # Ensure the field is no longer unique
        Tag.objects.create(title="foo", slug="foo")
        Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be unique
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field, new_field2, strict=True)
        # Ensure the field is unique again
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Rename the field
        new_field3 = SlugField(unique=True)
        new_field3.set_attributes_from_name("slug2")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field2, new_field3, strict=True)
        # Ensure the field is still unique
        TagUniqueRename.objects.create(title="foo", slug2="foo")
        with self.assertRaises(IntegrityError):
            TagUniqueRename.objects.create(title="bar", slug2="foo")
        Tag.objects.all().delete()

    def test_unique_name_quoting(self):
        old_table_name = TagUniqueRename._meta.db_table
        try:
            with connection.schema_editor() as editor:
                editor.create_model(TagUniqueRename)
                editor.alter_db_table(TagUniqueRename, old_table_name, "unique-table")
                TagUniqueRename._meta.db_table = "unique-table"
                # This fails if the unique index name isn't quoted.
                editor.alter_unique_together(
                    TagUniqueRename, [], (("title", "slug2"),)
                )
        finally:
            with connection.schema_editor() as editor:
                editor.delete_model(TagUniqueRename)
            TagUniqueRename._meta.db_table = old_table_name

    @isolate_apps("schema")
    @skipUnlessDBFeature("supports_foreign_keys")
    def test_unique_no_unnecessary_fk_drops(self):
        """
        If AlterField isn't selective about dropping foreign key constraints
        when modifying a field with a unique constraint, the AlterField
        incorrectly drops and recreates the Book.author foreign key even
        though it doesn't restrict the field being changed (#29193).
        """

        class Author(Model):
            name = CharField(max_length=254, unique=True)

            class Meta:
                app_label = "schema"

        class Book(Model):
            author = ForeignKey(Author, CASCADE)

            class Meta:
                app_label = "schema"

        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        new_field = CharField(max_length=255, unique=True)
        new_field.model = Author
        new_field.set_attributes_from_name("name")
        with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
            with connection.schema_editor() as editor:
                editor.alter_field(Author, Author._meta.get_field("name"), new_field)
        # One SQL statement is executed to alter the field.
        self.assertEqual(len(cm.records), 1)

    @isolate_apps("schema")
    def test_unique_and_reverse_m2m(self):
        """
        AlterField can modify a unique field when there's a reverse M2M
        relation on the model.
        """

        class Tag(Model):
            title = CharField(max_length=255)
            slug = SlugField(unique=True)

            class Meta:
                app_label = "schema"

        class Book(Model):
            tags = ManyToManyField(Tag, related_name="books")

            class Meta:
                app_label = "schema"

        self.isolated_local_models = [
            Book._meta.get_field("tags").remote_field.through
        ]
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            editor.create_model(Book)
        new_field = SlugField(max_length=75, unique=True)
        new_field.model = Tag
        new_field.set_attributes_from_name("slug")
        with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
            with connection.schema_editor() as editor:
                editor.alter_field(Tag, Tag._meta.get_field("slug"), new_field)
        # One SQL statement is executed to alter the field.
        self.assertEqual(len(cm.records), 1)
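        # Each DDL statement is logged as one record on the
        # "django.db.backends.schema" logger, so the record count above is a
        # proxy for the number of ALTER statements executed.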
        # Ensure that the field is still unique.
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")

    @skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
    def test_remove_field_unique_does_not_remove_meta_constraints(self):
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithUniqueName)
        self.local_models = [AuthorWithUniqueName]
        # Add the custom unique constraint
        constraint = UniqueConstraint(fields=["name"], name="author_name_uniq")
        custom_constraint_name = constraint.name
        AuthorWithUniqueName._meta.constraints = [constraint]
        with connection.schema_editor() as editor:
            editor.add_constraint(AuthorWithUniqueName, constraint)
        # Ensure the constraints exist
        constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name
            for name, details in constraints.items()
            if details["columns"] == ["name"]
            and details["unique"]
            and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Alter the column to remove field uniqueness
        old_field = AuthorWithUniqueName._meta.get_field("name")
        new_field = CharField(max_length=255)
        new_field.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True)
        constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name
            for name, details in constraints.items()
            if details["columns"] == ["name"]
            and details["unique"]
            and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 0)
        # Alter the column to re-add field uniqueness
        new_field2 = AuthorWithUniqueName._meta.get_field("name")
        with connection.schema_editor() as editor:
            editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True)
        constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
        self.assertIn(custom_constraint_name, constraints)
        other_constraints = [
            name
            for name, details in constraints.items()
            if details["columns"] == ["name"]
            and details["unique"]
            and name != custom_constraint_name
        ]
        self.assertEqual(len(other_constraints), 1)
        # Drop the unique constraint
        with connection.schema_editor() as editor:
            AuthorWithUniqueName._meta.constraints = []
            editor.remove_constraint(AuthorWithUniqueName, constraint)
""" # Create the table with connection.schema_editor() as editor: editor.create_model(UniqueTest) # Ensure the fields are unique to begin with UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.create(year=2011, slug="foo") UniqueTest.objects.create(year=2011, slug="bar") with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.all().delete() # Alter the model to its non-unique-together companion with connection.schema_editor() as editor: editor.alter_unique_together( UniqueTest, UniqueTest._meta.unique_together, [] ) # Ensure the fields are no longer unique UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.all().delete() # Alter it back new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_unique_together( UniqueTest, [], UniqueTest._meta.unique_together ) # Ensure the fields are unique again UniqueTest.objects.create(year=2012, slug="foo") with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.all().delete() def test_unique_together_with_fk(self): """ Tests removing and adding unique_together constraints that include a foreign key. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the fields are unique to begin with self.assertEqual(Book._meta.unique_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_unique_together(Book, [], [["author", "title"]]) # Alter it back with connection.schema_editor() as editor: editor.alter_unique_together(Book, [["author", "title"]], []) def test_unique_together_with_fk_with_existing_index(self): """ Tests removing and adding unique_together constraints that include a foreign key, where the foreign key is added after the model is created. 
""" # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithoutAuthor) new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") editor.add_field(BookWithoutAuthor, new_field) # Ensure the fields aren't unique to begin with self.assertEqual(Book._meta.unique_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_unique_together(Book, [], [["author", "title"]]) # Alter it back with connection.schema_editor() as editor: editor.alter_unique_together(Book, [["author", "title"]], []) def _test_composed_index_with_fk(self, index): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) table = Book._meta.db_table self.assertEqual(Book._meta.indexes, []) Book._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(Book, index) self.assertIn(index.name, self.get_constraints(table)) Book._meta.indexes = [] with connection.schema_editor() as editor: editor.remove_index(Book, index) self.assertNotIn(index.name, self.get_constraints(table)) def test_composed_index_with_fk(self): index = Index(fields=["author", "title"], name="book_author_title_idx") self._test_composed_index_with_fk(index) def test_composed_desc_index_with_fk(self): index = Index(fields=["-author", "title"], name="book_author_title_idx") self._test_composed_index_with_fk(index) @skipUnlessDBFeature("supports_expression_indexes") def test_composed_func_index_with_fk(self): index = Index(F("author"), F("title"), name="book_author_title_idx") self._test_composed_index_with_fk(index) @skipUnlessDBFeature("supports_expression_indexes") def test_composed_desc_func_index_with_fk(self): index = Index(F("author").desc(), F("title"), name="book_author_title_idx") self._test_composed_index_with_fk(index) @skipUnlessDBFeature("supports_expression_indexes") def test_composed_func_transform_index_with_fk(self): index = Index(F("title__lower"), name="book_title_lower_idx") with register_lookup(CharField, Lower): self._test_composed_index_with_fk(index) def _test_composed_constraint_with_fk(self, constraint): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) table = Book._meta.db_table self.assertEqual(Book._meta.constraints, []) Book._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(Book, constraint) self.assertIn(constraint.name, self.get_constraints(table)) Book._meta.constraints = [] with connection.schema_editor() as editor: editor.remove_constraint(Book, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) def test_composed_constraint_with_fk(self): constraint = UniqueConstraint( fields=["author", "title"], name="book_author_title_uniq", ) self._test_composed_constraint_with_fk(constraint) @skipUnlessDBFeature( "supports_column_check_constraints", "can_introspect_check_constraints" ) def test_composed_check_constraint_with_fk(self): constraint = CheckConstraint(check=Q(author__gt=0), name="book_author_check") self._test_composed_constraint_with_fk(constraint) @skipUnlessDBFeature("allows_multiple_constraints_on_same_fields") def test_remove_unique_together_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithUniqueNameAndBirthday) self.local_models = [AuthorWithUniqueNameAndBirthday] # Add the custom unique constraint constraint = UniqueConstraint( fields=["name", 
"birthday"], name="author_name_birthday_uniq" ) custom_constraint_name = constraint.name AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint) # Ensure the constraints exist constraints = self.get_constraints( AuthorWithUniqueNameAndBirthday._meta.db_table ) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["unique"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Remove unique together unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together with connection.schema_editor() as editor: editor.alter_unique_together( AuthorWithUniqueNameAndBirthday, unique_together, [] ) constraints = self.get_constraints( AuthorWithUniqueNameAndBirthday._meta.db_table ) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["unique"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Re-add unique together with connection.schema_editor() as editor: editor.alter_unique_together( AuthorWithUniqueNameAndBirthday, [], unique_together ) constraints = self.get_constraints( AuthorWithUniqueNameAndBirthday._meta.db_table ) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["unique"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the unique constraint with connection.schema_editor() as editor: AuthorWithUniqueNameAndBirthday._meta.constraints = [] editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint) def test_unique_constraint(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint(fields=["name"], name="name_uq") # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table self.assertIs(sql.references_table(table), True) self.assertIs(sql.references_column(table, "name"), True) # Remove constraint. with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint(Upper("name").desc(), name="func_upper_uq") # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table constraints = self.get_constraints(table) if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, constraint.name, ["DESC"]) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) # SQL contains a database function. self.assertIs(sql.references_column(table, "name"), True) self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql)) # Remove constraint. 
with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_composite_func_unique_constraint(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithSlug) constraint = UniqueConstraint( Upper("title"), Lower("slug"), name="func_upper_lower_unq", ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(BookWithSlug, constraint) sql = constraint.create_sql(BookWithSlug, editor) table = BookWithSlug._meta.db_table constraints = self.get_constraints(table) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) # SQL contains database functions. self.assertIs(sql.references_column(table, "title"), True) self.assertIs(sql.references_column(table, "slug"), True) sql = str(sql) self.assertIn("UPPER(%s)" % editor.quote_name("title"), sql) self.assertIn("LOWER(%s)" % editor.quote_name("slug"), sql) self.assertLess(sql.index("UPPER"), sql.index("LOWER")) # Remove constraint. with connection.schema_editor() as editor: editor.remove_constraint(BookWithSlug, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_unique_constraint_field_and_expression(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint( F("height").desc(), "uuid", Lower("name").asc(), name="func_f_lower_field_unq", ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, constraint.name, ["DESC", "ASC", "ASC"]) constraints = self.get_constraints(table) self.assertIs(constraints[constraint.name]["unique"], True) self.assertEqual(len(constraints[constraint.name]["columns"]), 3) self.assertEqual(constraints[constraint.name]["columns"][1], "uuid") # SQL contains database functions and columns. self.assertIs(sql.references_column(table, "height"), True) self.assertIs(sql.references_column(table, "name"), True) self.assertIs(sql.references_column(table, "uuid"), True) self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql)) # Remove constraint. with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes", "supports_partial_indexes") def test_func_unique_constraint_partial(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint( Upper("name"), name="func_upper_cond_weight_uq", condition=Q(weight__isnull=False), ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table constraints = self.get_constraints(table) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) self.assertIs(sql.references_column(table, "name"), True) self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql)) self.assertIn( "WHERE %s IS NOT NULL" % editor.quote_name("weight"), str(sql), ) # Remove constraint. 
with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes", "supports_covering_indexes") def test_func_unique_constraint_covering(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint( Upper("name"), name="func_upper_covering_uq", include=["weight", "height"], ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table constraints = self.get_constraints(table) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) self.assertEqual( constraints[constraint.name]["columns"], [None, "weight", "height"], ) self.assertIs(sql.references_column(table, "name"), True) self.assertIs(sql.references_column(table, "weight"), True) self.assertIs(sql.references_column(table, "height"), True) self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql)) self.assertIn( "INCLUDE (%s, %s)" % ( editor.quote_name("weight"), editor.quote_name("height"), ), str(sql), ) # Remove constraint. with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_lookups(self): with connection.schema_editor() as editor: editor.create_model(Author) with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs): constraint = UniqueConstraint( F("name__lower"), F("weight__abs"), name="func_lower_abs_lookup_uq", ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table constraints = self.get_constraints(table) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) # SQL contains columns. self.assertIs(sql.references_column(table, "name"), True) self.assertIs(sql.references_column(table, "weight"), True) # Remove constraint. with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_collate(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("This backend does not support case-insensitive collations.") with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithSlug) constraint = UniqueConstraint( Collate(F("title"), collation=collation).desc(), Collate("slug", collation=collation), name="func_collate_uq", ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(BookWithSlug, constraint) sql = constraint.create_sql(BookWithSlug, editor) table = BookWithSlug._meta.db_table constraints = self.get_constraints(table) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, constraint.name, ["DESC", "ASC"]) # SQL contains columns and a collation. 
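        # Rough shape of the generated SQL (illustrative only; quoting and
        # the collation name vary by backend):
        #   CREATE UNIQUE INDEX "func_collate_uq"
        #   ON <table> ("title" COLLATE <collation> DESC,
        #               "slug" COLLATE <collation>)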
self.assertIs(sql.references_column(table, "title"), True) self.assertIs(sql.references_column(table, "slug"), True) self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql)) # Remove constraint. with connection.schema_editor() as editor: editor.remove_constraint(BookWithSlug, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipIfDBFeature("supports_expression_indexes") def test_func_unique_constraint_unsupported(self): # UniqueConstraint is ignored on databases that don't support indexes on # expressions. with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint(F("name"), name="func_name_uq") with connection.schema_editor() as editor, self.assertNumQueries(0): self.assertIsNone(editor.add_constraint(Author, constraint)) self.assertIsNone(editor.remove_constraint(Author, constraint)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_nonexistent_field(self): constraint = UniqueConstraint(Lower("nonexistent"), name="func_nonexistent_uq") msg = ( "Cannot resolve keyword 'nonexistent' into field. Choices are: " "height, id, name, uuid, weight" ) with self.assertRaisesMessage(FieldError, msg): with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_nondeterministic(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint(Random(), name="func_random_uq") with connection.schema_editor() as editor: with self.assertRaises(DatabaseError): editor.add_constraint(Author, constraint) @ignore_warnings(category=RemovedInDjango51Warning) def test_index_together(self): """ Tests removing and adding index_together constraints on a model. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure there's no index on the year/slug columns first self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c["columns"] == ["slug", "title"] ), False, ) # Alter the model to add an index with connection.schema_editor() as editor: editor.alter_index_together(Tag, [], [("slug", "title")]) # Ensure there is now an index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c["columns"] == ["slug", "title"] ), True, ) # Alter it back new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_index_together(Tag, [("slug", "title")], []) # Ensure there's no index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c["columns"] == ["slug", "title"] ), False, ) @ignore_warnings(category=RemovedInDjango51Warning) def test_index_together_with_fk(self): """ Tests removing and adding index_together constraints that include a foreign key. 
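        ``index_together`` is deprecated (hence the ``RemovedInDjango51Warning``
        suppression), but the alteration path still needs coverage.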
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the fields are unique to begin with self.assertEqual(Book._meta.index_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_index_together(Book, [], [["author", "title"]]) # Alter it back with connection.schema_editor() as editor: editor.alter_index_together(Book, [["author", "title"]], []) @ignore_warnings(category=RemovedInDjango51Warning) @isolate_apps("schema") def test_create_index_together(self): """ Tests creating models with index_together already defined """ class TagIndexed(Model): title = CharField(max_length=255) slug = SlugField(unique=True) class Meta: app_label = "schema" index_together = [["slug", "title"]] # Create the table with connection.schema_editor() as editor: editor.create_model(TagIndexed) self.isolated_local_models = [TagIndexed] # Ensure there is an index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tagindexed").values() if c["columns"] == ["slug", "title"] ), True, ) @skipUnlessDBFeature("allows_multiple_constraints_on_same_fields") @ignore_warnings(category=RemovedInDjango51Warning) @isolate_apps("schema") def test_remove_index_together_does_not_remove_meta_indexes(self): class AuthorWithIndexedNameAndBirthday(Model): name = CharField(max_length=255) birthday = DateField() class Meta: app_label = "schema" index_together = [["name", "birthday"]] with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedNameAndBirthday) self.isolated_local_models = [AuthorWithIndexedNameAndBirthday] # Add the custom index index = Index(fields=["name", "birthday"], name="author_name_birthday_idx") custom_index_name = index.name AuthorWithIndexedNameAndBirthday._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedNameAndBirthday, index) # Ensure the indexes exist constraints = self.get_constraints( AuthorWithIndexedNameAndBirthday._meta.db_table ) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["index"] and name != custom_index_name ] self.assertEqual(len(other_constraints), 1) # Remove index together index_together = AuthorWithIndexedNameAndBirthday._meta.index_together with connection.schema_editor() as editor: editor.alter_index_together( AuthorWithIndexedNameAndBirthday, index_together, [] ) constraints = self.get_constraints( AuthorWithIndexedNameAndBirthday._meta.db_table ) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["index"] and name != custom_index_name ] self.assertEqual(len(other_constraints), 0) # Re-add index together with connection.schema_editor() as editor: editor.alter_index_together( AuthorWithIndexedNameAndBirthday, [], index_together ) constraints = self.get_constraints( AuthorWithIndexedNameAndBirthday._meta.db_table ) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["index"] and name != custom_index_name ] self.assertEqual(len(other_constraints), 1) # Drop the index with connection.schema_editor() as editor: AuthorWithIndexedNameAndBirthday._meta.indexes = [] editor.remove_index(AuthorWithIndexedNameAndBirthday, index) 
@isolate_apps("schema") def test_db_table(self): """ Tests renaming of the table """ class Author(Model): name = CharField(max_length=255) class Meta: app_label = "schema" class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = "schema" # Create the table and one referring it. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there to begin with columns = self.column_classes(Author) self.assertEqual( columns["name"][0], connection.features.introspected_field_types["CharField"], ) # Alter the table with connection.schema_editor( atomic=connection.features.supports_atomic_references_rename ) as editor: editor.alter_db_table(Author, "schema_author", "schema_otherauthor") Author._meta.db_table = "schema_otherauthor" columns = self.column_classes(Author) self.assertEqual( columns["name"][0], connection.features.introspected_field_types["CharField"], ) # Ensure the foreign key reference was updated self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor") # Alter the table again with connection.schema_editor( atomic=connection.features.supports_atomic_references_rename ) as editor: editor.alter_db_table(Author, "schema_otherauthor", "schema_author") # Ensure the table is still there Author._meta.db_table = "schema_author" columns = self.column_classes(Author) self.assertEqual( columns["name"][0], connection.features.introspected_field_types["CharField"], ) def test_add_remove_index(self): """ Tests index addition and removal """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the table is there and has no index self.assertNotIn("title", self.get_indexes(Author._meta.db_table)) # Add the index index = Index(fields=["name"], name="author_title_idx") with connection.schema_editor() as editor: editor.add_index(Author, index) self.assertIn("name", self.get_indexes(Author._meta.db_table)) # Drop the index with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn("name", self.get_indexes(Author._meta.db_table)) def test_remove_db_index_doesnt_remove_custom_indexes(self): """ Changing db_index to False doesn't remove indexes from Meta.indexes. 
""" with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedName) self.local_models = [AuthorWithIndexedName] # Ensure the table has its index self.assertIn("name", self.get_indexes(AuthorWithIndexedName._meta.db_table)) # Add the custom index index = Index(fields=["-name"], name="author_name_idx") author_index_name = index.name with connection.schema_editor() as editor: db_index_name = editor._create_index_name( table_name=AuthorWithIndexedName._meta.db_table, column_names=("name",), ) try: AuthorWithIndexedName._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedName, index) old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertIn(author_index_name, old_constraints) self.assertIn(db_index_name, old_constraints) # Change name field to db_index=False old_field = AuthorWithIndexedName._meta.get_field("name") new_field = CharField(max_length=255) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field( AuthorWithIndexedName, old_field, new_field, strict=True ) new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertNotIn(db_index_name, new_constraints) # The index from Meta.indexes is still in the database. self.assertIn(author_index_name, new_constraints) # Drop the index with connection.schema_editor() as editor: editor.remove_index(AuthorWithIndexedName, index) finally: AuthorWithIndexedName._meta.indexes = [] def test_order_index(self): """ Indexes defined with ordering (ASC/DESC) defined on column """ with connection.schema_editor() as editor: editor.create_model(Author) # The table doesn't have an index self.assertNotIn("title", self.get_indexes(Author._meta.db_table)) index_name = "author_name_idx" # Add the index index = Index(fields=["name", "-weight"], name=index_name) with connection.schema_editor() as editor: editor.add_index(Author, index) if connection.features.supports_index_column_ordering: self.assertIndexOrder(Author._meta.db_table, index_name, ["ASC", "DESC"]) # Drop the index with connection.schema_editor() as editor: editor.remove_index(Author, index) def test_indexes(self): """ Tests creation/altering of indexes """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there and has the right index self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to remove the index old_field = Book._meta.get_field("title") new_field = CharField(max_length=100, db_index=False) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the table is there and has no index self.assertNotIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to re-add the index new_field2 = Book._meta.get_field("title") with connection.schema_editor() as editor: editor.alter_field(Book, new_field, new_field2, strict=True) # Ensure the table is there and has the index again self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Add a unique column, verify that creates an implicit index new_field3 = BookWithSlug._meta.get_field("slug") with connection.schema_editor() as editor: editor.add_field(Book, new_field3) self.assertIn( "slug", self.get_uniques(Book._meta.db_table), ) # Remove the unique, check the index goes with it new_field4 = CharField(max_length=20, unique=False) 
new_field4.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True) self.assertNotIn( "slug", self.get_uniques(Book._meta.db_table), ) def test_text_field_with_db_index(self): with connection.schema_editor() as editor: editor.create_model(AuthorTextFieldWithIndex) # The text_field index is present if the database supports it. assertion = ( self.assertIn if connection.features.supports_index_on_text_field else self.assertNotIn ) assertion( "text_field", self.get_indexes(AuthorTextFieldWithIndex._meta.db_table) ) def _index_expressions_wrappers(self): index_expression = IndexExpression() index_expression.set_wrapper_classes(connection) return ", ".join( [ wrapper_cls.__qualname__ for wrapper_cls in index_expression.wrapper_classes ] ) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_multiple_wrapper_references(self): index = Index(OrderBy(F("name").desc(), descending=True), name="name") msg = ( "Multiple references to %s can't be used in an indexed expression." % self._index_expressions_wrappers() ) with connection.schema_editor() as editor: with self.assertRaisesMessage(ValueError, msg): editor.add_index(Author, index) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_invalid_topmost_expressions(self): index = Index(Upper(F("name").desc()), name="name") msg = ( "%s must be topmost expressions in an indexed expression." % self._index_expressions_wrappers() ) with connection.schema_editor() as editor: with self.assertRaisesMessage(ValueError, msg): editor.add_index(Author, index) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index(self): with connection.schema_editor() as editor: editor.create_model(Author) index = Index(Lower("name").desc(), name="func_lower_idx") # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, index.name, ["DESC"]) # SQL contains a database function. self.assertIs(sql.references_column(table, "name"), True) self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql)) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_f(self): with connection.schema_editor() as editor: editor.create_model(Tag) index = Index("slug", F("title").desc(), name="func_f_idx") # Add index. with connection.schema_editor() as editor: editor.add_index(Tag, index) sql = index.create_sql(Tag, editor) table = Tag._meta.db_table self.assertIn(index.name, self.get_constraints(table)) if connection.features.supports_index_column_ordering: self.assertIndexOrder(Tag._meta.db_table, index.name, ["ASC", "DESC"]) # SQL contains columns. self.assertIs(sql.references_column(table, "slug"), True) self.assertIs(sql.references_column(table, "title"), True) # Remove index. 
with connection.schema_editor() as editor: editor.remove_index(Tag, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_lookups(self): with connection.schema_editor() as editor: editor.create_model(Author) with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs): index = Index( F("name__lower"), F("weight__abs"), name="func_lower_abs_lookup_idx", ) # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table self.assertIn(index.name, self.get_constraints(table)) # SQL contains columns. self.assertIs(sql.references_column(table, "name"), True) self.assertIs(sql.references_column(table, "weight"), True) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_composite_func_index(self): with connection.schema_editor() as editor: editor.create_model(Author) index = Index(Lower("name"), Upper("name"), name="func_lower_upper_idx") # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table self.assertIn(index.name, self.get_constraints(table)) # SQL contains database functions. self.assertIs(sql.references_column(table, "name"), True) sql = str(sql) self.assertIn("LOWER(%s)" % editor.quote_name("name"), sql) self.assertIn("UPPER(%s)" % editor.quote_name("name"), sql) self.assertLess(sql.index("LOWER"), sql.index("UPPER")) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_composite_func_index_field_and_expression(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) index = Index( F("author").desc(), Lower("title").asc(), "pub_date", name="func_f_lower_field_idx", ) # Add index. with connection.schema_editor() as editor: editor.add_index(Book, index) sql = index.create_sql(Book, editor) table = Book._meta.db_table constraints = self.get_constraints(table) if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, index.name, ["DESC", "ASC", "ASC"]) self.assertEqual(len(constraints[index.name]["columns"]), 3) self.assertEqual(constraints[index.name]["columns"][2], "pub_date") # SQL contains database functions and columns. self.assertIs(sql.references_column(table, "author_id"), True) self.assertIs(sql.references_column(table, "title"), True) self.assertIs(sql.references_column(table, "pub_date"), True) self.assertIn("LOWER(%s)" % editor.quote_name("title"), str(sql)) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Book, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") @isolate_apps("schema") def test_func_index_f_decimalfield(self): class Node(Model): value = DecimalField(max_digits=5, decimal_places=2) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Node) index = Index(F("value"), name="func_f_decimalfield_idx") # Add index. 
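        # Regression guard: the indexed expression should reference the
        # column directly; the assertion below checks that no CAST is
        # wrapped around it.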
with connection.schema_editor() as editor: editor.add_index(Node, index) sql = index.create_sql(Node, editor) table = Node._meta.db_table self.assertIn(index.name, self.get_constraints(table)) self.assertIs(sql.references_column(table, "value"), True) # SQL doesn't contain casting. self.assertNotIn("CAST", str(sql)) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Node, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_cast(self): with connection.schema_editor() as editor: editor.create_model(Author) index = Index(Cast("weight", FloatField()), name="func_cast_idx") # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table self.assertIn(index.name, self.get_constraints(table)) self.assertIs(sql.references_column(table, "weight"), True) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_collate(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("This backend does not support case-insensitive collations.") with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithSlug) index = Index( Collate(F("title"), collation=collation).desc(), Collate("slug", collation=collation), name="func_collate_idx", ) # Add index. with connection.schema_editor() as editor: editor.add_index(BookWithSlug, index) sql = index.create_sql(BookWithSlug, editor) table = Book._meta.db_table self.assertIn(index.name, self.get_constraints(table)) if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, index.name, ["DESC", "ASC"]) # SQL contains columns and a collation. self.assertIs(sql.references_column(table, "title"), True) self.assertIs(sql.references_column(table, "slug"), True) self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql)) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Book, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") @skipIfDBFeature("collate_as_index_expression") def test_func_index_collate_f_ordered(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("This backend does not support case-insensitive collations.") with connection.schema_editor() as editor: editor.create_model(Author) index = Index( Collate(F("name").desc(), collation=collation), name="func_collate_f_desc_idx", ) # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table self.assertIn(index.name, self.get_constraints(table)) if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, index.name, ["DESC"]) # SQL contains columns and a collation. self.assertIs(sql.references_column(table, "name"), True) self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql)) # Remove index. 
with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_calc(self): with connection.schema_editor() as editor: editor.create_model(Author) index = Index(F("height") / (F("weight") + Value(5)), name="func_calc_idx") # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table self.assertIn(index.name, self.get_constraints(table)) # SQL contains columns and expressions. self.assertIs(sql.references_column(table, "height"), True) self.assertIs(sql.references_column(table, "weight"), True) sql = str(sql) self.assertIs( sql.index(editor.quote_name("height")) < sql.index("/") < sql.index(editor.quote_name("weight")) < sql.index("+") < sql.index("5"), True, ) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes", "supports_json_field") @isolate_apps("schema") def test_func_index_json_key_transform(self): class JSONModel(Model): field = JSONField() class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(JSONModel) self.isolated_local_models = [JSONModel] index = Index("field__some_key", name="func_json_key_idx") with connection.schema_editor() as editor: editor.add_index(JSONModel, index) sql = index.create_sql(JSONModel, editor) table = JSONModel._meta.db_table self.assertIn(index.name, self.get_constraints(table)) self.assertIs(sql.references_column(table, "field"), True) with connection.schema_editor() as editor: editor.remove_index(JSONModel, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes", "supports_json_field") @isolate_apps("schema") def test_func_index_json_key_transform_cast(self): class JSONModel(Model): field = JSONField() class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(JSONModel) self.isolated_local_models = [JSONModel] index = Index( Cast(KeyTextTransform("some_key", "field"), IntegerField()), name="func_json_key_cast_idx", ) with connection.schema_editor() as editor: editor.add_index(JSONModel, index) sql = index.create_sql(JSONModel, editor) table = JSONModel._meta.db_table self.assertIn(index.name, self.get_constraints(table)) self.assertIs(sql.references_column(table, "field"), True) with connection.schema_editor() as editor: editor.remove_index(JSONModel, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipIfDBFeature("supports_expression_indexes") def test_func_index_unsupported(self): # Index is ignored on databases that don't support indexes on # expressions. with connection.schema_editor() as editor: editor.create_model(Author) index = Index(F("name"), name="random_idx") with connection.schema_editor() as editor, self.assertNumQueries(0): self.assertIsNone(editor.add_index(Author, index)) self.assertIsNone(editor.remove_index(Author, index)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_nonexistent_field(self): index = Index(Lower("nonexistent"), name="func_nonexistent_idx") msg = ( "Cannot resolve keyword 'nonexistent' into field. 
Choices are: " "height, id, name, uuid, weight" ) with self.assertRaisesMessage(FieldError, msg): with connection.schema_editor() as editor: editor.add_index(Author, index) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_nondeterministic(self): with connection.schema_editor() as editor: editor.create_model(Author) index = Index(Random(), name="func_random_idx") with connection.schema_editor() as editor: with self.assertRaises(DatabaseError): editor.add_index(Author, index) def test_primary_key(self): """ Tests altering of the primary key """ # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure the table is there and has the right PK self.assertEqual(self.get_primary_key(Tag._meta.db_table), "id") # Alter to change the PK id_field = Tag._meta.get_field("id") old_field = Tag._meta.get_field("slug") new_field = SlugField(primary_key=True) new_field.set_attributes_from_name("slug") new_field.model = Tag with connection.schema_editor() as editor: editor.remove_field(Tag, id_field) editor.alter_field(Tag, old_field, new_field) # Ensure the PK changed self.assertNotIn( "id", self.get_indexes(Tag._meta.db_table), ) self.assertEqual(self.get_primary_key(Tag._meta.db_table), "slug") def test_alter_primary_key_the_same_name(self): with connection.schema_editor() as editor: editor.create_model(Thing) old_field = Thing._meta.get_field("when") new_field = CharField(max_length=2, primary_key=True) new_field.set_attributes_from_name("when") new_field.model = Thing with connection.schema_editor() as editor: editor.alter_field(Thing, old_field, new_field, strict=True) self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when") with connection.schema_editor() as editor: editor.alter_field(Thing, new_field, old_field, strict=True) self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when") def test_context_manager_exit(self): """ Ensures transaction is correctly closed when an error occurs inside a SchemaEditor context. """ class SomeError(Exception): pass try: with connection.schema_editor(): raise SomeError except SomeError: self.assertFalse(connection.in_atomic_block) @skipIfDBFeature("can_rollback_ddl") def test_unsupported_transactional_ddl_disallowed(self): message = ( "Executing DDL statements while in a transaction on databases " "that can't perform a rollback is prohibited." ) with atomic(), connection.schema_editor() as editor: with self.assertRaisesMessage(TransactionManagementError, message): editor.execute( editor.sql_create_table % {"table": "foo", "definition": ""} ) @skipUnlessDBFeature("supports_foreign_keys", "indexes_foreign_keys") def test_foreign_key_index_long_names_regression(self): """ Regression test for #21497. Only affects databases that supports foreign keys. """ # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Find the properly shortened column name column_name = connection.ops.quote_name( "author_foreign_key_with_really_long_field_name_id" ) column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase # Ensure the table is there and has an index on the column self.assertIn( column_name, self.get_indexes(BookWithLongName._meta.db_table), ) @skipUnlessDBFeature("supports_foreign_keys") def test_add_foreign_key_long_names(self): """ Regression test for #23009. Only affects databases that supports foreign keys. 
""" # Create the initial tables with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Add a second FK, this would fail due to long ref name before the fix new_field = ForeignKey( AuthorWithEvenLongerName, CASCADE, related_name="something" ) new_field.set_attributes_from_name( "author_other_really_long_named_i_mean_so_long_fk" ) with connection.schema_editor() as editor: editor.add_field(BookWithLongName, new_field) @isolate_apps("schema") @skipUnlessDBFeature("supports_foreign_keys") def test_add_foreign_key_quoted_db_table(self): class Author(Model): class Meta: db_table = '"table_author_double_quoted"' app_label = "schema" class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) self.isolated_local_models = [Author] if connection.vendor == "mysql": self.assertForeignKeyExists( Book, "author_id", '"table_author_double_quoted"' ) else: self.assertForeignKeyExists(Book, "author_id", "table_author_double_quoted") def test_add_foreign_object(self): with connection.schema_editor() as editor: editor.create_model(BookForeignObj) self.local_models = [BookForeignObj] new_field = ForeignObject( Author, on_delete=CASCADE, from_fields=["author_id"], to_fields=["id"] ) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.add_field(BookForeignObj, new_field) def test_creation_deletion_reserved_names(self): """ Tries creating a model's table, and then deleting it when it has a SQL reserved name. """ # Create the table with connection.schema_editor() as editor: try: editor.create_model(Thing) except OperationalError as e: self.fail( "Errors when applying initial migration for a model " "with a table named after an SQL reserved word: %s" % e ) # The table is there list(Thing.objects.all()) # Clean up that table with connection.schema_editor() as editor: editor.delete_model(Thing) # The table is gone with self.assertRaises(DatabaseError): list(Thing.objects.all()) def test_remove_constraints_capital_letters(self): """ #23065 - Constraint names must be quoted if they contain capital letters. 
""" def get_field(*args, field_class=IntegerField, **kwargs): kwargs["db_column"] = "CamelCase" field = field_class(*args, **kwargs) field.set_attributes_from_name("CamelCase") return field model = Author field = get_field() table = model._meta.db_table column = field.column identifier_converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: editor.create_model(model) editor.add_field(model, field) constraint_name = "CamelCaseIndex" expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_index % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "using": "", "columns": editor.quote_name(column), "extra": "", "condition": "", "include": "", } ) self.assertIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) editor.alter_field(model, get_field(db_index=True), field, strict=True) self.assertNotIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) constraint_name = "CamelCaseUniqConstraint" expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor._create_unique_sql(model, [field], constraint_name)) self.assertIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) editor.alter_field(model, get_field(unique=True), field, strict=True) self.assertNotIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) if editor.sql_create_fk and connection.features.can_introspect_foreign_keys: constraint_name = "CamelCaseFKConstraint" expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_fk % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "column": editor.quote_name(column), "to_table": editor.quote_name(table), "to_column": editor.quote_name(model._meta.auto_field.column), "deferrable": connection.ops.deferrable_sql(), } ) self.assertIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) editor.alter_field( model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True, ) self.assertNotIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) def test_add_field_use_effective_default(self): """ #23987 - effective_default() should be used as the field default when adding a new field. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name="Anonymous1") # Add new CharField to ensure default will be used from effective_default new_field = CharField(max_length=15, blank=True) new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual( item[0], None if connection.features.interprets_empty_strings_as_nulls else "", ) def test_add_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name="Anonymous1") # Add new CharField with a default new_field = CharField(max_length=15, blank=True, default="surname default") new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], "surname default") # And that the default is no longer set in the database. field = next( f for f in connection.introspection.get_table_description( cursor, "schema_author" ) if f.name == "surname" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) def test_add_field_default_nullable(self): with connection.schema_editor() as editor: editor.create_model(Author) # Add new nullable CharField with a default. new_field = CharField(max_length=15, blank=True, null=True, default="surname") new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) Author.objects.create(name="Anonymous1") with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertIsNone(item[0]) field = next( f for f in connection.introspection.get_table_description( cursor, "schema_author", ) if f.name == "surname" ) # Field is still nullable. self.assertTrue(field.null_ok) # The database default is no longer set. if connection.features.can_introspect_default: self.assertIn(field.default, ["NULL", None]) def test_add_textfield_default_nullable(self): with connection.schema_editor() as editor: editor.create_model(Author) # Add new nullable TextField with a default. new_field = TextField(blank=True, null=True, default="text") new_field.set_attributes_from_name("description") with connection.schema_editor() as editor: editor.add_field(Author, new_field) Author.objects.create(name="Anonymous1") with connection.cursor() as cursor: cursor.execute("SELECT description FROM schema_author;") item = cursor.fetchall()[0] self.assertIsNone(item[0]) field = next( f for f in connection.introspection.get_table_description( cursor, "schema_author", ) if f.name == "description" ) # Field is still nullable. self.assertTrue(field.null_ok) # The database default is no longer set. 
if connection.features.can_introspect_default: self.assertIn(field.default, ["NULL", None]) def test_alter_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name="Anonymous1") self.assertIsNone(Author.objects.get().height) old_field = Author._meta.get_field("height") # The default from the new field is used in updating existing rows. new_field = IntegerField(blank=True, default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual(Author.objects.get().height, 42) # The database default should be removed. with connection.cursor() as cursor: field = next( f for f in connection.introspection.get_table_description( cursor, "schema_author" ) if f.name == "height" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) def test_alter_field_default_doesnt_perform_queries(self): """ No queries are performed if a field default changes and the field's not changing from null to non-null. """ with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) old_field = AuthorWithDefaultHeight._meta.get_field("height") new_default = old_field.default * 2 new_field = PositiveIntegerField(null=True, blank=True, default=new_default) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field( AuthorWithDefaultHeight, old_field, new_field, strict=True ) @skipUnlessDBFeature("supports_foreign_keys") def test_alter_field_fk_attributes_noop(self): """ No queries are performed when changing field attributes that don't affect the schema. 
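        Attributes such as ``related_name``, ``validators``, and ``help_text``
        only affect Python-level behavior, not the database schema.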
""" with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) old_field = Book._meta.get_field("author") new_field = ForeignKey( Author, blank=True, editable=False, error_messages={"invalid": "error message"}, help_text="help text", limit_choices_to={"limit": "choice"}, on_delete=PROTECT, related_name="related_name", related_query_name="related_query_name", validators=[lambda x: x], verbose_name="verbose name", ) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(Book, old_field, new_field, strict=True) with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(Book, new_field, old_field, strict=True) def test_alter_field_choices_noop(self): with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field("name") new_field = CharField( choices=(("Jane", "Jane"), ("Joe", "Joe")), max_length=255, ) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(Author, old_field, new_field, strict=True) with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(Author, new_field, old_field, strict=True) def test_add_textfield_unhashable_default(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name="Anonymous1") # Create a field that has an unhashable default new_field = TextField(default={}) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.add_field(Author, new_field) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_add_indexed_charfield(self): field = CharField(max_length=255, db_index=True) field.set_attributes_from_name("nom_de_plume") with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, "nom_de_plume"), [ "schema_author_nom_de_plume_7570a851", "schema_author_nom_de_plume_7570a851_like", ], ) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_add_unique_charfield(self): field = CharField(max_length=255, unique=True) field.set_attributes_from_name("nom_de_plume") with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, "nom_de_plume"), [ "schema_author_nom_de_plume_7570a851_like", "schema_author_nom_de_plume_key", ], ) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_add_index_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, "name"), []) # Alter to add db_index=True and create 2 indexes. 
old_field = Author._meta.get_field("name") new_field = CharField(max_length=255, db_index=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, "name"), ["schema_author_name_1fbc5617", "schema_author_name_1fbc5617_like"], ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, "name"), []) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_add_unique_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, "name"), []) # Alter to add unique=True and create 2 indexes. old_field = Author._meta.get_field("name") new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, "name"), ["schema_author_name_1fbc5617_like", "schema_author_name_1fbc5617_uniq"], ) # Remove unique=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, "name"), []) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_add_index_to_textfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Note) self.assertEqual(self.get_constraints_for_column(Note, "info"), []) # Alter to add db_index=True and create 2 indexes. old_field = Note._meta.get_field("info") new_field = TextField(db_index=True) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Note, "info"), ["schema_note_info_4b0ea695", "schema_note_info_4b0ea695_like"], ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Note, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Note, "info"), []) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_add_unique_to_charfield_with_db_index(self): # Create the table and verify initial indexes. 
with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"], ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field("title") new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"], ) # Alter to remove unique=True (should drop unique index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"], ) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_remove_unique_and_db_index_from_charfield(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"], ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field("title") new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"], ) # Alter to remove both unique=True and db_index=True (should drop all indexes) new_field2 = CharField(max_length=100) new_field2.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), [] ) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_swap_unique_and_db_index_with_charfield(self): # Create the table and verify initial indexes. 
with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"], ) # Alter to set unique=True and remove db_index=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field("title") new_field = CharField(max_length=100, unique=True) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"], ) # Alter to set db_index=True and remove unique=True (should restore index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"], ) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_add_db_index_to_charfield_with_unique(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(Tag) self.assertEqual( self.get_constraints_for_column(Tag, "slug"), ["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"], ) # Alter to add db_index=True old_field = Tag._meta.get_field("slug") new_field = SlugField(db_index=True, unique=True) new_field.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, "slug"), ["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"], ) # Alter to remove db_index=True new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, "slug"), ["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"], ) def test_alter_field_add_index_to_integerfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, "weight"), []) # Alter to add db_index=True and create index. old_field = Author._meta.get_field("weight") new_field = IntegerField(null=True, db_index=True) new_field.set_attributes_from_name("weight") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, "weight"), ["schema_author_weight_587740f9"], ) # Remove db_index=True to drop index. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, "weight"), []) def test_alter_pk_with_self_referential_field(self): """ Changing the primary key field name of a model with a self-referential foreign key (#26384). 
""" with connection.schema_editor() as editor: editor.create_model(Node) old_field = Node._meta.get_field("node_id") new_field = AutoField(primary_key=True) new_field.set_attributes_from_name("id") with connection.schema_editor() as editor: editor.alter_field(Node, old_field, new_field, strict=True) self.assertForeignKeyExists(Node, "parent_id", Node._meta.db_table) @mock.patch("django.db.backends.base.schema.datetime") @mock.patch("django.db.backends.base.schema.timezone") def test_add_datefield_and_datetimefield_use_effective_default( self, mocked_datetime, mocked_tz ): """ effective_default() should be used for DateField, DateTimeField, and TimeField if auto_now or auto_now_add is set (#25005). """ now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1) now_tz = datetime.datetime( month=1, day=1, year=2000, hour=1, minute=1, tzinfo=datetime.timezone.utc ) mocked_datetime.now = mock.MagicMock(return_value=now) mocked_tz.now = mock.MagicMock(return_value=now_tz) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Check auto_now/auto_now_add attributes are not defined columns = self.column_classes(Author) self.assertNotIn("dob_auto_now", columns) self.assertNotIn("dob_auto_now_add", columns) self.assertNotIn("dtob_auto_now", columns) self.assertNotIn("dtob_auto_now_add", columns) self.assertNotIn("tob_auto_now", columns) self.assertNotIn("tob_auto_now_add", columns) # Create a row Author.objects.create(name="Anonymous1") # Ensure fields were added with the correct defaults dob_auto_now = DateField(auto_now=True) dob_auto_now.set_attributes_from_name("dob_auto_now") self.check_added_field_default( editor, Author, dob_auto_now, "dob_auto_now", now.date(), cast_function=lambda x: x.date(), ) dob_auto_now_add = DateField(auto_now_add=True) dob_auto_now_add.set_attributes_from_name("dob_auto_now_add") self.check_added_field_default( editor, Author, dob_auto_now_add, "dob_auto_now_add", now.date(), cast_function=lambda x: x.date(), ) dtob_auto_now = DateTimeField(auto_now=True) dtob_auto_now.set_attributes_from_name("dtob_auto_now") self.check_added_field_default( editor, Author, dtob_auto_now, "dtob_auto_now", now, ) dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True) dt_tm_of_birth_auto_now_add.set_attributes_from_name("dtob_auto_now_add") self.check_added_field_default( editor, Author, dt_tm_of_birth_auto_now_add, "dtob_auto_now_add", now, ) tob_auto_now = TimeField(auto_now=True) tob_auto_now.set_attributes_from_name("tob_auto_now") self.check_added_field_default( editor, Author, tob_auto_now, "tob_auto_now", now.time(), cast_function=lambda x: x.time(), ) tob_auto_now_add = TimeField(auto_now_add=True) tob_auto_now_add.set_attributes_from_name("tob_auto_now_add") self.check_added_field_default( editor, Author, tob_auto_now_add, "tob_auto_now_add", now.time(), cast_function=lambda x: x.time(), ) def test_namespaced_db_table_create_index_name(self): """ Table names are stripped of their namespace/schema before being used to generate index names. 
""" with connection.schema_editor() as editor: max_name_length = connection.ops.max_name_length() or 200 namespace = "n" * max_name_length table_name = "t" * max_name_length namespaced_table_name = '"%s"."%s"' % (namespace, table_name) self.assertEqual( editor._create_index_name(table_name, []), editor._create_index_name(namespaced_table_name, []), ) @unittest.skipUnless( connection.vendor == "oracle", "Oracle specific db_table syntax" ) def test_creation_with_db_table_double_quotes(self): oracle_user = connection.creation._test_database_user() class Student(Model): name = CharField(max_length=30) class Meta: app_label = "schema" apps = new_apps db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user class Document(Model): name = CharField(max_length=30) students = ManyToManyField(Student) class Meta: app_label = "schema" apps = new_apps db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user self.isolated_local_models = [Student, Document] with connection.schema_editor() as editor: editor.create_model(Student) editor.create_model(Document) doc = Document.objects.create(name="Test Name") student = Student.objects.create(name="Some man") doc.students.add(student) @isolate_apps("schema") @unittest.skipUnless( connection.vendor == "postgresql", "PostgreSQL specific db_table syntax." ) def test_namespaced_db_table_foreign_key_reference(self): with connection.cursor() as cursor: cursor.execute("CREATE SCHEMA django_schema_tests") def delete_schema(): with connection.cursor() as cursor: cursor.execute("DROP SCHEMA django_schema_tests CASCADE") self.addCleanup(delete_schema) class Author(Model): class Meta: app_label = "schema" class Book(Model): class Meta: app_label = "schema" db_table = '"django_schema_tests"."schema_book"' author = ForeignKey(Author, CASCADE) author.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) editor.add_field(Book, author) def test_rename_table_renames_deferred_sql_references(self): atomic_rename = connection.features.supports_atomic_references_rename with connection.schema_editor(atomic=atomic_rename) as editor: editor.create_model(Author) editor.create_model(Book) editor.alter_db_table(Author, "schema_author", "schema_renamed_author") editor.alter_db_table(Author, "schema_book", "schema_renamed_book") try: self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_table("schema_author"), False) self.assertIs(statement.references_table("schema_book"), False) finally: editor.alter_db_table(Author, "schema_renamed_author", "schema_author") editor.alter_db_table(Author, "schema_renamed_book", "schema_book") def test_rename_column_renames_deferred_sql_references(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) old_title = Book._meta.get_field("title") new_title = CharField(max_length=100, db_index=True) new_title.set_attributes_from_name("renamed_title") editor.alter_field(Book, old_title, new_title) old_author = Book._meta.get_field("author") new_author = ForeignKey(Author, CASCADE) new_author.set_attributes_from_name("renamed_author") editor.alter_field(Book, old_author, new_author) self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_column("book", "title"), False) self.assertIs(statement.references_column("book", "author_id"), False) @isolate_apps("schema") def 
test_referenced_field_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database level constraint don't prevent the field they reference from being renamed in an atomic block. """ class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = "schema" class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False) class Meta: app_label = "schema" self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name("renamed") with connection.schema_editor(atomic=True) as editor: editor.alter_field(Foo, Foo._meta.get_field("field"), new_field) @isolate_apps("schema") def test_referenced_table_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database level constraint don't prevent the table they reference from being renamed in an atomic block. """ class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = "schema" class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False) class Meta: app_label = "schema" self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name("renamed") with connection.schema_editor(atomic=True) as editor: editor.alter_db_table(Foo, Foo._meta.db_table, "renamed_table") Foo._meta.db_table = "renamed_table" @isolate_apps("schema") @skipUnlessDBFeature("supports_collation_on_charfield") def test_db_collation_charfield(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") class Foo(Model): field = CharField(max_length=255, db_collation=collation) class Meta: app_label = "schema" self.isolated_local_models = [Foo] with connection.schema_editor() as editor: editor.create_model(Foo) self.assertEqual( self.get_column_collation(Foo._meta.db_table, "field"), collation, ) @isolate_apps("schema") @skipUnlessDBFeature("supports_collation_on_textfield") def test_db_collation_textfield(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") class Foo(Model): field = TextField(db_collation=collation) class Meta: app_label = "schema" self.isolated_local_models = [Foo] with connection.schema_editor() as editor: editor.create_model(Foo) self.assertEqual( self.get_column_collation(Foo._meta.db_table, "field"), collation, ) @skipUnlessDBFeature("supports_collation_on_charfield") def test_add_field_db_collation(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") with connection.schema_editor() as editor: editor.create_model(Author) new_field = CharField(max_length=255, db_collation=collation) new_field.set_attributes_from_name("alias") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) self.assertEqual( columns["alias"][0], connection.features.introspected_field_types["CharField"], ) self.assertEqual(columns["alias"][1][8], collation) @skipUnlessDBFeature("supports_collation_on_charfield") def test_alter_field_db_collation(self): collation = 
connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field("name") new_field = CharField(max_length=255, db_collation=collation) new_field.set_attributes_from_name("name") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_column_collation(Author._meta.db_table, "name"), collation, ) with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertIsNone(self.get_column_collation(Author._meta.db_table, "name")) @skipUnlessDBFeature("supports_collation_on_charfield") def test_alter_primary_key_db_collation(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") with connection.schema_editor() as editor: editor.create_model(Thing) old_field = Thing._meta.get_field("when") new_field = CharField(max_length=1, db_collation=collation, primary_key=True) new_field.set_attributes_from_name("when") new_field.model = Thing with connection.schema_editor() as editor: editor.alter_field(Thing, old_field, new_field, strict=True) self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when") self.assertEqual( self.get_column_collation(Thing._meta.db_table, "when"), collation, ) with connection.schema_editor() as editor: editor.alter_field(Thing, new_field, old_field, strict=True) self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when") self.assertIsNone(self.get_column_collation(Thing._meta.db_table, "when")) @skipUnlessDBFeature( "supports_collation_on_charfield", "supports_collation_on_textfield" ) def test_alter_field_type_and_db_collation(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") with connection.schema_editor() as editor: editor.create_model(Note) old_field = Note._meta.get_field("info") new_field = CharField(max_length=255, db_collation=collation) new_field.set_attributes_from_name("info") new_field.model = Note with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) columns = self.column_classes(Note) self.assertEqual( columns["info"][0], connection.features.introspected_field_types["CharField"], ) self.assertEqual(columns["info"][1][8], collation) with connection.schema_editor() as editor: editor.alter_field(Note, new_field, old_field, strict=True) columns = self.column_classes(Note) self.assertEqual(columns["info"][0], "TextField") self.assertIsNone(columns["info"][1][8]) @skipUnlessDBFeature( "supports_collation_on_charfield", "supports_non_deterministic_collations", ) def test_ci_cs_db_collation(self): cs_collation = connection.features.test_collations.get("cs") ci_collation = connection.features.test_collations.get("ci") try: if connection.vendor == "mysql": cs_collation = "latin1_general_cs" elif connection.vendor == "postgresql": cs_collation = "en-x-icu" with connection.cursor() as cursor: cursor.execute( "CREATE COLLATION IF NOT EXISTS case_insensitive " "(provider = icu, locale = 'und-u-ks-level2', " "deterministic = false)" ) ci_collation = "case_insensitive" # Create the table. with connection.schema_editor() as editor: editor.create_model(Author) # Case-insensitive collation. 
old_field = Author._meta.get_field("name") new_field_ci = CharField(max_length=255, db_collation=ci_collation) new_field_ci.set_attributes_from_name("name") new_field_ci.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field_ci, strict=True) Author.objects.create(name="ANDREW") self.assertIs(Author.objects.filter(name="Andrew").exists(), True) # Case-sensitive collation. new_field_cs = CharField(max_length=255, db_collation=cs_collation) new_field_cs.set_attributes_from_name("name") new_field_cs.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, new_field_ci, new_field_cs, strict=True) self.assertIs(Author.objects.filter(name="Andrew").exists(), False) finally: if connection.vendor == "postgresql": with connection.cursor() as cursor: cursor.execute("DROP COLLATION IF EXISTS case_insensitive")
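# --- Illustrative sketch (not part of the original suite) -----------------
# The collation tests above all follow the same alter_field() round trip:
# build a new field object, copy the column name onto it with
# set_attributes_from_name(), and let the schema editor diff the two
# definitions. A minimal, self-contained version of that pattern, assuming
# the backend reports a usable "non_default" entry in test_collations:
def _collation_round_trip_sketch():
    from django.db import connection
    from django.db.models import CharField

    collation = connection.features.test_collations.get("non_default")
    if collation is None:
        return  # Backend exposes no suitable collation; nothing to show.
    old_field = Author._meta.get_field("name")
    new_field = CharField(max_length=255, db_collation=collation)
    new_field.set_attributes_from_name("name")
    with connection.schema_editor() as editor:
        # alter_field() emits only the SQL needed to move from the old
        # definition to the new one; strict=True raises on any mismatch.
        editor.alter_field(Author, old_field, new_field, strict=True)
        # Altering back restores the column's default collation.
        editor.alter_field(Author, new_field, old_field, strict=True)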
from django.contrib.admin import ModelAdmin, TabularInline from django.contrib.admin.helpers import InlineAdminForm from django.contrib.admin.tests import AdminSeleniumTestCase from django.contrib.auth.models import Permission, User from django.contrib.contenttypes.models import ContentType from django.test import RequestFactory, TestCase, override_settings from django.urls import reverse from .admin import InnerInline from .admin import site as admin_site from .models import ( Author, BinaryTree, Book, BothVerboseNameProfile, Chapter, Child, ChildModel1, ChildModel2, Fashionista, FootNote, Holder, Holder2, Holder3, Holder4, Inner, Inner2, Inner3, Inner4Stacked, Inner4Tabular, Novel, OutfitItem, Parent, ParentModelWithCustomPk, Person, Poll, Profile, ProfileCollection, Question, ShowInlineParent, Sighting, SomeChildModel, SomeParentModel, Teacher, VerboseNamePluralProfile, VerboseNameProfile, ) INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>' class TestDataMixin: @classmethod def setUpTestData(cls): cls.superuser = User.objects.create_superuser( username="super", email="[email protected]", password="secret" ) @override_settings(ROOT_URLCONF="admin_inlines.urls") class TestInline(TestDataMixin, TestCase): factory = RequestFactory() @classmethod def setUpTestData(cls): super().setUpTestData() cls.holder = Holder.objects.create(dummy=13) Inner.objects.create(dummy=42, holder=cls.holder) cls.parent = SomeParentModel.objects.create(name="a") SomeChildModel.objects.create(name="b", position="0", parent=cls.parent) SomeChildModel.objects.create(name="c", position="1", parent=cls.parent) cls.view_only_user = User.objects.create_user( username="user", password="pwd", is_staff=True, ) parent_ct = ContentType.objects.get_for_model(SomeParentModel) child_ct = ContentType.objects.get_for_model(SomeChildModel) permission = Permission.objects.get( codename="view_someparentmodel", content_type=parent_ct, ) cls.view_only_user.user_permissions.add(permission) permission = Permission.objects.get( codename="view_somechildmodel", content_type=child_ct, ) cls.view_only_user.user_permissions.add(permission) def setUp(self): self.client.force_login(self.superuser) def test_can_delete(self): """ can_delete should be passed to inlineformset factory. 
""" response = self.client.get( reverse("admin:admin_inlines_holder_change", args=(self.holder.id,)) ) inner_formset = response.context["inline_admin_formsets"][0].formset expected = InnerInline.can_delete actual = inner_formset.can_delete self.assertEqual(expected, actual, "can_delete must be equal") def test_readonly_stacked_inline_label(self): """Bug #13174.""" holder = Holder.objects.create(dummy=42) Inner.objects.create(holder=holder, dummy=42, readonly="") response = self.client.get( reverse("admin:admin_inlines_holder_change", args=(holder.id,)) ) self.assertContains(response, "<label>Inner readonly label:</label>") def test_many_to_many_inlines(self): "Autogenerated many-to-many inlines are displayed correctly (#13407)" response = self.client.get(reverse("admin:admin_inlines_author_add")) # The heading for the m2m inline block uses the right text self.assertContains(response, "<h2>Author-book relationships</h2>") # The "add another" label is correct self.assertContains(response, "Add another Author-book relationship") # The '+' is dropped from the autogenerated form prefix (Author_books+) self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_primary(self): person = Person.objects.create(firstname="Imelda") item = OutfitItem.objects.create(name="Shoes") # Imelda likes shoes, but can't carry her own bags. data = { "shoppingweakness_set-TOTAL_FORMS": 1, "shoppingweakness_set-INITIAL_FORMS": 0, "shoppingweakness_set-MAX_NUM_FORMS": 0, "_save": "Save", "person": person.id, "max_weight": 0, "shoppingweakness_set-0-item": item.id, } response = self.client.post( reverse("admin:admin_inlines_fashionista_add"), data ) self.assertEqual(response.status_code, 302) self.assertEqual(len(Fashionista.objects.filter(person__firstname="Imelda")), 1) def test_tabular_inline_column_css_class(self): """ Field names are included in the context to output a field-specific CSS class name in the column headers. """ response = self.client.get(reverse("admin:admin_inlines_poll_add")) text_field, call_me_field = list( response.context["inline_admin_formset"].fields() ) # Editable field. self.assertEqual(text_field["name"], "text") self.assertContains(response, '<th class="column-text required">') # Read-only field. self.assertEqual(call_me_field["name"], "call_me") self.assertContains(response, '<th class="column-call_me">') def test_custom_form_tabular_inline_label(self): """ A model form with a form field specified (TitleForm.title1) should have its label rendered in the tabular inline. """ response = self.client.get(reverse("admin:admin_inlines_titlecollection_add")) self.assertContains( response, '<th class="column-title1 required">Title1</th>', html=True ) def test_custom_form_tabular_inline_extra_field_label(self): response = self.client.get(reverse("admin:admin_inlines_outfititem_add")) _, extra_field = list(response.context["inline_admin_formset"].fields()) self.assertEqual(extra_field["label"], "Extra field") def test_non_editable_custom_form_tabular_inline_extra_field_label(self): response = self.client.get(reverse("admin:admin_inlines_chapter_add")) _, extra_field = list(response.context["inline_admin_formset"].fields()) self.assertEqual(extra_field["label"], "Extra field") def test_custom_form_tabular_inline_overridden_label(self): """ SomeChildModelForm.__init__() overrides the label of a form field. That label is displayed in the TabularInline. 
""" response = self.client.get(reverse("admin:admin_inlines_someparentmodel_add")) field = list(response.context["inline_admin_formset"].fields())[0] self.assertEqual(field["label"], "new label") self.assertContains( response, '<th class="column-name required">New label</th>', html=True ) def test_tabular_non_field_errors(self): """ non_field_errors are displayed correctly, including the correct value for colspan. """ data = { "title_set-TOTAL_FORMS": 1, "title_set-INITIAL_FORMS": 0, "title_set-MAX_NUM_FORMS": 0, "_save": "Save", "title_set-0-title1": "a title", "title_set-0-title2": "a different title", } response = self.client.post( reverse("admin:admin_inlines_titlecollection_add"), data ) # Here colspan is "4": two fields (title1 and title2), one hidden field # and the delete checkbox. self.assertContains( response, '<tr class="row-form-errors"><td colspan="4">' '<ul class="errorlist nonfield">' "<li>The two titles must be the same</li></ul></td></tr>", ) def test_no_parent_callable_lookup(self): """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable""" # Identically named callable isn't present in the parent ModelAdmin, # rendering of the add view shouldn't explode response = self.client.get(reverse("admin:admin_inlines_novel_add")) # View should have the child inlines section self.assertContains( response, '<div class="js-inline-admin-formset inline-group" id="chapter_set-group"', ) def test_callable_lookup(self): """ Admin inline should invoke local callable when its name is listed in readonly_fields. """ response = self.client.get(reverse("admin:admin_inlines_poll_add")) # Add parent object view should have the child inlines section self.assertContains( response, '<div class="js-inline-admin-formset inline-group" id="question_set-group"', ) # The right callable should be used for the inline readonly_fields # column cells self.assertContains(response, "<p>Callable in QuestionInline</p>") def test_model_error_inline_with_readonly_field(self): poll = Poll.objects.create(name="Test poll") data = { "question_set-TOTAL_FORMS": 1, "question_set-INITIAL_FORMS": 0, "question_set-MAX_NUM_FORMS": 0, "_save": "Save", "question_set-0-text": "Question", "question_set-0-poll": poll.pk, } response = self.client.post( reverse("admin:admin_inlines_poll_change", args=(poll.pk,)), data, ) self.assertContains(response, "Always invalid model.") def test_help_text(self): """ The inlines' model field help texts are displayed when using both the stacked and tabular layouts. """ response = self.client.get(reverse("admin:admin_inlines_holder4_add")) self.assertContains(response, "Awesome stacked help text is awesome.", 4) self.assertContains( response, '<img src="/static/admin/img/icon-unknown.svg" ' 'class="help help-tooltip" width="10" height="10" ' 'alt="(Awesome tabular help text is awesome.)" ' 'title="Awesome tabular help text is awesome.">', 1, ) # ReadOnly fields response = self.client.get(reverse("admin:admin_inlines_capofamiglia_add")) self.assertContains( response, '<img src="/static/admin/img/icon-unknown.svg" ' 'class="help help-tooltip" width="10" height="10" ' 'alt="(Help text for ReadOnlyInline)" ' 'title="Help text for ReadOnlyInline">', 1, ) def test_tabular_model_form_meta_readonly_field(self): """ Tabular inlines use ModelForm.Meta.help_texts and labels for read-only fields. 
""" response = self.client.get(reverse("admin:admin_inlines_someparentmodel_add")) self.assertContains( response, '<img src="/static/admin/img/icon-unknown.svg" ' 'class="help help-tooltip" width="10" height="10" ' 'alt="(Help text from ModelForm.Meta)" ' 'title="Help text from ModelForm.Meta">', ) self.assertContains(response, "Label from ModelForm.Meta") def test_inline_hidden_field_no_column(self): """#18263 -- Make sure hidden fields don't get a column in tabular inlines""" parent = SomeParentModel.objects.create(name="a") SomeChildModel.objects.create(name="b", position="0", parent=parent) SomeChildModel.objects.create(name="c", position="1", parent=parent) response = self.client.get( reverse("admin:admin_inlines_someparentmodel_change", args=(parent.pk,)) ) self.assertNotContains(response, '<td class="field-position">') self.assertInHTML( '<input id="id_somechildmodel_set-1-position" ' 'name="somechildmodel_set-1-position" type="hidden" value="1">', response.rendered_content, ) def test_tabular_inline_hidden_field_with_view_only_permissions(self): """ Content of hidden field is not visible in tabular inline when user has view-only permission. """ self.client.force_login(self.view_only_user) url = reverse( "tabular_inline_hidden_field_admin:admin_inlines_someparentmodel_change", args=(self.parent.pk,), ) response = self.client.get(url) self.assertInHTML( '<th class="column-position hidden">Position</th>', response.rendered_content, ) self.assertInHTML( '<td class="field-position hidden"><p>0</p></td>', response.rendered_content ) self.assertInHTML( '<td class="field-position hidden"><p>1</p></td>', response.rendered_content ) def test_stacked_inline_hidden_field_with_view_only_permissions(self): """ Content of hidden field is not visible in stacked inline when user has view-only permission. """ self.client.force_login(self.view_only_user) url = reverse( "stacked_inline_hidden_field_in_group_admin:" "admin_inlines_someparentmodel_change", args=(self.parent.pk,), ) response = self.client.get(url) # The whole line containing name + position fields is not hidden. self.assertContains( response, '<div class="form-row field-name field-position">' ) # The div containing the position field is hidden. self.assertInHTML( '<div class="fieldBox field-position hidden">' '<label class="inline">Position:</label>' '<div class="readonly">0</div></div>', response.rendered_content, ) self.assertInHTML( '<div class="fieldBox field-position hidden">' '<label class="inline">Position:</label>' '<div class="readonly">1</div></div>', response.rendered_content, ) def test_stacked_inline_single_hidden_field_in_line_with_view_only_permissions( self, ): """ Content of hidden field is not visible in stacked inline when user has view-only permission and the field is grouped on a separate line. """ self.client.force_login(self.view_only_user) url = reverse( "stacked_inline_hidden_field_on_single_line_admin:" "admin_inlines_someparentmodel_change", args=(self.parent.pk,), ) response = self.client.get(url) # The whole line containing position field is hidden. 
self.assertInHTML( '<div class="form-row hidden field-position">' "<div><label>Position:</label>" '<div class="readonly">0</div></div></div>', response.rendered_content, ) self.assertInHTML( '<div class="form-row hidden field-position">' "<div><label>Position:</label>" '<div class="readonly">1</div></div></div>', response.rendered_content, ) def test_tabular_inline_with_hidden_field_non_field_errors_has_correct_colspan( self, ): """ In tabular inlines, when a form has non-field errors, those errors are rendered in a table line with a single cell spanning the whole table width. Colspan must be equal to the number of visible columns. """ parent = SomeParentModel.objects.create(name="a") child = SomeChildModel.objects.create(name="b", position="0", parent=parent) url = reverse( "tabular_inline_hidden_field_admin:admin_inlines_someparentmodel_change", args=(parent.id,), ) data = { "name": parent.name, "somechildmodel_set-TOTAL_FORMS": 1, "somechildmodel_set-INITIAL_FORMS": 1, "somechildmodel_set-MIN_NUM_FORMS": 0, "somechildmodel_set-MAX_NUM_FORMS": 1000, "_save": "Save", "somechildmodel_set-0-id": child.id, "somechildmodel_set-0-parent": parent.id, "somechildmodel_set-0-name": child.name, "somechildmodel_set-0-position": 1, } response = self.client.post(url, data) # Form has 3 visible columns and 1 hidden column. self.assertInHTML( '<thead><tr><th class="original"></th>' '<th class="column-name required">Name</th>' '<th class="column-position required hidden">Position</th>' "<th>Delete?</th></tr></thead>", response.rendered_content, ) # The non-field error must be spanned on 3 (visible) columns. self.assertInHTML( '<tr class="row-form-errors"><td colspan="3">' '<ul class="errorlist nonfield"><li>A non-field error</li></ul></td></tr>', response.rendered_content, ) def test_non_related_name_inline(self): """ Multiple inlines with related_name='+' have correct form prefixes. """ response = self.client.get(reverse("admin:admin_inlines_capofamiglia_add")) self.assertContains( response, '<input type="hidden" name="-1-0-id" id="id_-1-0-id">', html=True ) self.assertContains( response, '<input type="hidden" name="-1-0-capo_famiglia" ' 'id="id_-1-0-capo_famiglia">', html=True, ) self.assertContains( response, '<input id="id_-1-0-name" type="text" class="vTextField" name="-1-0-name" ' 'maxlength="100">', html=True, ) self.assertContains( response, '<input type="hidden" name="-2-0-id" id="id_-2-0-id">', html=True ) self.assertContains( response, '<input type="hidden" name="-2-0-capo_famiglia" ' 'id="id_-2-0-capo_famiglia">', html=True, ) self.assertContains( response, '<input id="id_-2-0-name" type="text" class="vTextField" name="-2-0-name" ' 'maxlength="100">', html=True, ) @override_settings(USE_THOUSAND_SEPARATOR=True) def test_localize_pk_shortcut(self): """ The "View on Site" link is correct for locales that use thousand separators. """ holder = Holder.objects.create(pk=123456789, dummy=42) inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly="") response = self.client.get( reverse("admin:admin_inlines_holder_change", args=(holder.id,)) ) inner_shortcut = "r/%s/%s/" % ( ContentType.objects.get_for_model(inner).pk, inner.pk, ) self.assertContains(response, inner_shortcut) def test_custom_pk_shortcut(self): """ The "View on Site" link is correct for models with a custom primary key field. 
""" parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo") child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent) child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent) response = self.client.get( reverse("admin:admin_inlines_parentmodelwithcustompk_change", args=("foo",)) ) child1_shortcut = "r/%s/%s/" % ( ContentType.objects.get_for_model(child1).pk, child1.pk, ) child2_shortcut = "r/%s/%s/" % ( ContentType.objects.get_for_model(child2).pk, child2.pk, ) self.assertContains(response, child1_shortcut) self.assertContains(response, child2_shortcut) def test_create_inlines_on_inherited_model(self): """ An object can be created with inlines when it inherits another class. """ data = { "name": "Martian", "sighting_set-TOTAL_FORMS": 1, "sighting_set-INITIAL_FORMS": 0, "sighting_set-MAX_NUM_FORMS": 0, "sighting_set-0-place": "Zone 51", "_save": "Save", } response = self.client.post( reverse("admin:admin_inlines_extraterrestrial_add"), data ) self.assertEqual(response.status_code, 302) self.assertEqual(Sighting.objects.filter(et__name="Martian").count(), 1) def test_custom_get_extra_form(self): bt_head = BinaryTree.objects.create(name="Tree Head") BinaryTree.objects.create(name="First Child", parent=bt_head) # The maximum number of forms should respect 'get_max_num' on the # ModelAdmin max_forms_input = ( '<input id="id_binarytree_set-MAX_NUM_FORMS" ' 'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d">' ) # The total number of forms will remain the same in either case total_forms_hidden = ( '<input id="id_binarytree_set-TOTAL_FORMS" ' 'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2">' ) response = self.client.get(reverse("admin:admin_inlines_binarytree_add")) self.assertInHTML(max_forms_input % 3, response.rendered_content) self.assertInHTML(total_forms_hidden, response.rendered_content) response = self.client.get( reverse("admin:admin_inlines_binarytree_change", args=(bt_head.id,)) ) self.assertInHTML(max_forms_input % 2, response.rendered_content) self.assertInHTML(total_forms_hidden, response.rendered_content) def test_min_num(self): """ min_num and extra determine number of forms. 
""" class MinNumInline(TabularInline): model = BinaryTree min_num = 2 extra = 3 modeladmin = ModelAdmin(BinaryTree, admin_site) modeladmin.inlines = [MinNumInline] min_forms = ( '<input id="id_binarytree_set-MIN_NUM_FORMS" ' 'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2">' ) total_forms = ( '<input id="id_binarytree_set-TOTAL_FORMS" ' 'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5">' ) request = self.factory.get(reverse("admin:admin_inlines_binarytree_add")) request.user = User(username="super", is_superuser=True) response = modeladmin.changeform_view(request) self.assertInHTML(min_forms, response.rendered_content) self.assertInHTML(total_forms, response.rendered_content) def test_custom_min_num(self): bt_head = BinaryTree.objects.create(name="Tree Head") BinaryTree.objects.create(name="First Child", parent=bt_head) class MinNumInline(TabularInline): model = BinaryTree extra = 3 def get_min_num(self, request, obj=None, **kwargs): if obj: return 5 return 2 modeladmin = ModelAdmin(BinaryTree, admin_site) modeladmin.inlines = [MinNumInline] min_forms = ( '<input id="id_binarytree_set-MIN_NUM_FORMS" ' 'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d">' ) total_forms = ( '<input id="id_binarytree_set-TOTAL_FORMS" ' 'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d">' ) request = self.factory.get(reverse("admin:admin_inlines_binarytree_add")) request.user = User(username="super", is_superuser=True) response = modeladmin.changeform_view(request) self.assertInHTML(min_forms % 2, response.rendered_content) self.assertInHTML(total_forms % 5, response.rendered_content) request = self.factory.get( reverse("admin:admin_inlines_binarytree_change", args=(bt_head.id,)) ) request.user = User(username="super", is_superuser=True) response = modeladmin.changeform_view(request, object_id=str(bt_head.id)) self.assertInHTML(min_forms % 5, response.rendered_content) self.assertInHTML(total_forms % 8, response.rendered_content) def test_inline_nonauto_noneditable_pk(self): response = self.client.get(reverse("admin:admin_inlines_author_add")) self.assertContains( response, '<input id="id_nonautopkbook_set-0-rand_pk" ' 'name="nonautopkbook_set-0-rand_pk" type="hidden">', html=True, ) self.assertContains( response, '<input id="id_nonautopkbook_set-2-0-rand_pk" ' 'name="nonautopkbook_set-2-0-rand_pk" type="hidden">', html=True, ) def test_inline_nonauto_noneditable_inherited_pk(self): response = self.client.get(reverse("admin:admin_inlines_author_add")) self.assertContains( response, '<input id="id_nonautopkbookchild_set-0-nonautopkbook_ptr" ' 'name="nonautopkbookchild_set-0-nonautopkbook_ptr" type="hidden">', html=True, ) self.assertContains( response, '<input id="id_nonautopkbookchild_set-2-nonautopkbook_ptr" ' 'name="nonautopkbookchild_set-2-nonautopkbook_ptr" type="hidden">', html=True, ) def test_inline_editable_pk(self): response = self.client.get(reverse("admin:admin_inlines_author_add")) self.assertContains( response, '<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" ' 'name="editablepkbook_set-0-manual_pk" type="number">', html=True, count=1, ) self.assertContains( response, '<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" ' 'name="editablepkbook_set-2-0-manual_pk" type="number">', html=True, count=1, ) def test_stacked_inline_edit_form_contains_has_original_class(self): holder = Holder.objects.create(dummy=1) holder.inner_set.create(dummy=1) response = self.client.get( reverse("admin:admin_inlines_holder_change", 
args=(holder.pk,)) ) self.assertContains( response, '<div class="inline-related has_original" id="inner_set-0">', count=1, ) self.assertContains( response, '<div class="inline-related" id="inner_set-1">', count=1 ) def test_inlines_show_change_link_registered(self): "Inlines `show_change_link` for registered models when enabled." holder = Holder4.objects.create(dummy=1) item1 = Inner4Stacked.objects.create(dummy=1, holder=holder) item2 = Inner4Tabular.objects.create(dummy=1, holder=holder) items = ( ("inner4stacked", item1.pk), ("inner4tabular", item2.pk), ) response = self.client.get( reverse("admin:admin_inlines_holder4_change", args=(holder.pk,)) ) self.assertTrue( response.context["inline_admin_formset"].opts.has_registered_model ) for model, pk in items: url = reverse("admin:admin_inlines_%s_change" % model, args=(pk,)) self.assertContains( response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML) ) def test_inlines_show_change_link_unregistered(self): "Inlines `show_change_link` disabled for unregistered models." parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo") ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent) ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent) response = self.client.get( reverse("admin:admin_inlines_parentmodelwithcustompk_change", args=("foo",)) ) self.assertFalse( response.context["inline_admin_formset"].opts.has_registered_model ) self.assertNotContains(response, INLINE_CHANGELINK_HTML) def test_tabular_inline_show_change_link_false_registered(self): "Inlines `show_change_link` disabled by default." poll = Poll.objects.create(name="New poll") Question.objects.create(poll=poll) response = self.client.get( reverse("admin:admin_inlines_poll_change", args=(poll.pk,)) ) self.assertTrue( response.context["inline_admin_formset"].opts.has_registered_model ) self.assertNotContains(response, INLINE_CHANGELINK_HTML) def test_noneditable_inline_has_field_inputs(self): """Inlines without change permission shows field inputs on add form.""" response = self.client.get( reverse("admin:admin_inlines_novelreadonlychapter_add") ) self.assertContains( response, '<input type="text" name="chapter_set-0-name" ' 'class="vTextField" maxlength="40" id="id_chapter_set-0-name">', html=True, ) def test_inlines_plural_heading_foreign_key(self): response = self.client.get(reverse("admin:admin_inlines_holder4_add")) self.assertContains(response, "<h2>Inner4 stackeds</h2>", html=True) self.assertContains(response, "<h2>Inner4 tabulars</h2>", html=True) def test_inlines_singular_heading_one_to_one(self): response = self.client.get(reverse("admin:admin_inlines_person_add")) self.assertContains(response, "<h2>Author</h2>", html=True) # Tabular. self.assertContains(response, "<h2>Fashionista</h2>", html=True) # Stacked. 
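    # Hedged sketch (not in the original suite): the management-form keys
    # that the inline POSTs throughout this file carry (e.g. in
    # test_tabular_non_field_errors above). Omitting them makes the formset
    # fail validation with "ManagementForm data is missing or has been
    # tampered with". The helper name and default prefix are illustrative.
    def _inline_management_form_data(self, prefix="question_set"):
        # One blank extra form, no pre-existing rows, effectively no cap.
        return {
            "%s-TOTAL_FORMS" % prefix: 1,
            "%s-INITIAL_FORMS" % prefix: 0,
            "%s-MIN_NUM_FORMS" % prefix: 0,
            "%s-MAX_NUM_FORMS" % prefix: 1000,
        }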
def test_inlines_based_on_model_state(self): parent = ShowInlineParent.objects.create(show_inlines=False) data = { "show_inlines": "on", "_save": "Save", } change_url = reverse( "admin:admin_inlines_showinlineparent_change", args=(parent.id,), ) response = self.client.post(change_url, data) self.assertEqual(response.status_code, 302) parent.refresh_from_db() self.assertIs(parent.show_inlines, True) @override_settings(ROOT_URLCONF="admin_inlines.urls") class TestInlineMedia(TestDataMixin, TestCase): def setUp(self): self.client.force_login(self.superuser) def test_inline_media_only_base(self): holder = Holder(dummy=13) holder.save() Inner(dummy=42, holder=holder).save() change_url = reverse("admin:admin_inlines_holder_change", args=(holder.id,)) response = self.client.get(change_url) self.assertContains(response, "my_awesome_admin_scripts.js") def test_inline_media_only_inline(self): holder = Holder3(dummy=13) holder.save() Inner3(dummy=42, holder=holder).save() change_url = reverse("admin:admin_inlines_holder3_change", args=(holder.id,)) response = self.client.get(change_url) self.assertEqual( response.context["inline_admin_formsets"][0].media._js, [ "admin/js/vendor/jquery/jquery.min.js", "my_awesome_inline_scripts.js", "custom_number.js", "admin/js/jquery.init.js", "admin/js/inlines.js", ], ) self.assertContains(response, "my_awesome_inline_scripts.js") def test_all_inline_media(self): holder = Holder2(dummy=13) holder.save() Inner2(dummy=42, holder=holder).save() change_url = reverse("admin:admin_inlines_holder2_change", args=(holder.id,)) response = self.client.get(change_url) self.assertContains(response, "my_awesome_admin_scripts.js") self.assertContains(response, "my_awesome_inline_scripts.js") @override_settings(ROOT_URLCONF="admin_inlines.urls") class TestInlineAdminForm(TestCase): def test_immutable_content_type(self): """Regression for #9362 The problem depends only on InlineAdminForm and its "original" argument, so we can safely set the other arguments to None/{}. 
We just need to check that the content_type argument of Child isn't altered by the internals of the inline form.""" sally = Teacher.objects.create(name="Sally") john = Parent.objects.create(name="John") joe = Child.objects.create(name="Joe", teacher=sally, parent=john) iaf = InlineAdminForm(None, None, {}, {}, joe) parent_ct = ContentType.objects.get_for_model(Parent) self.assertEqual(iaf.original.content_type, parent_ct) @override_settings(ROOT_URLCONF="admin_inlines.urls") class TestInlineProtectedOnDelete(TestDataMixin, TestCase): def setUp(self): self.client.force_login(self.superuser) def test_deleting_inline_with_protected_delete_does_not_validate(self): lotr = Novel.objects.create(name="Lord of the rings") chapter = Chapter.objects.create(novel=lotr, name="Many Meetings") foot_note = FootNote.objects.create(chapter=chapter, note="yadda yadda") change_url = reverse("admin:admin_inlines_novel_change", args=(lotr.id,)) response = self.client.get(change_url) data = { "name": lotr.name, "chapter_set-TOTAL_FORMS": 1, "chapter_set-INITIAL_FORMS": 1, "chapter_set-MAX_NUM_FORMS": 1000, "_save": "Save", "chapter_set-0-id": chapter.id, "chapter_set-0-name": chapter.name, "chapter_set-0-novel": lotr.id, "chapter_set-0-DELETE": "on", } response = self.client.post(change_url, data) self.assertContains( response, "Deleting chapter %s would require deleting " "the following protected related objects: foot note %s" % (chapter, foot_note), ) @override_settings(ROOT_URLCONF="admin_inlines.urls") class TestInlinePermissions(TestCase): """ Make sure the admin respects permissions for objects that are edited inline. Refs #8060. """ @classmethod def setUpTestData(cls): cls.user = User(username="admin", is_staff=True, is_active=True) cls.user.set_password("secret") cls.user.save() cls.author_ct = ContentType.objects.get_for_model(Author) cls.holder_ct = ContentType.objects.get_for_model(Holder2) cls.book_ct = ContentType.objects.get_for_model(Book) cls.inner_ct = ContentType.objects.get_for_model(Inner2) # User always has permissions to add and change Authors, and Holders, # the main (parent) models of the inlines. Permissions on the inlines # vary per test. permission = Permission.objects.get( codename="add_author", content_type=cls.author_ct ) cls.user.user_permissions.add(permission) permission = Permission.objects.get( codename="change_author", content_type=cls.author_ct ) cls.user.user_permissions.add(permission) permission = Permission.objects.get( codename="add_holder2", content_type=cls.holder_ct ) cls.user.user_permissions.add(permission) permission = Permission.objects.get( codename="change_holder2", content_type=cls.holder_ct ) cls.user.user_permissions.add(permission) author = Author.objects.create(pk=1, name="The Author") cls.book = author.books.create(name="The inline Book") cls.author_change_url = reverse( "admin:admin_inlines_author_change", args=(author.id,) ) # Get the ID of the automatically created intermediate model for the # Author-Book m2m. 
author_book_auto_m2m_intermediate = Author.books.through.objects.get( author=author, book=cls.book ) cls.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk cls.holder = Holder2.objects.create(dummy=13) cls.inner2 = Inner2.objects.create(dummy=42, holder=cls.holder) def setUp(self): self.holder_change_url = reverse( "admin:admin_inlines_holder2_change", args=(self.holder.id,) ) self.client.force_login(self.user) def test_inline_add_m2m_noperm(self): response = self.client.get(reverse("admin:admin_inlines_author_add")) # No change permission on books, so no inline self.assertNotContains(response, "<h2>Author-book relationships</h2>") self.assertNotContains(response, "Add another Author-Book Relationship") self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_add_fk_noperm(self): response = self.client.get(reverse("admin:admin_inlines_holder2_add")) # No permissions on Inner2s, so no inline self.assertNotContains(response, "<h2>Inner2s</h2>") self.assertNotContains(response, "Add another Inner2") self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"') def test_inline_change_m2m_noperm(self): response = self.client.get(self.author_change_url) # No change permission on books, so no inline self.assertNotContains(response, "<h2>Author-book relationships</h2>") self.assertNotContains(response, "Add another Author-Book Relationship") self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_change_fk_noperm(self): response = self.client.get(self.holder_change_url) # No permissions on Inner2s, so no inline self.assertNotContains(response, "<h2>Inner2s</h2>") self.assertNotContains(response, "Add another Inner2") self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"') def test_inline_add_m2m_view_only_perm(self): permission = Permission.objects.get( codename="view_book", content_type=self.book_ct ) self.user.user_permissions.add(permission) response = self.client.get(reverse("admin:admin_inlines_author_add")) # View-only inlines. (It could be nicer to hide the empty, non-editable # inlines on the add page.) 
self.assertIs( response.context["inline_admin_formset"].has_view_permission, True ) self.assertIs( response.context["inline_admin_formset"].has_add_permission, False ) self.assertIs( response.context["inline_admin_formset"].has_change_permission, False ) self.assertIs( response.context["inline_admin_formset"].has_delete_permission, False ) self.assertContains(response, "<h2>Author-book relationships</h2>") self.assertContains( response, '<input type="hidden" name="Author_books-TOTAL_FORMS" value="0" ' 'id="id_Author_books-TOTAL_FORMS">', html=True, ) self.assertNotContains(response, "Add another Author-Book Relationship") def test_inline_add_m2m_add_perm(self): permission = Permission.objects.get( codename="add_book", content_type=self.book_ct ) self.user.user_permissions.add(permission) response = self.client.get(reverse("admin:admin_inlines_author_add")) # No change permission on Books, so no inline self.assertNotContains(response, "<h2>Author-book relationships</h2>") self.assertNotContains(response, "Add another Author-Book Relationship") self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_add_fk_add_perm(self): permission = Permission.objects.get( codename="add_inner2", content_type=self.inner_ct ) self.user.user_permissions.add(permission) response = self.client.get(reverse("admin:admin_inlines_holder2_add")) # Add permission on inner2s, so we get the inline self.assertContains(response, "<h2>Inner2s</h2>") self.assertContains(response, "Add another Inner2") self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" ' 'value="3" name="inner2_set-TOTAL_FORMS">', html=True, ) def test_inline_change_m2m_add_perm(self): permission = Permission.objects.get( codename="add_book", content_type=self.book_ct ) self.user.user_permissions.add(permission) response = self.client.get(self.author_change_url) # No change permission on books, so no inline self.assertNotContains(response, "<h2>Author-book relationships</h2>") self.assertNotContains(response, "Add another Author-Book Relationship") self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') self.assertNotContains(response, 'id="id_Author_books-0-DELETE"') def test_inline_change_m2m_view_only_perm(self): permission = Permission.objects.get( codename="view_book", content_type=self.book_ct ) self.user.user_permissions.add(permission) response = self.client.get(self.author_change_url) # View-only inlines. self.assertIs( response.context["inline_admin_formset"].has_view_permission, True ) self.assertIs( response.context["inline_admin_formset"].has_add_permission, False ) self.assertIs( response.context["inline_admin_formset"].has_change_permission, False ) self.assertIs( response.context["inline_admin_formset"].has_delete_permission, False ) self.assertContains(response, "<h2>Author-book relationships</h2>") self.assertContains( response, '<input type="hidden" name="Author_books-TOTAL_FORMS" value="1" ' 'id="id_Author_books-TOTAL_FORMS">', html=True, ) # The field in the inline is read-only. 
self.assertContains(response, "<p>%s</p>" % self.book) self.assertNotContains( response, '<input type="checkbox" name="Author_books-0-DELETE" ' 'id="id_Author_books-0-DELETE">', html=True, ) def test_inline_change_m2m_change_perm(self): permission = Permission.objects.get( codename="change_book", content_type=self.book_ct ) self.user.user_permissions.add(permission) response = self.client.get(self.author_change_url) # We have change perm on books, so we can add/change/delete inlines self.assertIs( response.context["inline_admin_formset"].has_view_permission, True ) self.assertIs(response.context["inline_admin_formset"].has_add_permission, True) self.assertIs( response.context["inline_admin_formset"].has_change_permission, True ) self.assertIs( response.context["inline_admin_formset"].has_delete_permission, True ) self.assertContains(response, "<h2>Author-book relationships</h2>") self.assertContains(response, "Add another Author-book relationship") self.assertContains( response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" ' 'value="4" name="Author_books-TOTAL_FORMS">', html=True, ) self.assertContains( response, '<input type="hidden" id="id_Author_books-0-id" value="%i" ' 'name="Author_books-0-id">' % self.author_book_auto_m2m_intermediate_id, html=True, ) self.assertContains(response, 'id="id_Author_books-0-DELETE"') def test_inline_change_fk_add_perm(self): permission = Permission.objects.get( codename="add_inner2", content_type=self.inner_ct ) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Add permission on inner2s, so we can add but not modify existing self.assertContains(response, "<h2>Inner2s</h2>") self.assertContains(response, "Add another Inner2") # 3 extra forms only, not the existing instance form self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" ' 'name="inner2_set-TOTAL_FORMS">', html=True, ) self.assertNotContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" ' 'name="inner2_set-0-id">' % self.inner2.id, html=True, ) def test_inline_change_fk_change_perm(self): permission = Permission.objects.get( codename="change_inner2", content_type=self.inner_ct ) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Change permission on inner2s, so we can change existing but not add new self.assertContains(response, "<h2>Inner2s</h2>", count=2) # Just the one form for existing instances self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" ' 'name="inner2_set-TOTAL_FORMS">', html=True, ) self.assertContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" ' 'name="inner2_set-0-id">' % self.inner2.id, html=True, ) # max-num 0 means we can't add new ones self.assertContains( response, '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" value="0" ' 'name="inner2_set-MAX_NUM_FORMS">', html=True, ) # TabularInline self.assertContains( response, '<th class="column-dummy required">Dummy</th>', html=True ) self.assertContains( response, '<input type="number" name="inner2_set-2-0-dummy" value="%s" ' 'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy, html=True, ) def test_inline_change_fk_add_change_perm(self): permission = Permission.objects.get( codename="add_inner2", content_type=self.inner_ct ) self.user.user_permissions.add(permission) permission = Permission.objects.get( codename="change_inner2", content_type=self.inner_ct ) 
self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Add/change perm, so we can add new and change existing self.assertContains(response, "<h2>Inner2s</h2>") # One form for existing instance and three extra for new self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" ' 'name="inner2_set-TOTAL_FORMS">', html=True, ) self.assertContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" ' 'name="inner2_set-0-id">' % self.inner2.id, html=True, ) def test_inline_change_fk_change_del_perm(self): permission = Permission.objects.get( codename="change_inner2", content_type=self.inner_ct ) self.user.user_permissions.add(permission) permission = Permission.objects.get( codename="delete_inner2", content_type=self.inner_ct ) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Change/delete perm on inner2s, so we can change/delete existing self.assertContains(response, "<h2>Inner2s</h2>") # One form for existing instance only, no new self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" ' 'name="inner2_set-TOTAL_FORMS">', html=True, ) self.assertContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" ' 'name="inner2_set-0-id">' % self.inner2.id, html=True, ) self.assertContains(response, 'id="id_inner2_set-0-DELETE"') def test_inline_change_fk_all_perms(self): permission = Permission.objects.get( codename="add_inner2", content_type=self.inner_ct ) self.user.user_permissions.add(permission) permission = Permission.objects.get( codename="change_inner2", content_type=self.inner_ct ) self.user.user_permissions.add(permission) permission = Permission.objects.get( codename="delete_inner2", content_type=self.inner_ct ) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # All perms on inner2s, so we can add/change/delete self.assertContains(response, "<h2>Inner2s</h2>", count=2) # One form for existing instance only, three for new self.assertContains( response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" ' 'name="inner2_set-TOTAL_FORMS">', html=True, ) self.assertContains( response, '<input type="hidden" id="id_inner2_set-0-id" value="%i" ' 'name="inner2_set-0-id">' % self.inner2.id, html=True, ) self.assertContains(response, 'id="id_inner2_set-0-DELETE"') # TabularInline self.assertContains( response, '<th class="column-dummy required">Dummy</th>', html=True ) self.assertContains( response, '<input type="number" name="inner2_set-2-0-dummy" value="%s" ' 'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy, html=True, ) @override_settings(ROOT_URLCONF="admin_inlines.urls") class TestReadOnlyChangeViewInlinePermissions(TestCase): @classmethod def setUpTestData(cls): cls.user = User.objects.create_user( "testing", password="password", is_staff=True ) cls.user.user_permissions.add( Permission.objects.get( codename="view_poll", content_type=ContentType.objects.get_for_model(Poll), ) ) cls.user.user_permissions.add( *Permission.objects.filter( codename__endswith="question", content_type=ContentType.objects.get_for_model(Question), ).values_list("pk", flat=True) ) cls.poll = Poll.objects.create(name="Survey") cls.add_url = reverse("admin:admin_inlines_poll_add") cls.change_url = reverse("admin:admin_inlines_poll_change", args=(cls.poll.id,)) def setUp(self): self.client.force_login(self.user) def test_add_url_not_allowed(self): 
response = self.client.get(self.add_url) self.assertEqual(response.status_code, 403) response = self.client.post(self.add_url, {}) self.assertEqual(response.status_code, 403) def test_post_to_change_url_not_allowed(self): response = self.client.post(self.change_url, {}) self.assertEqual(response.status_code, 403) def test_get_to_change_url_is_allowed(self): response = self.client.get(self.change_url) self.assertEqual(response.status_code, 200) def test_main_model_is_rendered_as_read_only(self): response = self.client.get(self.change_url) self.assertContains( response, '<div class="readonly">%s</div>' % self.poll.name, html=True ) input = ( '<input type="text" name="name" value="%s" class="vTextField" ' 'maxlength="40" required id="id_name">' ) self.assertNotContains(response, input % self.poll.name, html=True) def test_inlines_are_rendered_as_read_only(self): question = Question.objects.create( text="How will this be rendered?", poll=self.poll ) response = self.client.get(self.change_url) self.assertContains( response, '<td class="field-text"><p>%s</p></td>' % question.text, html=True ) self.assertNotContains(response, 'id="id_question_set-0-text"') self.assertNotContains(response, 'id="id_related_objs-0-DELETE"') def test_submit_line_shows_only_close_button(self): response = self.client.get(self.change_url) self.assertContains( response, '<a href="/admin/admin_inlines/poll/" class="closelink">Close</a>', html=True, ) delete_link = ( '<a href="/admin/admin_inlines/poll/%s/delete/" class="deletelink">Delete' "</a>" ) self.assertNotContains(response, delete_link % self.poll.id, html=True) self.assertNotContains( response, '<input type="submit" value="Save and add another" name="_addanother">', ) self.assertNotContains( response, '<input type="submit" value="Save and continue editing" name="_continue">', ) def test_inline_delete_buttons_are_not_shown(self): Question.objects.create(text="How will this be rendered?", poll=self.poll) response = self.client.get(self.change_url) self.assertNotContains( response, '<input type="checkbox" name="question_set-0-DELETE" ' 'id="id_question_set-0-DELETE">', html=True, ) def test_extra_inlines_are_not_shown(self): response = self.client.get(self.change_url) self.assertNotContains(response, 'id="id_question_set-0-text"') @override_settings(ROOT_URLCONF="admin_inlines.urls") class TestVerboseNameInlineForms(TestDataMixin, TestCase): factory = RequestFactory() def test_verbose_name_inline(self): class NonVerboseProfileInline(TabularInline): model = Profile verbose_name = "Non-verbose childs" class VerboseNameProfileInline(TabularInline): model = VerboseNameProfile verbose_name = "Childs with verbose name" class VerboseNamePluralProfileInline(TabularInline): model = VerboseNamePluralProfile verbose_name = "Childs with verbose name plural" class BothVerboseNameProfileInline(TabularInline): model = BothVerboseNameProfile verbose_name = "Childs with both verbose names" modeladmin = ModelAdmin(ProfileCollection, admin_site) modeladmin.inlines = [ NonVerboseProfileInline, VerboseNameProfileInline, VerboseNamePluralProfileInline, BothVerboseNameProfileInline, ] obj = ProfileCollection.objects.create() url = reverse("admin:admin_inlines_profilecollection_change", args=(obj.pk,)) request = self.factory.get(url) request.user = self.superuser response = modeladmin.changeform_view(request) self.assertNotContains(response, "Add another Profile") # Non-verbose model. 
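        # When only verbose_name is overridden, the plural used in the
        # heading is derived by appending a plain "s" to it, hence the
        # "childss" spelling asserted below.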
self.assertContains(response, "<h2>Non-verbose childss</h2>") self.assertContains(response, "Add another Non-verbose child") self.assertNotContains(response, "<h2>Profiles</h2>") # Model with verbose name. self.assertContains(response, "<h2>Childs with verbose names</h2>") self.assertContains(response, "Add another Childs with verbose name") self.assertNotContains(response, "<h2>Model with verbose name onlys</h2>") self.assertNotContains(response, "Add another Model with verbose name only") # Model with verbose name plural. self.assertContains(response, "<h2>Childs with verbose name plurals</h2>") self.assertContains(response, "Add another Childs with verbose name plural") self.assertNotContains(response, "<h2>Model with verbose name plural only</h2>") # Model with both verbose names. self.assertContains(response, "<h2>Childs with both verbose namess</h2>") self.assertContains(response, "Add another Childs with both verbose names") self.assertNotContains(response, "<h2>Model with both - plural name</h2>") self.assertNotContains(response, "Add another Model with both - name") def test_verbose_name_plural_inline(self): class NonVerboseProfileInline(TabularInline): model = Profile verbose_name_plural = "Non-verbose childs" class VerboseNameProfileInline(TabularInline): model = VerboseNameProfile verbose_name_plural = "Childs with verbose name" class VerboseNamePluralProfileInline(TabularInline): model = VerboseNamePluralProfile verbose_name_plural = "Childs with verbose name plural" class BothVerboseNameProfileInline(TabularInline): model = BothVerboseNameProfile verbose_name_plural = "Childs with both verbose names" modeladmin = ModelAdmin(ProfileCollection, admin_site) modeladmin.inlines = [ NonVerboseProfileInline, VerboseNameProfileInline, VerboseNamePluralProfileInline, BothVerboseNameProfileInline, ] obj = ProfileCollection.objects.create() url = reverse("admin:admin_inlines_profilecollection_change", args=(obj.pk,)) request = self.factory.get(url) request.user = self.superuser response = modeladmin.changeform_view(request) # Non-verbose model. self.assertContains(response, "<h2>Non-verbose childs</h2>") self.assertContains(response, "Add another Profile") self.assertNotContains(response, "<h2>Profiles</h2>") # Model with verbose name. self.assertContains(response, "<h2>Childs with verbose name</h2>") self.assertContains(response, "Add another Model with verbose name only") self.assertNotContains(response, "<h2>Model with verbose name onlys</h2>") # Model with verbose name plural. self.assertContains(response, "<h2>Childs with verbose name plural</h2>") self.assertContains(response, "Add another Profile") self.assertNotContains(response, "<h2>Model with verbose name plural only</h2>") # Model with both verbose names. 
self.assertContains(response, "<h2>Childs with both verbose names</h2>") self.assertContains(response, "Add another Model with both - name") self.assertNotContains(response, "<h2>Model with both - plural name</h2>") def test_both_verbose_names_inline(self): class NonVerboseProfileInline(TabularInline): model = Profile verbose_name = "Non-verbose childs - name" verbose_name_plural = "Non-verbose childs - plural name" class VerboseNameProfileInline(TabularInline): model = VerboseNameProfile verbose_name = "Childs with verbose name - name" verbose_name_plural = "Childs with verbose name - plural name" class VerboseNamePluralProfileInline(TabularInline): model = VerboseNamePluralProfile verbose_name = "Childs with verbose name plural - name" verbose_name_plural = "Childs with verbose name plural - plural name" class BothVerboseNameProfileInline(TabularInline): model = BothVerboseNameProfile verbose_name = "Childs with both - name" verbose_name_plural = "Childs with both - plural name" modeladmin = ModelAdmin(ProfileCollection, admin_site) modeladmin.inlines = [ NonVerboseProfileInline, VerboseNameProfileInline, VerboseNamePluralProfileInline, BothVerboseNameProfileInline, ] obj = ProfileCollection.objects.create() url = reverse("admin:admin_inlines_profilecollection_change", args=(obj.pk,)) request = self.factory.get(url) request.user = self.superuser response = modeladmin.changeform_view(request) self.assertNotContains(response, "Add another Profile") # Non-verbose model. self.assertContains(response, "<h2>Non-verbose childs - plural name</h2>") self.assertContains(response, "Add another Non-verbose childs - name") self.assertNotContains(response, "<h2>Profiles</h2>") # Model with verbose name. self.assertContains(response, "<h2>Childs with verbose name - plural name</h2>") self.assertContains(response, "Add another Childs with verbose name - name") self.assertNotContains(response, "<h2>Model with verbose name onlys</h2>") # Model with verbose name plural. self.assertContains( response, "<h2>Childs with verbose name plural - plural name</h2>", ) self.assertContains( response, "Add another Childs with verbose name plural - name", ) self.assertNotContains(response, "<h2>Model with verbose name plural only</h2>") # Model with both verbose names. self.assertContains(response, "<h2>Childs with both - plural name</h2>") self.assertContains(response, "Add another Childs with both - name") self.assertNotContains(response, "<h2>Model with both - plural name</h2>") self.assertNotContains(response, "Add another Model with both - name") @override_settings(ROOT_URLCONF="admin_inlines.urls") class SeleniumTests(AdminSeleniumTestCase): available_apps = ["admin_inlines"] + AdminSeleniumTestCase.available_apps def setUp(self): User.objects.create_superuser( username="super", password="secret", email="[email protected]" ) def test_add_stackeds(self): """ The "Add another XXX" link correctly adds items to the stacked formset. 
""" from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret") self.selenium.get( self.live_server_url + reverse("admin:admin_inlines_holder4_add") ) inline_id = "#inner4stacked_set-group" rows_selector = "%s .dynamic-inner4stacked_set" % inline_id self.assertCountSeleniumElements(rows_selector, 3) add_button = self.selenium.find_element( By.LINK_TEXT, "Add another Inner4 stacked" ) add_button.click() self.assertCountSeleniumElements(rows_selector, 4) def test_delete_stackeds(self): from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret") self.selenium.get( self.live_server_url + reverse("admin:admin_inlines_holder4_add") ) inline_id = "#inner4stacked_set-group" rows_selector = "%s .dynamic-inner4stacked_set" % inline_id self.assertCountSeleniumElements(rows_selector, 3) add_button = self.selenium.find_element( By.LINK_TEXT, "Add another Inner4 stacked" ) add_button.click() add_button.click() self.assertCountSeleniumElements(rows_selector, 5) for delete_link in self.selenium.find_elements( By.CSS_SELECTOR, "%s .inline-deletelink" % inline_id ): delete_link.click() with self.disable_implicit_wait(): self.assertCountSeleniumElements(rows_selector, 0) def test_delete_invalid_stacked_inlines(self): from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret") self.selenium.get( self.live_server_url + reverse("admin:admin_inlines_holder4_add") ) inline_id = "#inner4stacked_set-group" rows_selector = "%s .dynamic-inner4stacked_set" % inline_id self.assertCountSeleniumElements(rows_selector, 3) add_button = self.selenium.find_element( By.LINK_TEXT, "Add another Inner4 stacked", ) add_button.click() add_button.click() self.assertCountSeleniumElements("#id_inner4stacked_set-4-dummy", 1) # Enter some data and click 'Save'. self.selenium.find_element(By.NAME, "dummy").send_keys("1") self.selenium.find_element(By.NAME, "inner4stacked_set-0-dummy").send_keys( "100" ) self.selenium.find_element(By.NAME, "inner4stacked_set-1-dummy").send_keys( "101" ) self.selenium.find_element(By.NAME, "inner4stacked_set-2-dummy").send_keys( "222" ) self.selenium.find_element(By.NAME, "inner4stacked_set-3-dummy").send_keys( "103" ) self.selenium.find_element(By.NAME, "inner4stacked_set-4-dummy").send_keys( "222" ) with self.wait_page_loaded(): self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click() # Sanity check. self.assertCountSeleniumElements(rows_selector, 5) errorlist = self.selenium.find_element( By.CSS_SELECTOR, "%s .dynamic-inner4stacked_set .errorlist li" % inline_id, ) self.assertEqual("Please correct the duplicate values below.", errorlist.text) delete_link = self.selenium.find_element( By.CSS_SELECTOR, "#inner4stacked_set-4 .inline-deletelink" ) delete_link.click() self.assertCountSeleniumElements(rows_selector, 4) with self.disable_implicit_wait(), self.assertRaises(NoSuchElementException): self.selenium.find_element( By.CSS_SELECTOR, "%s .dynamic-inner4stacked_set .errorlist li" % inline_id, ) with self.wait_page_loaded(): self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click() # The objects have been created in the database. 
        self.assertEqual(Inner4Stacked.objects.count(), 4)

    def test_delete_invalid_tabular_inlines(self):
        from selenium.common.exceptions import NoSuchElementException
        from selenium.webdriver.common.by import By

        self.admin_login(username="super", password="secret")
        self.selenium.get(
            self.live_server_url + reverse("admin:admin_inlines_holder4_add")
        )

        inline_id = "#inner4tabular_set-group"
        rows_selector = "%s .dynamic-inner4tabular_set" % inline_id

        self.assertCountSeleniumElements(rows_selector, 3)
        add_button = self.selenium.find_element(
            By.LINK_TEXT, "Add another Inner4 tabular"
        )
        add_button.click()
        add_button.click()
        self.assertCountSeleniumElements("#id_inner4tabular_set-4-dummy", 1)

        # Enter some data and click 'Save'.
        self.selenium.find_element(By.NAME, "dummy").send_keys("1")
        self.selenium.find_element(By.NAME, "inner4tabular_set-0-dummy").send_keys(
            "100"
        )
        self.selenium.find_element(By.NAME, "inner4tabular_set-1-dummy").send_keys(
            "101"
        )
        self.selenium.find_element(By.NAME, "inner4tabular_set-2-dummy").send_keys(
            "222"
        )
        self.selenium.find_element(By.NAME, "inner4tabular_set-3-dummy").send_keys(
            "103"
        )
        self.selenium.find_element(By.NAME, "inner4tabular_set-4-dummy").send_keys(
            "222"
        )
        with self.wait_page_loaded():
            self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()

        # Sanity check.
        self.assertCountSeleniumElements(rows_selector, 5)
        # Non-field errorlist is in its own <tr> just before
        # tr#inner4tabular_set-3:
        errorlist = self.selenium.find_element(
            By.CSS_SELECTOR,
            "%s #inner4tabular_set-3 + .row-form-errors .errorlist li" % inline_id,
        )
        self.assertEqual("Please correct the duplicate values below.", errorlist.text)
        delete_link = self.selenium.find_element(
            By.CSS_SELECTOR, "#inner4tabular_set-4 .inline-deletelink"
        )
        delete_link.click()
        self.assertCountSeleniumElements(rows_selector, 4)
        with self.disable_implicit_wait(), self.assertRaises(NoSuchElementException):
            self.selenium.find_element(
                By.CSS_SELECTOR,
                "%s .dynamic-inner4tabular_set .errorlist li" % inline_id,
            )
        with self.wait_page_loaded():
            self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click()
        # The objects have been created in the database.
        self.assertEqual(Inner4Tabular.objects.count(), 4)

    def test_add_inlines(self):
        """
        The "Add another XXX" link correctly adds items to the inline form.
        """
        from selenium.webdriver.common.by import By

        self.admin_login(username="super", password="secret")
        self.selenium.get(
            self.live_server_url + reverse("admin:admin_inlines_profilecollection_add")
        )

        # There's only one inline to start with and it has the correct ID.
        self.assertCountSeleniumElements(".dynamic-profile_set", 1)
        self.assertEqual(
            self.selenium.find_elements(By.CSS_SELECTOR, ".dynamic-profile_set")[
                0
            ].get_attribute("id"),
            "profile_set-0",
        )
        self.assertCountSeleniumElements(
            ".dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]", 1
        )
        self.assertCountSeleniumElements(
            ".dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]", 1
        )

        # Add an inline
        self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click()

        # The inline has been added, it has the right id, and it contains the
        # correct fields.
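        # Added rows get sequential formset indexes: the second row has the
        # id "profile_set-1" and inputs prefixed with "profile_set-1-".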
self.assertCountSeleniumElements(".dynamic-profile_set", 2) self.assertEqual( self.selenium.find_elements(By.CSS_SELECTOR, ".dynamic-profile_set")[ 1 ].get_attribute("id"), "profile_set-1", ) self.assertCountSeleniumElements( ".dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]", 1 ) self.assertCountSeleniumElements( ".dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]", 1 ) # Let's add another one to be sure self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click() self.assertCountSeleniumElements(".dynamic-profile_set", 3) self.assertEqual( self.selenium.find_elements(By.CSS_SELECTOR, ".dynamic-profile_set")[ 2 ].get_attribute("id"), "profile_set-2", ) self.assertCountSeleniumElements( ".dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]", 1 ) self.assertCountSeleniumElements( ".dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]", 1 ) # Enter some data and click 'Save' self.selenium.find_element(By.NAME, "profile_set-0-first_name").send_keys( "0 first name 1" ) self.selenium.find_element(By.NAME, "profile_set-0-last_name").send_keys( "0 last name 2" ) self.selenium.find_element(By.NAME, "profile_set-1-first_name").send_keys( "1 first name 1" ) self.selenium.find_element(By.NAME, "profile_set-1-last_name").send_keys( "1 last name 2" ) self.selenium.find_element(By.NAME, "profile_set-2-first_name").send_keys( "2 first name 1" ) self.selenium.find_element(By.NAME, "profile_set-2-last_name").send_keys( "2 last name 2" ) with self.wait_page_loaded(): self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click() # The objects have been created in the database self.assertEqual(ProfileCollection.objects.count(), 1) self.assertEqual(Profile.objects.count(), 3) def test_add_inline_link_absent_for_view_only_parent_model(self): from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.common.by import By user = User.objects.create_user("testing", password="password", is_staff=True) user.user_permissions.add( Permission.objects.get( codename="view_poll", content_type=ContentType.objects.get_for_model(Poll), ) ) user.user_permissions.add( *Permission.objects.filter( codename__endswith="question", content_type=ContentType.objects.get_for_model(Question), ).values_list("pk", flat=True) ) self.admin_login(username="testing", password="password") poll = Poll.objects.create(name="Survey") change_url = reverse("admin:admin_inlines_poll_change", args=(poll.id,)) self.selenium.get(self.live_server_url + change_url) with self.disable_implicit_wait(): with self.assertRaises(NoSuchElementException): self.selenium.find_element(By.LINK_TEXT, "Add another Question") def test_delete_inlines(self): from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret") self.selenium.get( self.live_server_url + reverse("admin:admin_inlines_profilecollection_add") ) # Add a few inlines self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click() self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click() self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click() self.selenium.find_element(By.LINK_TEXT, "Add another Profile").click() self.assertCountSeleniumElements( "#profile_set-group table tr.dynamic-profile_set", 5 ) self.assertCountSeleniumElements( "form#profilecollection_form tr.dynamic-profile_set#profile_set-0", 1 ) self.assertCountSeleniumElements( "form#profilecollection_form tr.dynamic-profile_set#profile_set-1", 1 ) 
self.assertCountSeleniumElements( "form#profilecollection_form tr.dynamic-profile_set#profile_set-2", 1 ) self.assertCountSeleniumElements( "form#profilecollection_form tr.dynamic-profile_set#profile_set-3", 1 ) self.assertCountSeleniumElements( "form#profilecollection_form tr.dynamic-profile_set#profile_set-4", 1 ) # Click on a few delete buttons self.selenium.find_element( By.CSS_SELECTOR, "form#profilecollection_form tr.dynamic-profile_set#profile_set-1 " "td.delete a", ).click() self.selenium.find_element( By.CSS_SELECTOR, "form#profilecollection_form tr.dynamic-profile_set#profile_set-2 " "td.delete a", ).click() # The rows are gone and the IDs have been re-sequenced self.assertCountSeleniumElements( "#profile_set-group table tr.dynamic-profile_set", 3 ) self.assertCountSeleniumElements( "form#profilecollection_form tr.dynamic-profile_set#profile_set-0", 1 ) self.assertCountSeleniumElements( "form#profilecollection_form tr.dynamic-profile_set#profile_set-1", 1 ) self.assertCountSeleniumElements( "form#profilecollection_form tr.dynamic-profile_set#profile_set-2", 1 ) def test_collapsed_inlines(self): from selenium.webdriver.common.by import By # Collapsed inlines have SHOW/HIDE links. self.admin_login(username="super", password="secret") self.selenium.get( self.live_server_url + reverse("admin:admin_inlines_author_add") ) # One field is in a stacked inline, other in a tabular one. test_fields = [ "#id_nonautopkbook_set-0-title", "#id_nonautopkbook_set-2-0-title", ] show_links = self.selenium.find_elements(By.LINK_TEXT, "SHOW") self.assertEqual(len(show_links), 3) for show_index, field_name in enumerate(test_fields, 0): self.wait_until_invisible(field_name) show_links[show_index].click() self.wait_until_visible(field_name) hide_links = self.selenium.find_elements(By.LINK_TEXT, "HIDE") self.assertEqual(len(hide_links), 2) for hide_index, field_name in enumerate(test_fields, 0): self.wait_until_visible(field_name) hide_links[hide_index].click() self.wait_until_invisible(field_name) def test_added_stacked_inline_with_collapsed_fields(self): from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret") self.selenium.get( self.live_server_url + reverse("admin:admin_inlines_teacher_add") ) self.selenium.find_element(By.LINK_TEXT, "Add another Child").click() test_fields = ["#id_child_set-0-name", "#id_child_set-1-name"] show_links = self.selenium.find_elements(By.LINK_TEXT, "SHOW") self.assertEqual(len(show_links), 2) for show_index, field_name in enumerate(test_fields, 0): self.wait_until_invisible(field_name) show_links[show_index].click() self.wait_until_visible(field_name) hide_links = self.selenium.find_elements(By.LINK_TEXT, "HIDE") self.assertEqual(len(hide_links), 2) for hide_index, field_name in enumerate(test_fields, 0): self.wait_until_visible(field_name) hide_links[hide_index].click() self.wait_until_invisible(field_name) def assertBorder(self, element, border): width, style, color = border.split(" ") border_properties = [ "border-bottom-%s", "border-left-%s", "border-right-%s", "border-top-%s", ] for prop in border_properties: self.assertEqual(element.value_of_css_property(prop % "width"), width) for prop in border_properties: self.assertEqual(element.value_of_css_property(prop % "style"), style) # Convert hex color to rgb. self.assertRegex(color, "#[0-9a-f]{6}") r, g, b = int(color[1:3], 16), int(color[3:5], 16), int(color[5:], 16) # The value may be expressed as either rgb() or rgba() depending on the # browser. 
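        # For example, #ba2121 corresponds to rgb(186, 33, 33) or
        # rgba(186, 33, 33, 1).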
colors = [ "rgb(%d, %d, %d)" % (r, g, b), "rgba(%d, %d, %d, 1)" % (r, g, b), ] for prop in border_properties: self.assertIn(element.value_of_css_property(prop % "color"), colors) def test_inline_formset_error_input_border(self): from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret") self.selenium.get( self.live_server_url + reverse("admin:admin_inlines_holder5_add") ) self.wait_until_visible("#id_dummy") self.selenium.find_element(By.ID, "id_dummy").send_keys(1) fields = ["id_inner5stacked_set-0-dummy", "id_inner5tabular_set-0-dummy"] show_links = self.selenium.find_elements(By.LINK_TEXT, "SHOW") for show_index, field_name in enumerate(fields): show_links[show_index].click() self.wait_until_visible("#" + field_name) self.selenium.find_element(By.ID, field_name).send_keys(1) # Before save all inputs have default border for inline in ("stacked", "tabular"): for field_name in ("name", "select", "text"): element_id = "id_inner5%s_set-0-%s" % (inline, field_name) self.assertBorder( self.selenium.find_element(By.ID, element_id), "1px solid #cccccc", ) self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click() # Test the red border around inputs by css selectors stacked_selectors = [".errors input", ".errors select", ".errors textarea"] for selector in stacked_selectors: self.assertBorder( self.selenium.find_element(By.CSS_SELECTOR, selector), "1px solid #ba2121", ) tabular_selectors = [ "td ul.errorlist + input", "td ul.errorlist + select", "td ul.errorlist + textarea", ] for selector in tabular_selectors: self.assertBorder( self.selenium.find_element(By.CSS_SELECTOR, selector), "1px solid #ba2121", ) def test_inline_formset_error(self): from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret") self.selenium.get( self.live_server_url + reverse("admin:admin_inlines_holder5_add") ) stacked_inline_formset_selector = ( "div#inner5stacked_set-group fieldset.module.collapse" ) tabular_inline_formset_selector = ( "div#inner5tabular_set-group fieldset.module.collapse" ) # Inlines without errors, both inlines collapsed self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click() self.assertCountSeleniumElements( stacked_inline_formset_selector + ".collapsed", 1 ) self.assertCountSeleniumElements( tabular_inline_formset_selector + ".collapsed", 1 ) show_links = self.selenium.find_elements(By.LINK_TEXT, "SHOW") self.assertEqual(len(show_links), 2) # Inlines with errors, both inlines expanded test_fields = ["#id_inner5stacked_set-0-dummy", "#id_inner5tabular_set-0-dummy"] for show_index, field_name in enumerate(test_fields): show_links[show_index].click() self.wait_until_visible(field_name) self.selenium.find_element(By.ID, field_name[1:]).send_keys(1) hide_links = self.selenium.find_elements(By.LINK_TEXT, "HIDE") self.assertEqual(len(hide_links), 2) for hide_index, field_name in enumerate(test_fields): hide_link = hide_links[hide_index] self.selenium.execute_script( "window.scrollTo(0, %s);" % hide_link.location["y"] ) hide_link.click() self.wait_until_invisible(field_name) with self.wait_page_loaded(): self.selenium.find_element(By.XPATH, '//input[@value="Save"]').click() with self.disable_implicit_wait(): self.assertCountSeleniumElements( stacked_inline_formset_selector + ".collapsed", 0 ) self.assertCountSeleniumElements( tabular_inline_formset_selector + ".collapsed", 0 ) self.assertCountSeleniumElements(stacked_inline_formset_selector, 1) 
self.assertCountSeleniumElements(tabular_inline_formset_selector, 1) def test_inlines_verbose_name(self): """ The item added by the "Add another XXX" link must use the correct verbose_name in the inline form. """ from selenium.webdriver.common.by import By self.admin_login(username="super", password="secret") # Hide sidebar. self.selenium.get( self.live_server_url + reverse("admin:admin_inlines_course_add") ) toggle_button = self.selenium.find_element( By.CSS_SELECTOR, "#toggle-nav-sidebar" ) toggle_button.click() # Each combination of horizontal/vertical filter with stacked/tabular # inlines. tests = [ "admin:admin_inlines_course_add", "admin:admin_inlines_courseproxy_add", "admin:admin_inlines_courseproxy1_add", "admin:admin_inlines_courseproxy2_add", ] css_selector = ".dynamic-class_set#class_set-%s h2" for url_name in tests: with self.subTest(url=url_name): self.selenium.get(self.live_server_url + reverse(url_name)) # First inline shows the verbose_name. available, chosen = self.selenium.find_elements( By.CSS_SELECTOR, css_selector % 0 ) self.assertEqual(available.text, "AVAILABLE ATTENDANT") self.assertEqual(chosen.text, "CHOSEN ATTENDANT") # Added inline should also have the correct verbose_name. self.selenium.find_element(By.LINK_TEXT, "Add another Class").click() available, chosen = self.selenium.find_elements( By.CSS_SELECTOR, css_selector % 1 ) self.assertEqual(available.text, "AVAILABLE ATTENDANT") self.assertEqual(chosen.text, "CHOSEN ATTENDANT") # Third inline should also have the correct verbose_name. self.selenium.find_element(By.LINK_TEXT, "Add another Class").click() available, chosen = self.selenium.find_elements( By.CSS_SELECTOR, css_selector % 2 ) self.assertEqual(available.text, "AVAILABLE ATTENDANT") self.assertEqual(chosen.text, "CHOSEN ATTENDANT")
92300e5e664f1300c833dadfdc5d6b9595ec7d2b243c569b92e1cbd73185d91a
import datetime import pickle import unittest import uuid from collections import namedtuple from copy import deepcopy from decimal import Decimal from unittest import mock from django.core.exceptions import FieldError from django.db import DatabaseError, NotSupportedError, connection from django.db.models import ( AutoField, Avg, BinaryField, BooleanField, Case, CharField, Count, DateField, DateTimeField, DecimalField, DurationField, Exists, Expression, ExpressionList, ExpressionWrapper, F, FloatField, Func, IntegerField, Max, Min, Model, OrderBy, OuterRef, Q, StdDev, Subquery, Sum, TimeField, UUIDField, Value, Variance, When, ) from django.db.models.expressions import ( Col, Combinable, CombinedExpression, NegatedExpression, RawSQL, Ref, ) from django.db.models.functions import ( Coalesce, Concat, Left, Length, Lower, Substr, Upper, ) from django.db.models.sql import constants from django.db.models.sql.datastructures import Join from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import ( Approximate, CaptureQueriesContext, isolate_apps, register_lookup, ) from django.utils.deprecation import RemovedInDjango50Warning from django.utils.functional import SimpleLazyObject from .models import ( UUID, UUIDPK, Company, Employee, Experiment, Manager, Number, RemoteEmployee, Result, SimulationRun, Time, ) class BasicExpressionsTests(TestCase): @classmethod def setUpTestData(cls): cls.example_inc = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10), ) cls.foobar_ltd = Company.objects.create( name="Foobar Ltd.", num_employees=3, num_chairs=4, based_in_eu=True, ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20), ) cls.max = Employee.objects.create( firstname="Max", lastname="Mustermann", salary=30 ) cls.gmbh = Company.objects.create( name="Test GmbH", num_employees=32, num_chairs=1, ceo=cls.max ) def setUp(self): self.company_query = Company.objects.values( "name", "num_employees", "num_chairs" ).order_by("name", "num_employees", "num_chairs") def test_annotate_values_aggregate(self): companies = ( Company.objects.annotate( salaries=F("ceo__salary"), ) .values("num_employees", "salaries") .aggregate( result=Sum( F("salaries") + F("num_employees"), output_field=IntegerField() ), ) ) self.assertEqual(companies["result"], 2395) def test_annotate_values_filter(self): companies = ( Company.objects.annotate( foo=RawSQL("%s", ["value"]), ) .filter(foo="value") .order_by("name") ) self.assertSequenceEqual( companies, [self.example_inc, self.foobar_ltd, self.gmbh], ) def test_annotate_values_count(self): companies = Company.objects.annotate(foo=RawSQL("%s", ["value"])) self.assertEqual(companies.count(), 3) @skipUnlessDBFeature("supports_boolean_expr_in_select_clause") def test_filtering_on_annotate_that_uses_q(self): self.assertEqual( Company.objects.annotate( num_employees_check=ExpressionWrapper( Q(num_employees__gt=3), output_field=BooleanField() ) ) .filter(num_employees_check=True) .count(), 2, ) def test_filtering_on_q_that_is_boolean(self): self.assertEqual( Company.objects.filter( ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField()) ).count(), 2, ) def test_filtering_on_rawsql_that_is_boolean(self): self.assertEqual( Company.objects.filter( RawSQL("num_employees > %s", (3,), output_field=BooleanField()), ).count(), 2, ) def test_filter_inter_attribute(self): # We can filter on attribute relationships on same model 
obj, e.g.
        # find companies where the number of employees is greater
        # than the number of chairs.
        self.assertSequenceEqual(
            self.company_query.filter(num_employees__gt=F("num_chairs")),
            [
                {
                    "num_chairs": 5,
                    "name": "Example Inc.",
                    "num_employees": 2300,
                },
                {"num_chairs": 1, "name": "Test GmbH", "num_employees": 32},
            ],
        )

    def test_update(self):
        # We can set one field to the value of another field. Make sure we
        # have enough chairs.
        self.company_query.update(num_chairs=F("num_employees"))
        self.assertSequenceEqual(
            self.company_query,
            [
                {"num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300},
                {"num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3},
                {"num_chairs": 32, "name": "Test GmbH", "num_employees": 32},
            ],
        )

    def test_arithmetic(self):
        # We can perform arithmetic operations in expressions. Make sure we
        # have 2 spare chairs.
        self.company_query.update(num_chairs=F("num_employees") + 2)
        self.assertSequenceEqual(
            self.company_query,
            [
                {"num_chairs": 2302, "name": "Example Inc.", "num_employees": 2300},
                {"num_chairs": 5, "name": "Foobar Ltd.", "num_employees": 3},
                {"num_chairs": 34, "name": "Test GmbH", "num_employees": 32},
            ],
        )

    def test_order_of_operations(self):
        # The standard order of operations is followed.
        self.company_query.update(
            num_chairs=F("num_employees") + 2 * F("num_employees")
        )
        self.assertSequenceEqual(
            self.company_query,
            [
                {"num_chairs": 6900, "name": "Example Inc.", "num_employees": 2300},
                {"num_chairs": 9, "name": "Foobar Ltd.", "num_employees": 3},
                {"num_chairs": 96, "name": "Test GmbH", "num_employees": 32},
            ],
        )

    def test_parenthesis_priority(self):
        # The order of operations can be overridden with parentheses.
        self.company_query.update(
            num_chairs=(F("num_employees") + 2) * F("num_employees")
        )
        self.assertSequenceEqual(
            self.company_query,
            [
                {"num_chairs": 5294600, "name": "Example Inc.", "num_employees": 2300},
                {"num_chairs": 15, "name": "Foobar Ltd.", "num_employees": 3},
                {"num_chairs": 1088, "name": "Test GmbH", "num_employees": 32},
            ],
        )

    def test_update_with_fk(self):
        # A ForeignKey can be updated with the value of another ForeignKey.
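        # For example, update(point_of_contact=F("ceo")) copies each row's
        # ceo_id into point_of_contact_id in a single UPDATE statement,
        # without loading the objects into Python.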
self.assertEqual(Company.objects.update(point_of_contact=F("ceo")), 3) self.assertQuerySetEqual( Company.objects.all(), ["Joe Smith", "Frank Meyer", "Max Mustermann"], lambda c: str(c.point_of_contact), ordered=False, ) def test_update_with_none(self): Number.objects.create(integer=1, float=1.0) Number.objects.create(integer=2) Number.objects.filter(float__isnull=False).update(float=Value(None)) self.assertQuerySetEqual( Number.objects.all(), [None, None], lambda n: n.float, ordered=False ) def test_filter_with_join(self): # F Expressions can also span joins Company.objects.update(point_of_contact=F("ceo")) c = Company.objects.first() c.point_of_contact = Employee.objects.create( firstname="Guido", lastname="van Rossum" ) c.save() self.assertQuerySetEqual( Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), ["Foobar Ltd.", "Test GmbH"], lambda c: c.name, ordered=False, ) Company.objects.exclude(ceo__firstname=F("point_of_contact__firstname")).update( name="foo" ) self.assertEqual( Company.objects.exclude(ceo__firstname=F("point_of_contact__firstname")) .get() .name, "foo", ) msg = "Joined field references are not permitted in this query" with self.assertRaisesMessage(FieldError, msg): Company.objects.exclude( ceo__firstname=F("point_of_contact__firstname") ).update(name=F("point_of_contact__lastname")) def test_object_update(self): # F expressions can be used to update attributes on single objects self.gmbh.num_employees = F("num_employees") + 4 self.gmbh.save() self.gmbh.refresh_from_db() self.assertEqual(self.gmbh.num_employees, 36) def test_new_object_save(self): # We should be able to use Funcs when inserting new data test_co = Company( name=Lower(Value("UPPER")), num_employees=32, num_chairs=1, ceo=self.max ) test_co.save() test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_new_object_create(self): test_co = Company.objects.create( name=Lower(Value("UPPER")), num_employees=32, num_chairs=1, ceo=self.max ) test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_object_create_with_aggregate(self): # Aggregates are not allowed when inserting new data msg = ( "Aggregate functions are not allowed in this query " "(num_employees=Max(Value(1)))." ) with self.assertRaisesMessage(FieldError, msg): Company.objects.create( name="Company", num_employees=Max(Value(1)), num_chairs=1, ceo=Employee.objects.create( firstname="Just", lastname="Doit", salary=30 ), ) def test_object_update_fk(self): # F expressions cannot be used to update attributes which are foreign # keys, or attributes which involve joins. test_gmbh = Company.objects.get(pk=self.gmbh.pk) msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.' 
        with self.assertRaisesMessage(ValueError, msg):
            test_gmbh.point_of_contact = F("ceo")

        test_gmbh.point_of_contact = self.gmbh.ceo
        test_gmbh.save()
        test_gmbh.name = F("ceo__lastname")
        msg = "Joined field references are not permitted in this query"
        with self.assertRaisesMessage(FieldError, msg):
            test_gmbh.save()

    def test_update_inherited_field_value(self):
        msg = "Joined field references are not permitted in this query"
        with self.assertRaisesMessage(FieldError, msg):
            RemoteEmployee.objects.update(adjusted_salary=F("salary") * 5)

    def test_object_update_unsaved_objects(self):
        # F expressions cannot be used to update attributes on objects that
        # do not yet exist in the database.
        acme = Company(
            name="The Acme Widget Co.", num_employees=12, num_chairs=5, ceo=self.max
        )
        acme.num_employees = F("num_employees") + 16
        msg = (
            'Failed to insert expression "Col(expressions_company, '
            'expressions.Company.num_employees) + Value(16)" on '
            "expressions.Company.num_employees. F() expressions can only be "
            "used to update, not to insert."
        )
        with self.assertRaisesMessage(ValueError, msg):
            acme.save()

        acme.num_employees = 12
        acme.name = Lower(F("name"))
        msg = (
            'Failed to insert expression "Lower(Col(expressions_company, '
            'expressions.Company.name))" on expressions.Company.name. F() '
            "expressions can only be used to update, not to insert."
        )
        with self.assertRaisesMessage(ValueError, msg):
            acme.save()

    def test_ticket_11722_iexact_lookup(self):
        Employee.objects.create(firstname="John", lastname="Doe")
        test = Employee.objects.create(firstname="Test", lastname="test")
        queryset = Employee.objects.filter(firstname__iexact=F("lastname"))
        self.assertSequenceEqual(queryset, [test])

    def test_ticket_16731_startswith_lookup(self):
        Employee.objects.create(firstname="John", lastname="Doe")
        e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
        e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
        self.assertSequenceEqual(
            Employee.objects.filter(lastname__startswith=F("firstname")),
            [e2, e3] if connection.features.has_case_insensitive_like else [e2],
        )
        qs = Employee.objects.filter(lastname__istartswith=F("firstname")).order_by(
            "pk"
        )
        self.assertSequenceEqual(qs, [e2, e3])

    def test_ticket_18375_join_reuse(self):
        # Reverse multijoin F() references and the lookup target the same
        # join. Pre #18375 the F() join was generated first and the lookup
        # couldn't reuse that join.
        qs = Employee.objects.filter(
            company_ceo_set__num_chairs=F("company_ceo_set__num_employees")
        )
        self.assertEqual(str(qs.query).count("JOIN"), 1)

    def test_ticket_18375_kwarg_ordering(self):
        # This query used to be dict-ordering dependent: if "gte=1" was seen
        # first, the F() reused the join generated by the gte lookup; if the
        # F() was seen first, it generated a join the other lookups could
        # not reuse.
        qs = Employee.objects.filter(
            company_ceo_set__num_chairs=F("company_ceo_set__num_employees"),
            company_ceo_set__num_chairs__gte=1,
        )
        self.assertEqual(str(qs.query).count("JOIN"), 1)

    def test_ticket_18375_kwarg_ordering_2(self):
        # Another case similar to the one above: the same join appears in two
        # filter kwargs, once in the lhs lookup and once in F(). Before
        # #18375 the number of joins generated was random when dict
        # randomization was enabled, i.e. the generated query depended on
        # which clause was seen first.
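        # With the fix, both references resolve to a single reusable join,
        # verified below by counting "JOIN" in the generated SQL.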
qs = Employee.objects.filter( company_ceo_set__num_employees=F("pk"), pk=F("company_ceo_set__num_employees"), ) self.assertEqual(str(qs.query).count("JOIN"), 1) def test_ticket_18375_chained_filters(self): # F() expressions do not reuse joins from previous filter. qs = Employee.objects.filter(company_ceo_set__num_employees=F("pk")).filter( company_ceo_set__num_employees=F("company_ceo_set__num_employees") ) self.assertEqual(str(qs.query).count("JOIN"), 2) def test_order_by_exists(self): mary = Employee.objects.create( firstname="Mary", lastname="Mustermann", salary=20 ) mustermanns_by_seniority = Employee.objects.filter( lastname="Mustermann" ).order_by( # Order by whether the employee is the CEO of a company Exists(Company.objects.filter(ceo=OuterRef("pk"))).desc() ) self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary]) def test_order_by_multiline_sql(self): raw_order_by = ( RawSQL( """ CASE WHEN num_employees > 1000 THEN num_chairs ELSE 0 END """, [], ).desc(), RawSQL( """ CASE WHEN num_chairs > 1 THEN 1 ELSE 0 END """, [], ).asc(), ) for qs in ( Company.objects.all(), Company.objects.distinct(), ): with self.subTest(qs=qs): self.assertSequenceEqual( qs.order_by(*raw_order_by), [self.example_inc, self.gmbh, self.foobar_ltd], ) def test_outerref(self): inner = Company.objects.filter(point_of_contact=OuterRef("pk")) msg = ( "This queryset contains a reference to an outer query and may only " "be used in a subquery." ) with self.assertRaisesMessage(ValueError, msg): inner.exists() outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) self.assertIs(outer.exists(), True) def test_exist_single_field_output_field(self): queryset = Company.objects.values("pk") self.assertIsInstance(Exists(queryset).output_field, BooleanField) def test_subquery(self): Company.objects.filter(name="Example Inc.").update( point_of_contact=Employee.objects.get(firstname="Joe", lastname="Smith"), ceo=self.max, ) Employee.objects.create(firstname="Bob", lastname="Brown", salary=40) qs = ( Employee.objects.annotate( is_point_of_contact=Exists( Company.objects.filter(point_of_contact=OuterRef("pk")) ), is_not_point_of_contact=~Exists( Company.objects.filter(point_of_contact=OuterRef("pk")) ), is_ceo_of_small_company=Exists( Company.objects.filter(num_employees__lt=200, ceo=OuterRef("pk")) ), is_ceo_small_2=~~Exists( Company.objects.filter(num_employees__lt=200, ceo=OuterRef("pk")) ), largest_company=Subquery( Company.objects.order_by("-num_employees") .filter(Q(ceo=OuterRef("pk")) | Q(point_of_contact=OuterRef("pk"))) .values("name")[:1], output_field=CharField(), ), ) .values( "firstname", "is_point_of_contact", "is_not_point_of_contact", "is_ceo_of_small_company", "is_ceo_small_2", "largest_company", ) .order_by("firstname") ) results = list(qs) # Could use Coalesce(subq, Value('')) instead except for the bug in # cx_Oracle mentioned in #23843. 
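        # Oracle stores and returns empty strings as NULL, so normalize
        # Bob's empty largest_company back to None before comparing.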
bob = results[0] if ( bob["largest_company"] == "" and connection.features.interprets_empty_strings_as_nulls ): bob["largest_company"] = None self.assertEqual( results, [ { "firstname": "Bob", "is_point_of_contact": False, "is_not_point_of_contact": True, "is_ceo_of_small_company": False, "is_ceo_small_2": False, "largest_company": None, }, { "firstname": "Frank", "is_point_of_contact": False, "is_not_point_of_contact": True, "is_ceo_of_small_company": True, "is_ceo_small_2": True, "largest_company": "Foobar Ltd.", }, { "firstname": "Joe", "is_point_of_contact": True, "is_not_point_of_contact": False, "is_ceo_of_small_company": False, "is_ceo_small_2": False, "largest_company": "Example Inc.", }, { "firstname": "Max", "is_point_of_contact": False, "is_not_point_of_contact": True, "is_ceo_of_small_company": True, "is_ceo_small_2": True, "largest_company": "Example Inc.", }, ], ) # A less elegant way to write the same query: this uses a LEFT OUTER # JOIN and an IS NULL, inside a WHERE NOT IN which is probably less # efficient than EXISTS. self.assertCountEqual( qs.filter(is_point_of_contact=True).values("pk"), Employee.objects.exclude(company_point_of_contact_set=None).values("pk"), ) def test_subquery_eq(self): qs = Employee.objects.annotate( is_ceo=Exists(Company.objects.filter(ceo=OuterRef("pk"))), is_point_of_contact=Exists( Company.objects.filter(point_of_contact=OuterRef("pk")), ), small_company=Exists( queryset=Company.objects.filter(num_employees__lt=200), ), ).filter(is_ceo=True, is_point_of_contact=False, small_company=True) self.assertNotEqual( qs.query.annotations["is_ceo"], qs.query.annotations["is_point_of_contact"], ) self.assertNotEqual( qs.query.annotations["is_ceo"], qs.query.annotations["small_company"], ) def test_subquery_sql(self): employees = Employee.objects.all() employees_subquery = Subquery(employees) self.assertIs(employees_subquery.query.subquery, True) self.assertIs(employees.query.subquery, False) compiler = employees_subquery.query.get_compiler(connection=connection) sql, _ = employees_subquery.as_sql(compiler, connection) self.assertIn("(SELECT ", sql) def test_in_subquery(self): # This is a contrived test (and you really wouldn't write this query), # but it is a succinct way to test the __in=Subquery() construct. 
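        # __in also accepts a queryset directly; wrapping it in Subquery()
        # is redundant here but makes the construct under test explicit.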
small_companies = Company.objects.filter(num_employees__lt=200).values("pk") subquery_test = Company.objects.filter(pk__in=Subquery(small_companies)) self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh]) subquery_test2 = Company.objects.filter( pk=Subquery(small_companies.filter(num_employees=3)) ) self.assertCountEqual(subquery_test2, [self.foobar_ltd]) def test_uuid_pk_subquery(self): u = UUIDPK.objects.create() UUID.objects.create(uuid_fk=u) qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values("uuid_fk__id"))) self.assertCountEqual(qs, [u]) def test_nested_subquery(self): inner = Company.objects.filter(point_of_contact=OuterRef("pk")) outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) contrived = Employee.objects.annotate( is_point_of_contact=Subquery( outer.filter(pk=OuterRef("pk")).values("is_point_of_contact"), output_field=BooleanField(), ), ) self.assertCountEqual(contrived.values_list(), outer.values_list()) def test_nested_subquery_join_outer_ref(self): inner = Employee.objects.filter(pk=OuterRef("ceo__pk")).values("pk") qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( ceo__in=inner, ceo__pk=OuterRef("pk"), ).values("pk"), ), ) self.assertSequenceEqual( qs.values_list("ceo_company", flat=True), [self.example_inc.pk, self.foobar_ltd.pk, self.gmbh.pk], ) def test_nested_subquery_outer_ref_2(self): first = Time.objects.create(time="09:00") second = Time.objects.create(time="17:00") third = Time.objects.create(time="21:00") SimulationRun.objects.bulk_create( [ SimulationRun(start=first, end=second, midpoint="12:00"), SimulationRun(start=first, end=third, midpoint="15:00"), SimulationRun(start=second, end=first, midpoint="00:00"), ] ) inner = Time.objects.filter( time=OuterRef(OuterRef("time")), pk=OuterRef("start") ).values("time") middle = SimulationRun.objects.annotate(other=Subquery(inner)).values("other")[ :1 ] outer = Time.objects.annotate(other=Subquery(middle, output_field=TimeField())) # This is a contrived example. It exercises the double OuterRef form. self.assertCountEqual(outer, [first, second, third]) def test_nested_subquery_outer_ref_with_autofield(self): first = Time.objects.create(time="09:00") second = Time.objects.create(time="17:00") SimulationRun.objects.create(start=first, end=second, midpoint="12:00") inner = SimulationRun.objects.filter(start=OuterRef(OuterRef("pk"))).values( "start" ) middle = Time.objects.annotate(other=Subquery(inner)).values("other")[:1] outer = Time.objects.annotate( other=Subquery(middle, output_field=IntegerField()) ) # This exercises the double OuterRef form with AutoField as pk. 
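        # OuterRef(OuterRef("pk")) resolves against the outermost queryset,
        # so the innermost filter compares against each outer Time row's pk.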
self.assertCountEqual(outer, [first, second]) def test_annotations_within_subquery(self): Company.objects.filter(num_employees__lt=50).update( ceo=Employee.objects.get(firstname="Frank") ) inner = ( Company.objects.filter(ceo=OuterRef("pk")) .values("ceo") .annotate(total_employees=Sum("num_employees")) .values("total_employees") ) outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter( salary__lte=Subquery(inner) ) self.assertSequenceEqual( outer.order_by("-total_employees").values("salary", "total_employees"), [ {"salary": 10, "total_employees": 2300}, {"salary": 20, "total_employees": 35}, ], ) def test_subquery_references_joined_table_twice(self): inner = Company.objects.filter( num_chairs__gte=OuterRef("ceo__salary"), num_employees__gte=OuterRef("point_of_contact__salary"), ) # Another contrived example (there is no need to have a subquery here) outer = Company.objects.filter(pk__in=Subquery(inner.values("pk"))) self.assertFalse(outer.exists()) def test_subquery_filter_by_aggregate(self): Number.objects.create(integer=1000, float=1.2) Employee.objects.create(salary=1000) qs = Number.objects.annotate( min_valuable_count=Subquery( Employee.objects.filter( salary=OuterRef("integer"), ) .annotate(cnt=Count("salary")) .filter(cnt__gt=0) .values("cnt")[:1] ), ) self.assertEqual(qs.get().float, 1.2) def test_subquery_filter_by_lazy(self): self.max.manager = Manager.objects.create(name="Manager") self.max.save() max_manager = SimpleLazyObject( lambda: Manager.objects.get(pk=self.max.manager.pk) ) qs = Company.objects.annotate( ceo_manager=Subquery( Employee.objects.filter( lastname=OuterRef("ceo__lastname"), ).values("manager"), ), ).filter(ceo_manager=max_manager) self.assertEqual(qs.get(), self.gmbh) def test_aggregate_subquery_annotation(self): with self.assertNumQueries(1) as ctx: aggregate = Company.objects.annotate( ceo_salary=Subquery( Employee.objects.filter( id=OuterRef("ceo_id"), ).values("salary") ), ).aggregate( ceo_salary_gt_20=Count("pk", filter=Q(ceo_salary__gt=20)), ) self.assertEqual(aggregate, {"ceo_salary_gt_20": 1}) # Aggregation over a subquery annotation doesn't annotate the subquery # twice in the inner query. sql = ctx.captured_queries[0]["sql"] self.assertLessEqual(sql.count("SELECT"), 3) # GROUP BY isn't required to aggregate over a query that doesn't # contain nested aggregates. 
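        # The assertion below guards against regressions that would
        # reintroduce an unnecessary GROUP BY for this query.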
self.assertNotIn("GROUP BY", sql) @skipUnlessDBFeature("supports_over_clause") def test_aggregate_rawsql_annotation(self): with self.assertNumQueries(1) as ctx: aggregate = Company.objects.annotate( salary=RawSQL("SUM(num_chairs) OVER (ORDER BY num_employees)", []), ).aggregate( count=Count("pk"), ) self.assertEqual(aggregate, {"count": 3}) sql = ctx.captured_queries[0]["sql"] self.assertNotIn("GROUP BY", sql) def test_explicit_output_field(self): class FuncA(Func): output_field = CharField() class FuncB(Func): pass expr = FuncB(FuncA()) self.assertEqual(expr.output_field, FuncA.output_field) def test_outerref_mixed_case_table_name(self): inner = Result.objects.filter(result_time__gte=OuterRef("experiment__assigned")) outer = Result.objects.filter(pk__in=Subquery(inner.values("pk"))) self.assertFalse(outer.exists()) def test_outerref_with_operator(self): inner = Company.objects.filter(num_employees=OuterRef("ceo__salary") + 2) outer = Company.objects.filter(pk__in=Subquery(inner.values("pk"))) self.assertEqual(outer.get().name, "Test GmbH") def test_nested_outerref_with_function(self): self.gmbh.point_of_contact = Employee.objects.get(lastname="Meyer") self.gmbh.save() inner = Employee.objects.filter( lastname__startswith=Left(OuterRef(OuterRef("lastname")), 1), ) qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( point_of_contact__in=inner, ceo__pk=OuterRef("pk"), ).values("name"), ), ).filter(ceo_company__isnull=False) self.assertEqual(qs.get().ceo_company, "Test GmbH") def test_annotation_with_outerref(self): gmbh_salary = Company.objects.annotate( max_ceo_salary_raise=Subquery( Company.objects.annotate( salary_raise=OuterRef("num_employees") + F("num_employees"), ) .order_by("-salary_raise") .values("salary_raise")[:1], output_field=IntegerField(), ), ).get(pk=self.gmbh.pk) self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332) def test_annotation_with_nested_outerref(self): self.gmbh.point_of_contact = Employee.objects.get(lastname="Meyer") self.gmbh.save() inner = Employee.objects.annotate( outer_lastname=OuterRef(OuterRef("lastname")), ).filter(lastname__startswith=Left("outer_lastname", 1)) qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( point_of_contact__in=inner, ceo__pk=OuterRef("pk"), ).values("name"), ), ).filter(ceo_company__isnull=False) self.assertEqual(qs.get().ceo_company, "Test GmbH") def test_pickle_expression(self): expr = Value(1) expr.convert_value # populate cached property self.assertEqual(pickle.loads(pickle.dumps(expr)), expr) def test_incorrect_field_in_F_expression(self): with self.assertRaisesMessage( FieldError, "Cannot resolve keyword 'nope' into field." ): list(Employee.objects.filter(firstname=F("nope"))) def test_incorrect_joined_field_in_F_expression(self): with self.assertRaisesMessage( FieldError, "Cannot resolve keyword 'nope' into field." 
): list(Company.objects.filter(ceo__pk=F("point_of_contact__nope"))) def test_exists_in_filter(self): inner = Company.objects.filter(ceo=OuterRef("pk")).values("pk") qs1 = Employee.objects.filter(Exists(inner)) qs2 = Employee.objects.annotate(found=Exists(inner)).filter(found=True) self.assertCountEqual(qs1, qs2) self.assertFalse(Employee.objects.exclude(Exists(inner)).exists()) self.assertCountEqual(qs2, Employee.objects.exclude(~Exists(inner))) def test_subquery_in_filter(self): inner = Company.objects.filter(ceo=OuterRef("pk")).values("based_in_eu") self.assertSequenceEqual( Employee.objects.filter(Subquery(inner)), [self.foobar_ltd.ceo], ) def test_subquery_group_by_outerref_in_filter(self): inner = ( Company.objects.annotate( employee=OuterRef("pk"), ) .values("employee") .annotate( min_num_chairs=Min("num_chairs"), ) .values("ceo") ) self.assertIs(Employee.objects.filter(pk__in=Subquery(inner)).exists(), True) def test_case_in_filter_if_boolean_output_field(self): is_ceo = Company.objects.filter(ceo=OuterRef("pk")) is_poc = Company.objects.filter(point_of_contact=OuterRef("pk")) qs = Employee.objects.filter( Case( When(Exists(is_ceo), then=True), When(Exists(is_poc), then=True), default=False, output_field=BooleanField(), ), ) self.assertCountEqual(qs, [self.example_inc.ceo, self.foobar_ltd.ceo, self.max]) def test_boolean_expression_combined(self): is_ceo = Company.objects.filter(ceo=OuterRef("pk")) is_poc = Company.objects.filter(point_of_contact=OuterRef("pk")) self.gmbh.point_of_contact = self.max self.gmbh.save() self.assertCountEqual( Employee.objects.filter(Exists(is_ceo) | Exists(is_poc)), [self.example_inc.ceo, self.foobar_ltd.ceo, self.max], ) self.assertCountEqual( Employee.objects.filter(Exists(is_ceo) & Exists(is_poc)), [self.max], ) self.assertCountEqual( Employee.objects.filter(Exists(is_ceo) & Q(salary__gte=30)), [self.max], ) self.assertCountEqual( Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)), [self.example_inc.ceo, self.max], ) self.assertCountEqual( Employee.objects.filter(Q(salary__gte=30) & Exists(is_ceo)), [self.max], ) self.assertCountEqual( Employee.objects.filter(Q(salary__lt=15) | Exists(is_poc)), [self.example_inc.ceo, self.max], ) def test_boolean_expression_combined_with_empty_Q(self): is_poc = Company.objects.filter(point_of_contact=OuterRef("pk")) self.gmbh.point_of_contact = self.max self.gmbh.save() tests = [ Exists(is_poc) & Q(), Q() & Exists(is_poc), Exists(is_poc) | Q(), Q() | Exists(is_poc), Q(Exists(is_poc)) & Q(), Q() & Q(Exists(is_poc)), Q(Exists(is_poc)) | Q(), Q() | Q(Exists(is_poc)), ] for conditions in tests: with self.subTest(conditions): self.assertCountEqual(Employee.objects.filter(conditions), [self.max]) def test_boolean_expression_in_Q(self): is_poc = Company.objects.filter(point_of_contact=OuterRef("pk")) self.gmbh.point_of_contact = self.max self.gmbh.save() self.assertCountEqual(Employee.objects.filter(Q(Exists(is_poc))), [self.max]) class IterableLookupInnerExpressionsTests(TestCase): @classmethod def setUpTestData(cls): ceo = Employee.objects.create(firstname="Just", lastname="Doit", salary=30) # MySQL requires that the values calculated for expressions don't pass # outside of the field's range, so it's inconvenient to use the values # in the more general tests. 
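# The company names encode the test data: "5020 Ltd" has 50 employees
# and 20 chairs, "99300 Ltd" has 99 employees and 300 chairs, and so on.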
cls.c5020 = Company.objects.create( name="5020 Ltd", num_employees=50, num_chairs=20, ceo=ceo ) cls.c5040 = Company.objects.create( name="5040 Ltd", num_employees=50, num_chairs=40, ceo=ceo ) cls.c5050 = Company.objects.create( name="5050 Ltd", num_employees=50, num_chairs=50, ceo=ceo ) cls.c5060 = Company.objects.create( name="5060 Ltd", num_employees=50, num_chairs=60, ceo=ceo ) cls.c99300 = Company.objects.create( name="99300 Ltd", num_employees=99, num_chairs=300, ceo=ceo ) def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self): # __in lookups can use F() expressions for integers. queryset = Company.objects.filter(num_employees__in=([F("num_chairs") - 10])) self.assertSequenceEqual(queryset, [self.c5060]) self.assertCountEqual( Company.objects.filter( num_employees__in=([F("num_chairs") - 10, F("num_chairs") + 10]) ), [self.c5040, self.c5060], ) self.assertCountEqual( Company.objects.filter( num_employees__in=( [F("num_chairs") - 10, F("num_chairs"), F("num_chairs") + 10] ) ), [self.c5040, self.c5050, self.c5060], ) def test_expressions_in_lookups_join_choice(self): midpoint = datetime.time(13, 0) t1 = Time.objects.create(time=datetime.time(12, 0)) t2 = Time.objects.create(time=datetime.time(14, 0)) s1 = SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint) SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=None, end=None, midpoint=midpoint) queryset = SimulationRun.objects.filter( midpoint__range=[F("start__time"), F("end__time")] ) self.assertSequenceEqual(queryset, [s1]) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.INNER) queryset = SimulationRun.objects.exclude( midpoint__range=[F("start__time"), F("end__time")] ) self.assertQuerySetEqual(queryset, [], ordered=False) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.LOUTER) def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self): # Range lookups can use F() expressions for integers. Company.objects.filter(num_employees__exact=F("num_chairs")) self.assertCountEqual( Company.objects.filter(num_employees__range=(F("num_chairs"), 100)), [self.c5020, self.c5040, self.c5050], ) self.assertCountEqual( Company.objects.filter( num_employees__range=(F("num_chairs") - 10, F("num_chairs") + 10) ), [self.c5040, self.c5050, self.c5060], ) self.assertCountEqual( Company.objects.filter(num_employees__range=(F("num_chairs") - 10, 100)), [self.c5020, self.c5040, self.c5050, self.c5060], ) self.assertCountEqual( Company.objects.filter(num_employees__range=(1, 100)), [self.c5020, self.c5040, self.c5050, self.c5060, self.c99300], ) def test_range_lookup_namedtuple(self): EmployeeRange = namedtuple("EmployeeRange", ["minimum", "maximum"]) qs = Company.objects.filter( num_employees__range=EmployeeRange(minimum=51, maximum=100), ) self.assertSequenceEqual(qs, [self.c99300]) @unittest.skipUnless( connection.vendor == "sqlite", "This defensive test only works on databases that don't validate parameter " "types", ) def test_expressions_not_introduce_sql_injection_via_untrusted_string_inclusion( self, ): """ This tests that SQL injection isn't possible using compilation of expressions in iterable filters, as their compilation happens before the main query compilation. 
It's limited to SQLite, as PostgreSQL, Oracle and other vendors have defense in depth against this by type checking. Testing against SQLite (the most permissive of the built-in databases) demonstrates that the problem doesn't exist while keeping the test simple. """ queryset = Company.objects.filter(name__in=[F("num_chairs") + "1)) OR ((1==1"]) self.assertQuerySetEqual(queryset, [], ordered=False) def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self): start = datetime.datetime(2016, 2, 3, 15, 0, 0) end = datetime.datetime(2016, 2, 5, 15, 0, 0) experiment_1 = Experiment.objects.create( name="Integrity testing", assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) experiment_2 = Experiment.objects.create( name="Taste testing", assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) r1 = Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 2, 4, 15, 0, 0), ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 3, 10, 2, 0, 0), ) Result.objects.create( experiment=experiment_2, result_time=datetime.datetime(2016, 1, 8, 5, 0, 0), ) within_experiment_time = [F("experiment__start"), F("experiment__end")] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertSequenceEqual(queryset, [r1]) class FTests(SimpleTestCase): def test_deepcopy(self): f = F("foo") g = deepcopy(f) self.assertEqual(f.name, g.name) def test_deconstruct(self): f = F("name") path, args, kwargs = f.deconstruct() self.assertEqual(path, "django.db.models.F") self.assertEqual(args, (f.name,)) self.assertEqual(kwargs, {}) def test_equal(self): f = F("name") same_f = F("name") other_f = F("username") self.assertEqual(f, same_f) self.assertNotEqual(f, other_f) def test_hash(self): d = {F("name"): "Bob"} self.assertIn(F("name"), d) self.assertEqual(d[F("name")], "Bob") def test_not_equal_Value(self): f = F("name") value = Value("name") self.assertNotEqual(f, value) self.assertNotEqual(value, f) class ExpressionsTests(TestCase): def test_F_reuse(self): f = F("id") n = Number.objects.create(integer=-1) c = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith"), ) c_qs = Company.objects.filter(id=f) self.assertEqual(c_qs.get(), c) # Reuse the same F-object for another queryset n_qs = Number.objects.filter(id=f) self.assertEqual(n_qs.get(), n) # The original query still works correctly self.assertEqual(c_qs.get(), c) def test_patterns_escape(self): r""" Special characters (e.g. 
%, _ and \) stored in the database are properly escaped when using a pattern lookup with an expression -- refs #16731 """ Employee.objects.bulk_create( [ Employee(firstname="Johnny", lastname="%John"), Employee(firstname="Jean-Claude", lastname="Claud_"), Employee(firstname="Jean-Claude", lastname="Claude%"), Employee(firstname="Johnny", lastname="Joh\\n"), Employee(firstname="Johnny", lastname="_ohn"), ] ) claude = Employee.objects.create(firstname="Jean-Claude", lastname="Claude") john = Employee.objects.create(firstname="Johnny", lastname="John") john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%Joh\\n") self.assertCountEqual( Employee.objects.filter(firstname__contains=F("lastname")), [john_sign, john, claude], ) self.assertCountEqual( Employee.objects.filter(firstname__startswith=F("lastname")), [john_sign, john], ) self.assertSequenceEqual( Employee.objects.filter(firstname__endswith=F("lastname")), [claude], ) def test_insensitive_patterns_escape(self): r""" Special characters (e.g. %, _ and \) stored in the database are properly escaped when using a case-insensitive pattern lookup with an expression -- refs #16731 """ Employee.objects.bulk_create( [ Employee(firstname="Johnny", lastname="%john"), Employee(firstname="Jean-Claude", lastname="claud_"), Employee(firstname="Jean-Claude", lastname="claude%"), Employee(firstname="Johnny", lastname="joh\\n"), Employee(firstname="Johnny", lastname="_ohn"), ] ) claude = Employee.objects.create(firstname="Jean-Claude", lastname="claude") john = Employee.objects.create(firstname="Johnny", lastname="john") john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%joh\\n") self.assertCountEqual( Employee.objects.filter(firstname__icontains=F("lastname")), [john_sign, john, claude], ) self.assertCountEqual( Employee.objects.filter(firstname__istartswith=F("lastname")), [john_sign, john], ) self.assertSequenceEqual( Employee.objects.filter(firstname__iendswith=F("lastname")), [claude], ) @isolate_apps("expressions") class SimpleExpressionTests(SimpleTestCase): def test_equal(self): self.assertEqual(Expression(), Expression()) self.assertEqual( Expression(IntegerField()), Expression(output_field=IntegerField()) ) self.assertEqual(Expression(IntegerField()), mock.ANY) self.assertNotEqual(Expression(IntegerField()), Expression(CharField())) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( Expression(TestModel._meta.get_field("field")), Expression(TestModel._meta.get_field("other_field")), ) def test_hash(self): self.assertEqual(hash(Expression()), hash(Expression())) self.assertEqual( hash(Expression(IntegerField())), hash(Expression(output_field=IntegerField())), ) self.assertNotEqual( hash(Expression(IntegerField())), hash(Expression(CharField())), ) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( hash(Expression(TestModel._meta.get_field("field"))), hash(Expression(TestModel._meta.get_field("other_field"))), ) class ExpressionsNumericTests(TestCase): @classmethod def setUpTestData(cls): Number(integer=-1).save() Number(integer=42).save() Number(integer=1337).save() Number.objects.update(float=F("integer")) def test_fill_with_value_from_same_object(self): """ We can fill a value in all objects with another value of the same object.
""" self.assertQuerySetEqual( Number.objects.all(), [(-1, -1), (42, 42), (1337, 1337)], lambda n: (n.integer, round(n.float)), ordered=False, ) def test_increment_value(self): """ We can increment a value of all objects in a query set. """ self.assertEqual( Number.objects.filter(integer__gt=0).update(integer=F("integer") + 1), 2 ) self.assertQuerySetEqual( Number.objects.all(), [(-1, -1), (43, 42), (1338, 1337)], lambda n: (n.integer, round(n.float)), ordered=False, ) def test_filter_not_equals_other_field(self): """ We can filter for objects, where a value is not equals the value of an other field. """ self.assertEqual( Number.objects.filter(integer__gt=0).update(integer=F("integer") + 1), 2 ) self.assertQuerySetEqual( Number.objects.exclude(float=F("integer")), [(43, 42), (1338, 1337)], lambda n: (n.integer, round(n.float)), ordered=False, ) def test_filter_decimal_expression(self): obj = Number.objects.create(integer=0, float=1, decimal_value=Decimal("1")) qs = Number.objects.annotate( x=ExpressionWrapper(Value(1), output_field=DecimalField()), ).filter(Q(x=1, integer=0) & Q(x=Decimal("1"))) self.assertSequenceEqual(qs, [obj]) def test_complex_expressions(self): """ Complex expressions of different connection types are possible. """ n = Number.objects.create(integer=10, float=123.45) self.assertEqual( Number.objects.filter(pk=n.pk).update(float=F("integer") + F("float") * 2), 1, ) self.assertEqual(Number.objects.get(pk=n.pk).integer, 10) self.assertEqual( Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3) ) def test_decimal_expression(self): n = Number.objects.create(integer=1, decimal_value=Decimal("0.5")) n.decimal_value = F("decimal_value") - Decimal("0.4") n.save() n.refresh_from_db() self.assertEqual(n.decimal_value, Decimal("0.1")) class ExpressionOperatorTests(TestCase): @classmethod def setUpTestData(cls): cls.n = Number.objects.create(integer=42, float=15.5) cls.n1 = Number.objects.create(integer=-42, float=-15.5) def test_lefthand_addition(self): # LH Addition of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F("integer") + 15, float=F("float") + 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3) ) def test_lefthand_subtraction(self): # LH Subtraction of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F("integer") - 15, float=F("float") - 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3) ) def test_lefthand_multiplication(self): # Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F("integer") * 15, float=F("float") * 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3) ) def test_lefthand_division(self): # LH Division of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F("integer") / 2, float=F("float") / 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3) ) def test_lefthand_modulo(self): # LH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=F("integer") % 20) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2) def test_lefthand_modulo_null(self): # LH Modulo arithmetic on integers. 
Employee.objects.create(firstname="John", lastname="Doe", salary=None) qs = Employee.objects.annotate(modsalary=F("salary") % 20) self.assertIsNone(qs.get().modsalary) def test_lefthand_bitwise_and(self): # LH Bitwise ands on integers Number.objects.filter(pk=self.n.pk).update(integer=F("integer").bitand(56)) Number.objects.filter(pk=self.n1.pk).update(integer=F("integer").bitand(-56)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64) def test_lefthand_bitwise_left_shift_operator(self): Number.objects.update(integer=F("integer").bitleftshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168) def test_lefthand_bitwise_right_shift_operator(self): Number.objects.update(integer=F("integer").bitrightshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11) def test_lefthand_bitwise_or(self): # LH Bitwise or on integers Number.objects.update(integer=F("integer").bitor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10) def test_lefthand_transformed_field_bitwise_or(self): Employee.objects.create(firstname="Max", lastname="Mustermann") with register_lookup(CharField, Length): qs = Employee.objects.annotate(bitor=F("lastname__length").bitor(48)) self.assertEqual(qs.get().bitor, 58) def test_lefthand_power(self): # LH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F("integer") ** 2, float=F("float") ** 1.5 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2) ) def test_lefthand_bitwise_xor(self): Number.objects.update(integer=F("integer").bitxor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 26) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -26) def test_lefthand_bitwise_xor_null(self): employee = Employee.objects.create(firstname="John", lastname="Doe") Employee.objects.update(salary=F("salary").bitxor(48)) employee.refresh_from_db() self.assertIsNone(employee.salary) def test_lefthand_bitwise_xor_right_null(self): employee = Employee.objects.create(firstname="John", lastname="Doe", salary=48) Employee.objects.update(salary=F("salary").bitxor(None)) employee.refresh_from_db() self.assertIsNone(employee.salary) @unittest.skipUnless( connection.vendor == "oracle", "Oracle doesn't support bitwise XOR." ) def test_lefthand_bitwise_xor_not_supported(self): msg = "Bitwise XOR is not supported in Oracle."
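# Oracle has no bitwise XOR operator, so the backend rejects the
# expression while compiling the query, before any SQL is executed.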
with self.assertRaisesMessage(NotSupportedError, msg): Number.objects.update(integer=F("integer").bitxor(48)) def test_right_hand_addition(self): # Right hand operators Number.objects.filter(pk=self.n.pk).update( integer=15 + F("integer"), float=42.7 + F("float") ) # RH Addition of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3) ) def test_right_hand_subtraction(self): Number.objects.filter(pk=self.n.pk).update( integer=15 - F("integer"), float=42.7 - F("float") ) # RH Subtraction of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3) ) def test_right_hand_multiplication(self): # RH Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=15 * F("integer"), float=42.7 * F("float") ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3) ) def test_right_hand_division(self): # RH Division of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=640 / F("integer"), float=42.7 / F("float") ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3) ) def test_right_hand_modulo(self): # RH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=69 % F("integer")) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) def test_righthand_power(self): # RH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update( integer=2 ** F("integer"), float=1.5 ** F("float") ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104) self.assertEqual( Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3) ) class FTimeDeltaTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) midnight = datetime.time(0) delta0 = datetime.timedelta(0) delta1 = datetime.timedelta(microseconds=253000) delta2 = datetime.timedelta(seconds=44) delta3 = datetime.timedelta(hours=21, minutes=8) delta4 = datetime.timedelta(days=10) delta5 = datetime.timedelta(days=90) # Test data is set so that deltas and delays will be # strictly increasing. cls.deltas = [] cls.delays = [] cls.days_long = [] # e0: started same day as assigned, zero duration end = stime + delta0 cls.e0 = Experiment.objects.create( name="e0", assigned=sday, start=stime, end=end, completed=end.date(), estimated_time=delta0, ) cls.deltas.append(delta0) cls.delays.append( cls.e0.start - datetime.datetime.combine(cls.e0.assigned, midnight) ) cls.days_long.append(cls.e0.completed - cls.e0.assigned) # e1: started one day after assigned, tiny duration, data # set so that end time has no fractional seconds, which # tests an edge case on sqlite. 
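# (stime carries .747000 seconds and delta1 adds .253000, so the end
# timestamp lands exactly on a whole second.)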
delay = datetime.timedelta(1) end = stime + delay + delta1 e1 = Experiment.objects.create( name="e1", assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta1, ) cls.deltas.append(delta1) cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight)) cls.days_long.append(e1.completed - e1.assigned) # e2: started three days after assigned, small duration end = stime + delta2 e2 = Experiment.objects.create( name="e2", assigned=sday - datetime.timedelta(3), start=stime, end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1), ) cls.deltas.append(delta2) cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight)) cls.days_long.append(e2.completed - e2.assigned) # e3: started four days after assigned, medium duration delay = datetime.timedelta(4) end = stime + delay + delta3 e3 = Experiment.objects.create( name="e3", assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3, ) cls.deltas.append(delta3) cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight)) cls.days_long.append(e3.completed - e3.assigned) # e4: started 10 days after assignment, long duration end = stime + delta4 e4 = Experiment.objects.create( name="e4", assigned=sday - datetime.timedelta(10), start=stime, end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1), ) cls.deltas.append(delta4) cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight)) cls.days_long.append(e4.completed - e4.assigned) # e5: started a month after assignment, very long duration delay = datetime.timedelta(30) end = stime + delay + delta5 e5 = Experiment.objects.create( name="e5", assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta5, ) cls.deltas.append(delta5) cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight)) cls.days_long.append(e5.completed - e5.assigned) cls.expnames = [e.name for e in Experiment.objects.all()] def test_multiple_query_compilation(self): # Ticket #21643 queryset = Experiment.objects.filter( end__lt=F("start") + datetime.timedelta(hours=1) ) q1 = str(queryset.query) q2 = str(queryset.query) self.assertEqual(q1, q2) def test_query_clone(self): # Ticket #21643 - Crash when compiling query more than once qs = Experiment.objects.filter(end__lt=F("start") + datetime.timedelta(hours=1)) qs2 = qs.all() list(qs) list(qs2) # Intentionally no assert def test_delta_add(self): for i, delta in enumerate(self.deltas): test_set = [ e.name for e in Experiment.objects.filter(end__lt=F("start") + delta) ] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(end__lt=delta + F("start")) ] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(end__lte=F("start") + delta) ] self.assertEqual(test_set, self.expnames[: i + 1]) def test_delta_subtract(self): for i, delta in enumerate(self.deltas): test_set = [ e.name for e in Experiment.objects.filter(start__gt=F("end") - delta) ] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(start__gte=F("end") - delta) ] self.assertEqual(test_set, self.expnames[: i + 1]) def test_exclude(self): for i, delta in enumerate(self.deltas): test_set = [ e.name for e in Experiment.objects.exclude(end__lt=F("start") + delta) ] self.assertEqual(test_set, self.expnames[i:]) test_set = [ e.name for e in Experiment.objects.exclude(end__lte=F("start") 
+ delta) ] self.assertEqual(test_set, self.expnames[i + 1 :]) def test_date_comparison(self): for i, days in enumerate(self.days_long): test_set = [ e.name for e in Experiment.objects.filter(completed__lt=F("assigned") + days) ] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(completed__lte=F("assigned") + days) ] self.assertEqual(test_set, self.expnames[: i + 1]) def test_datetime_and_durationfield_addition_with_filter(self): test_set = Experiment.objects.filter(end=F("start") + F("estimated_time")) self.assertGreater(test_set.count(), 0) self.assertEqual( [e.name for e in test_set], [ e.name for e in Experiment.objects.all() if e.end == e.start + e.estimated_time ], ) def test_datetime_and_duration_field_addition_with_annotate_and_no_output_field( self, ): test_set = Experiment.objects.annotate( estimated_end=F("start") + F("estimated_time") ) self.assertEqual( [e.estimated_end for e in test_set], [e.start + e.estimated_time for e in test_set], ) @skipUnlessDBFeature("supports_temporal_subtraction") def test_datetime_subtraction_with_annotate_and_no_output_field(self): test_set = Experiment.objects.annotate( calculated_duration=F("end") - F("start") ) self.assertEqual( [e.calculated_duration for e in test_set], [e.end - e.start for e in test_set], ) def test_mixed_comparisons1(self): for i, delay in enumerate(self.delays): test_set = [ e.name for e in Experiment.objects.filter(assigned__gt=F("start") - delay) ] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(assigned__gte=F("start") - delay) ] self.assertEqual(test_set, self.expnames[: i + 1]) def test_mixed_comparisons2(self): for i, delay in enumerate(self.delays): delay = datetime.timedelta(delay.days) test_set = [ e.name for e in Experiment.objects.filter(start__lt=F("assigned") + delay) ] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter( start__lte=F("assigned") + delay + datetime.timedelta(1) ) ] self.assertEqual(test_set, self.expnames[: i + 1]) def test_delta_update(self): for delta in self.deltas: exps = Experiment.objects.all() expected_durations = [e.duration() for e in exps] expected_starts = [e.start + delta for e in exps] expected_ends = [e.end + delta for e in exps] Experiment.objects.update(start=F("start") + delta, end=F("end") + delta) exps = Experiment.objects.all() new_starts = [e.start for e in exps] new_ends = [e.end for e in exps] new_durations = [e.duration() for e in exps] self.assertEqual(expected_starts, new_starts) self.assertEqual(expected_ends, new_ends) self.assertEqual(expected_durations, new_durations) def test_invalid_operator(self): with self.assertRaises(DatabaseError): list(Experiment.objects.filter(start=F("start") * datetime.timedelta(0))) def test_durationfield_add(self): zeros = [ e.name for e in Experiment.objects.filter(start=F("start") + F("estimated_time")) ] self.assertEqual(zeros, ["e0"]) end_less = [ e.name for e in Experiment.objects.filter(end__lt=F("start") + F("estimated_time")) ] self.assertEqual(end_less, ["e2"]) delta_math = [ e.name for e in Experiment.objects.filter( end__gte=F("start") + F("estimated_time") + datetime.timedelta(hours=1) ) ] self.assertEqual(delta_math, ["e4"]) queryset = Experiment.objects.annotate( shifted=ExpressionWrapper( F("start") + Value(None, output_field=DurationField()), output_field=DateTimeField(), ) ) self.assertIsNone(queryset.first().shifted) def test_durationfield_multiply_divide(self): 
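# Scaling a duration by Decimal, F(), int, and float multipliers should
# all match plain Python timedelta arithmetic.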
Experiment.objects.update(scalar=2) tests = [ (Decimal("2"), 2), (F("scalar"), 2), (2, 2), (3.2, 3.2), ] for expr, scalar in tests: with self.subTest(expr=expr): qs = Experiment.objects.annotate( multiplied=ExpressionWrapper( expr * F("estimated_time"), output_field=DurationField(), ), divided=ExpressionWrapper( F("estimated_time") / expr, output_field=DurationField(), ), ) for experiment in qs: self.assertEqual( experiment.multiplied, experiment.estimated_time * scalar, ) self.assertEqual( experiment.divided, experiment.estimated_time / scalar, ) def test_duration_expressions(self): for delta in self.deltas: qs = Experiment.objects.annotate(duration=F("estimated_time") + delta) for obj in qs: self.assertEqual(obj.duration, obj.estimated_time + delta) @skipUnlessDBFeature("supports_temporal_subtraction") def test_date_subtraction(self): queryset = Experiment.objects.annotate( completion_duration=F("completed") - F("assigned"), ) at_least_5_days = { e.name for e in queryset.filter( completion_duration__gte=datetime.timedelta(days=5) ) } self.assertEqual(at_least_5_days, {"e3", "e4", "e5"}) at_least_120_days = { e.name for e in queryset.filter( completion_duration__gte=datetime.timedelta(days=120) ) } self.assertEqual(at_least_120_days, {"e5"}) less_than_5_days = { e.name for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5)) } self.assertEqual(less_than_5_days, {"e0", "e1", "e2"}) queryset = Experiment.objects.annotate( difference=F("completed") - Value(None, output_field=DateField()), ) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate( shifted=ExpressionWrapper( F("completed") - Value(None, output_field=DurationField()), output_field=DateField(), ) ) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature("supports_temporal_subtraction") def test_date_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef("pk")).values("completed") queryset = Experiment.objects.annotate( difference=subquery - F("completed"), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature("supports_temporal_subtraction") def test_date_case_subtraction(self): queryset = Experiment.objects.annotate( date_case=Case( When(Q(name="e0"), then=F("completed")), output_field=DateField(), ), completed_value=Value( self.e0.completed, output_field=DateField(), ), difference=F("date_case") - F("completed_value"), ).filter(difference=datetime.timedelta()) self.assertEqual(queryset.get(), self.e0) @skipUnlessDBFeature("supports_temporal_subtraction") def test_time_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) queryset = Time.objects.annotate( difference=F("time") - Value(datetime.time(11, 15, 0)), ) self.assertEqual( queryset.get().difference, datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345), ) queryset = Time.objects.annotate( difference=F("time") - Value(None, output_field=TimeField()), ) self.assertIsNone(queryset.first().difference) queryset = Time.objects.annotate( shifted=ExpressionWrapper( F("time") - Value(None, output_field=DurationField()), output_field=TimeField(), ) ) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature("supports_temporal_subtraction") def test_time_subquery_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) subquery = Time.objects.filter(pk=OuterRef("pk")).values("time") queryset = Time.objects.annotate( difference=subquery - F("time"), ).filter(difference=datetime.timedelta()) 
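# The subquery selects each row's own time, so the difference is a zero
# timedelta for every row.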
self.assertTrue(queryset.exists()) @skipUnlessDBFeature("supports_temporal_subtraction") def test_datetime_subtraction(self): under_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__gt=F("end") - F("start")) ] self.assertEqual(under_estimate, ["e2"]) over_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__lt=F("end") - F("start")) ] self.assertEqual(over_estimate, ["e4"]) queryset = Experiment.objects.annotate( difference=F("start") - Value(None, output_field=DateTimeField()), ) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate( shifted=ExpressionWrapper( F("start") - Value(None, output_field=DurationField()), output_field=DateTimeField(), ) ) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature("supports_temporal_subtraction") def test_datetime_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef("pk")).values("start") queryset = Experiment.objects.annotate( difference=subquery - F("start"), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature("supports_temporal_subtraction") def test_datetime_subtraction_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) Experiment.objects.update(end=F("start") + delta) qs = Experiment.objects.annotate(delta=F("end") - F("start")) for e in qs: self.assertEqual(e.delta, delta) def test_duration_with_datetime(self): # Exclude e1 which has very high precision so we can test this on all # backends regardless of whether or not it supports # microsecond_precision. over_estimate = ( Experiment.objects.exclude(name="e1") .filter( completed__gt=self.stime + F("estimated_time"), ) .order_by("name") ) self.assertQuerySetEqual(over_estimate, ["e3", "e4", "e5"], lambda e: e.name) def test_duration_with_datetime_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) qs = Experiment.objects.annotate( dt=ExpressionWrapper( F("start") + delta, output_field=DateTimeField(), ) ) for e in qs: self.assertEqual(e.dt, e.start + delta) def test_date_minus_duration(self): more_than_4_days = Experiment.objects.filter( assigned__lt=F("completed") - Value(datetime.timedelta(days=4)) ) self.assertQuerySetEqual(more_than_4_days, ["e3", "e4", "e5"], lambda e: e.name) def test_negative_timedelta_update(self): # subtract 30 seconds, 30 minutes, 2 hours and 2 days experiments = ( Experiment.objects.filter(name="e0") .annotate( start_sub_seconds=F("start") + datetime.timedelta(seconds=-30), ) .annotate( start_sub_minutes=F("start_sub_seconds") + datetime.timedelta(minutes=-30), ) .annotate( start_sub_hours=F("start_sub_minutes") + datetime.timedelta(hours=-2), ) .annotate( new_start=F("start_sub_hours") + datetime.timedelta(days=-2), ) ) expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0) # subtract 30 microseconds experiments = experiments.annotate( new_start=F("new_start") + datetime.timedelta(microseconds=-30) ) expected_start += datetime.timedelta(microseconds=+746970) experiments.update(start=F("new_start")) e0 = Experiment.objects.get(name="e0") self.assertEqual(e0.start, expected_start) class ValueTests(TestCase): def test_update_TimeField_using_Value(self): Time.objects.create() Time.objects.update(time=Value(datetime.time(1), output_field=TimeField())) self.assertEqual(Time.objects.get().time, datetime.time(1)) def test_update_UUIDField_using_Value(self): UUID.objects.create() UUID.objects.update( uuid=Value( uuid.UUID("12345678901234567890123456789012"), 
output_field=UUIDField() ) ) self.assertEqual( UUID.objects.get().uuid, uuid.UUID("12345678901234567890123456789012") ) def test_deconstruct(self): value = Value("name") path, args, kwargs = value.deconstruct() self.assertEqual(path, "django.db.models.Value") self.assertEqual(args, (value.value,)) self.assertEqual(kwargs, {}) def test_deconstruct_output_field(self): value = Value("name", output_field=CharField()) path, args, kwargs = value.deconstruct() self.assertEqual(path, "django.db.models.Value") self.assertEqual(args, (value.value,)) self.assertEqual(len(kwargs), 1) self.assertEqual( kwargs["output_field"].deconstruct(), CharField().deconstruct() ) def test_repr(self): tests = [ (None, "Value(None)"), ("str", "Value('str')"), (True, "Value(True)"), (42, "Value(42)"), ( datetime.datetime(2019, 5, 15), "Value(datetime.datetime(2019, 5, 15, 0, 0))", ), (Decimal("3.14"), "Value(Decimal('3.14'))"), ] for value, expected in tests: with self.subTest(value=value): self.assertEqual(repr(Value(value)), expected) def test_equal(self): value = Value("name") self.assertEqual(value, Value("name")) self.assertNotEqual(value, Value("username")) def test_hash(self): d = {Value("name"): "Bob"} self.assertIn(Value("name"), d) self.assertEqual(d[Value("name")], "Bob") def test_equal_output_field(self): value = Value("name", output_field=CharField()) same_value = Value("name", output_field=CharField()) other_value = Value("name", output_field=TimeField()) no_output_field = Value("name") self.assertEqual(value, same_value) self.assertNotEqual(value, other_value) self.assertNotEqual(value, no_output_field) def test_raise_empty_expressionlist(self): msg = "ExpressionList requires at least one expression" with self.assertRaisesMessage(ValueError, msg): ExpressionList() def test_compile_unresolved(self): # This test might need to be revisited later on if #25425 is enforced. compiler = Time.objects.all().query.get_compiler(connection=connection) value = Value("foo") self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"])) value = Value("foo", output_field=CharField()) self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"])) def test_output_field_decimalfield(self): Time.objects.create() time = Time.objects.annotate(one=Value(1, output_field=DecimalField())).first() self.assertEqual(time.one, 1) def test_resolve_output_field(self): value_types = [ ("str", CharField), (True, BooleanField), (42, IntegerField), (3.14, FloatField), (datetime.date(2019, 5, 15), DateField), (datetime.datetime(2019, 5, 15), DateTimeField), (datetime.time(3, 16), TimeField), (datetime.timedelta(1), DurationField), (Decimal("3.14"), DecimalField), (b"", BinaryField), (uuid.uuid4(), UUIDField), ] for value, output_field_type in value_types: with self.subTest(type=type(value)): expr = Value(value) self.assertIsInstance(expr.output_field, output_field_type) def test_resolve_output_field_failure(self): msg = "Cannot resolve expression type, unknown output_field" with self.assertRaisesMessage(FieldError, msg): Value(object()).output_field def test_output_field_does_not_create_broken_validators(self): """ The output field for a given Value doesn't get cleaned & validated; however, validators may still be instantiated for a given field type and this demonstrates that they don't throw an exception.
""" value_types = [ "str", True, 42, 3.14, datetime.date(2019, 5, 15), datetime.datetime(2019, 5, 15), datetime.time(3, 16), datetime.timedelta(1), Decimal("3.14"), b"", uuid.uuid4(), ] for value in value_types: with self.subTest(type=type(value)): field = Value(value)._resolve_output_field() field.clean(value, model_instance=None) class ExistsTests(TestCase): def test_optimizations(self): with CaptureQueriesContext(connection) as context: list( Experiment.objects.values( exists=Exists( Experiment.objects.order_by("pk"), ) ).order_by() ) captured_queries = context.captured_queries self.assertEqual(len(captured_queries), 1) captured_sql = captured_queries[0]["sql"] self.assertNotIn( connection.ops.quote_name(Experiment._meta.pk.column), captured_sql, ) self.assertIn( connection.ops.limit_offset_sql(None, 1), captured_sql, ) self.assertNotIn("ORDER BY", captured_sql) def test_negated_empty_exists(self): manager = Manager.objects.create() qs = Manager.objects.filter(~Exists(Manager.objects.none()) & Q(pk=manager.pk)) self.assertSequenceEqual(qs, [manager]) def test_select_negated_empty_exists(self): manager = Manager.objects.create() qs = Manager.objects.annotate( not_exists=~Exists(Manager.objects.none()) ).filter(pk=manager.pk) self.assertSequenceEqual(qs, [manager]) self.assertIs(qs.get().not_exists, True) class FieldTransformTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) cls.ex1 = Experiment.objects.create( name="Experiment 1", assigned=sday, completed=sday + datetime.timedelta(2), estimated_time=datetime.timedelta(2), start=stime, end=stime + datetime.timedelta(2), ) def test_month_aggregation(self): self.assertEqual( Experiment.objects.aggregate(month_count=Count("assigned__month")), {"month_count": 1}, ) def test_transform_in_values(self): self.assertSequenceEqual( Experiment.objects.values("assigned__month"), [{"assigned__month": 6}], ) def test_multiple_transforms_in_values(self): self.assertSequenceEqual( Experiment.objects.values("end__date__month"), [{"end__date__month": 6}], ) class ReprTests(SimpleTestCase): def test_expressions(self): self.assertEqual( repr(Case(When(a=1))), "<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>", ) self.assertEqual( repr(When(Q(age__gte=18), then=Value("legal"))), "<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value('legal')>", ) self.assertEqual(repr(Col("alias", "field")), "Col(alias, field)") self.assertEqual(repr(F("published")), "F(published)") self.assertEqual( repr(F("cost") + F("tax")), "<CombinedExpression: F(cost) + F(tax)>" ) self.assertEqual( repr(ExpressionWrapper(F("cost") + F("tax"), IntegerField())), "ExpressionWrapper(F(cost) + F(tax))", ) self.assertEqual( repr(Func("published", function="TO_CHAR")), "Func(F(published), function=TO_CHAR)", ) self.assertEqual(repr(OrderBy(Value(1))), "OrderBy(Value(1), descending=False)") self.assertEqual(repr(RawSQL("table.col", [])), "RawSQL(table.col, [])") self.assertEqual( repr(Ref("sum_cost", Sum("cost"))), "Ref(sum_cost, Sum(F(cost)))" ) self.assertEqual(repr(Value(1)), "Value(1)") self.assertEqual( repr(ExpressionList(F("col"), F("anothercol"))), "ExpressionList(F(col), F(anothercol))", ) self.assertEqual( repr(ExpressionList(OrderBy(F("col"), descending=False))), "ExpressionList(OrderBy(F(col), descending=False))", ) def test_functions(self): self.assertEqual(repr(Coalesce("a", "b")), "Coalesce(F(a), F(b))") self.assertEqual(repr(Concat("a", 
"b")), "Concat(ConcatPair(F(a), F(b)))") self.assertEqual(repr(Length("a")), "Length(F(a))") self.assertEqual(repr(Lower("a")), "Lower(F(a))") self.assertEqual(repr(Substr("a", 1, 3)), "Substr(F(a), Value(1), Value(3))") self.assertEqual(repr(Upper("a")), "Upper(F(a))") def test_aggregates(self): self.assertEqual(repr(Avg("a")), "Avg(F(a))") self.assertEqual(repr(Count("a")), "Count(F(a))") self.assertEqual(repr(Count("*")), "Count('*')") self.assertEqual(repr(Max("a")), "Max(F(a))") self.assertEqual(repr(Min("a")), "Min(F(a))") self.assertEqual(repr(StdDev("a")), "StdDev(F(a), sample=False)") self.assertEqual(repr(Sum("a")), "Sum(F(a))") self.assertEqual( repr(Variance("a", sample=True)), "Variance(F(a), sample=True)" ) def test_distinct_aggregates(self): self.assertEqual(repr(Count("a", distinct=True)), "Count(F(a), distinct=True)") self.assertEqual(repr(Count("*", distinct=True)), "Count('*', distinct=True)") def test_filtered_aggregates(self): filter = Q(a=1) self.assertEqual( repr(Avg("a", filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))" ) self.assertEqual( repr(Count("a", filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))" ) self.assertEqual( repr(Max("a", filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))" ) self.assertEqual( repr(Min("a", filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))" ) self.assertEqual( repr(StdDev("a", filter=filter)), "StdDev(F(a), filter=(AND: ('a', 1)), sample=False)", ) self.assertEqual( repr(Sum("a", filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))" ) self.assertEqual( repr(Variance("a", sample=True, filter=filter)), "Variance(F(a), filter=(AND: ('a', 1)), sample=True)", ) self.assertEqual( repr(Count("a", filter=filter, distinct=True)), "Count(F(a), distinct=True, filter=(AND: ('a', 1)))", ) class CombinableTests(SimpleTestCase): bitwise_msg = ( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def test_negation(self): c = Combinable() self.assertEqual(-c, c * -1) def test_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() & Combinable() def test_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() | Combinable() def test_xor(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() ^ Combinable() def test_reversed_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() & Combinable() def test_reversed_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() | Combinable() def test_reversed_xor(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() ^ Combinable() class CombinedExpressionTests(SimpleTestCase): def test_resolve_output_field_number(self): tests = [ (IntegerField, AutoField, IntegerField), (AutoField, IntegerField, IntegerField), (IntegerField, DecimalField, DecimalField), (DecimalField, IntegerField, DecimalField), (IntegerField, FloatField, FloatField), (FloatField, IntegerField, FloatField), ] connectors = [ Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV, Combinable.MOD, ] for lhs, rhs, combined in tests: for connector in connectors: with self.subTest( lhs=lhs, connector=connector, rhs=rhs, combined=combined ): expr = CombinedExpression( Expression(lhs()), connector, Expression(rhs()), ) self.assertIsInstance(expr.output_field, combined) def test_resolve_output_field_with_null(self): def null(): return Value(None) tests = [ # Numbers. 
(AutoField, Combinable.ADD, null), (DecimalField, Combinable.ADD, null), (FloatField, Combinable.ADD, null), (IntegerField, Combinable.ADD, null), (IntegerField, Combinable.SUB, null), (null, Combinable.ADD, IntegerField), # Dates. (DateField, Combinable.ADD, null), (DateTimeField, Combinable.ADD, null), (DurationField, Combinable.ADD, null), (TimeField, Combinable.ADD, null), (TimeField, Combinable.SUB, null), (null, Combinable.ADD, DateTimeField), (DateField, Combinable.SUB, null), ] for lhs, connector, rhs in tests: msg = ( f"Cannot infer type of {connector!r} expression involving these types: " ) with self.subTest(lhs=lhs, connector=connector, rhs=rhs): expr = CombinedExpression( Expression(lhs()), connector, Expression(rhs()), ) with self.assertRaisesMessage(FieldError, msg): expr.output_field def test_resolve_output_field_dates(self): tests = [ # Add - same type. (DateField, Combinable.ADD, DateField, FieldError), (DateTimeField, Combinable.ADD, DateTimeField, FieldError), (TimeField, Combinable.ADD, TimeField, FieldError), (DurationField, Combinable.ADD, DurationField, DurationField), # Add - different type. (DateField, Combinable.ADD, DurationField, DateTimeField), (DateTimeField, Combinable.ADD, DurationField, DateTimeField), (TimeField, Combinable.ADD, DurationField, TimeField), (DurationField, Combinable.ADD, DateField, DateTimeField), (DurationField, Combinable.ADD, DateTimeField, DateTimeField), (DurationField, Combinable.ADD, TimeField, TimeField), # Subtract - same type. (DateField, Combinable.SUB, DateField, DurationField), (DateTimeField, Combinable.SUB, DateTimeField, DurationField), (TimeField, Combinable.SUB, TimeField, DurationField), (DurationField, Combinable.SUB, DurationField, DurationField), # Subtract - different type. (DateField, Combinable.SUB, DurationField, DateTimeField), (DateTimeField, Combinable.SUB, DurationField, DateTimeField), (TimeField, Combinable.SUB, DurationField, TimeField), (DurationField, Combinable.SUB, DateField, FieldError), (DurationField, Combinable.SUB, DateTimeField, FieldError), (DurationField, Combinable.SUB, TimeField, FieldError), ] for lhs, connector, rhs, combined in tests: msg = ( f"Cannot infer type of {connector!r} expression involving these types: " ) with self.subTest(lhs=lhs, connector=connector, rhs=rhs, combined=combined): expr = CombinedExpression( Expression(lhs()), connector, Expression(rhs()), ) if issubclass(combined, Exception): with self.assertRaisesMessage(combined, msg): expr.output_field else: self.assertIsInstance(expr.output_field, combined) def test_mixed_char_date_with_annotate(self): queryset = Experiment.objects.annotate(nonsense=F("name") + F("assigned")) msg = ( "Cannot infer type of '+' expression involving these types: CharField, " "DateField. You must set output_field."
) with self.assertRaisesMessage(FieldError, msg): list(queryset) class ExpressionWrapperTests(SimpleTestCase): def test_empty_group_by(self): expr = ExpressionWrapper(Value(3), output_field=IntegerField()) self.assertEqual(expr.get_group_by_cols(), []) def test_non_empty_group_by(self): value = Value("f") value.output_field = None expr = ExpressionWrapper(Lower(value), output_field=IntegerField()) group_by_cols = expr.get_group_by_cols() self.assertEqual(group_by_cols, [expr.expression]) self.assertEqual(group_by_cols[0].output_field, expr.output_field) class NegatedExpressionTests(TestCase): @classmethod def setUpTestData(cls): ceo = Employee.objects.create(firstname="Joe", lastname="Smith", salary=10) cls.eu_company = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=ceo, based_in_eu=True, ) cls.non_eu_company = Company.objects.create( name="Foobar Ltd.", num_employees=3, num_chairs=4, ceo=ceo, based_in_eu=False, ) def test_invert(self): f = F("field") self.assertEqual(~f, NegatedExpression(f)) self.assertIsNot(~~f, f) self.assertEqual(~~f, f) def test_filter(self): self.assertSequenceEqual( Company.objects.filter(~F("based_in_eu")), [self.non_eu_company], ) qs = Company.objects.annotate(eu_required=~Value(False)) self.assertSequenceEqual( qs.filter(based_in_eu=F("eu_required")).order_by("eu_required"), [self.eu_company], ) self.assertSequenceEqual( qs.filter(based_in_eu=~~F("eu_required")), [self.eu_company], ) self.assertSequenceEqual( qs.filter(based_in_eu=~F("eu_required")), [self.non_eu_company], ) self.assertSequenceEqual(qs.filter(based_in_eu=~F("based_in_eu")), []) def test_values(self): self.assertSequenceEqual( Company.objects.annotate(negated=~F("based_in_eu")) .values_list("name", "negated") .order_by("name"), [("Example Inc.", False), ("Foobar Ltd.", True)], ) class OrderByTests(SimpleTestCase): def test_equal(self): self.assertEqual( OrderBy(F("field"), nulls_last=True), OrderBy(F("field"), nulls_last=True), ) self.assertNotEqual( OrderBy(F("field"), nulls_last=True), OrderBy(F("field")), ) def test_hash(self): self.assertEqual( hash(OrderBy(F("field"), nulls_last=True)), hash(OrderBy(F("field"), nulls_last=True)), ) self.assertNotEqual( hash(OrderBy(F("field"), nulls_last=True)), hash(OrderBy(F("field"))), ) def test_nulls_false(self): # These tests will catch ValueError in Django 5.0 when passing False to # nulls_first and nulls_last becomes forbidden. # msg = "nulls_first and nulls_last values must be True or None." msg = ( "Passing nulls_first=False or nulls_last=False is deprecated, use None " "instead." ) with self.assertRaisesMessage(RemovedInDjango50Warning, msg): OrderBy(F("field"), nulls_first=False) with self.assertRaisesMessage(RemovedInDjango50Warning, msg): OrderBy(F("field"), nulls_last=False) with self.assertRaisesMessage(RemovedInDjango50Warning, msg): F("field").asc(nulls_first=False) with self.assertRaisesMessage(RemovedInDjango50Warning, msg): F("field").desc(nulls_last=False)
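# A minimal, illustrative sketch (not part of the test suite, never called
# by it, and with a helper name of our own choosing) tying together the
# Subquery/OuterRef/Exists pieces exercised above; it assumes the Company
# and Employee models from this app's models.py.
def _example_company_queryset():
    # Pull each company's CEO salary via a correlated subquery.
    ceo_salary = Subquery(
        Employee.objects.filter(pk=OuterRef("ceo_id")).values("salary")[:1]
    )
    # Flag companies that have a point of contact on record.
    has_poc = Exists(Employee.objects.filter(pk=OuterRef("point_of_contact_id")))
    return Company.objects.annotate(ceo_salary=ceo_salary, has_poc=has_poc).filter(
        Q(has_poc=True) | Q(ceo_salary__gte=F("num_employees"))
    )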
from django.test import TestCase from .models import SimpleModel class AsyncModelOperationTest(TestCase): @classmethod def setUpTestData(cls): cls.s1 = SimpleModel.objects.create(field=0) async def test_asave(self): self.s1.field = 10 await self.s1.asave() refetched = await SimpleModel.objects.aget() self.assertEqual(refetched.field, 10) async def test_adelete(self): await self.s1.adelete() count = await SimpleModel.objects.acount() self.assertEqual(count, 0) async def test_arefresh_from_db(self): await SimpleModel.objects.filter(pk=self.s1.pk).aupdate(field=20) await self.s1.arefresh_from_db() self.assertEqual(self.s1.field, 20)
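# A minimal usage sketch (not part of the suite; the helper name is ours):
# the async model API exercised above, driven from application code. It
# assumes it is awaited under a running event loop, e.g. from an async
# view or via asyncio.run().
async def _example_set_field(pk, value):
    obj = await SimpleModel.objects.aget(pk=pk)
    obj.field = value
    await obj.asave()
    # Re-read from the database to confirm the write.
    await obj.arefresh_from_db()
    return obj.field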
import json import xml.etree.ElementTree from datetime import datetime from asgiref.sync import async_to_sync, sync_to_async from django.db import NotSupportedError, connection from django.db.models import Sum from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature from .models import SimpleModel class AsyncQuerySetTest(TestCase): @classmethod def setUpTestData(cls): cls.s1 = SimpleModel.objects.create( field=1, created=datetime(2022, 1, 1, 0, 0, 0), ) cls.s2 = SimpleModel.objects.create( field=2, created=datetime(2022, 1, 1, 0, 0, 1), ) cls.s3 = SimpleModel.objects.create( field=3, created=datetime(2022, 1, 1, 0, 0, 2), ) @staticmethod def _get_db_feature(connection_, feature_name): # Wrapper to avoid accessing connection attributes until inside # coroutine function. Connection access is thread sensitive and cannot # be passed across sync/async boundaries. return getattr(connection_.features, feature_name) async def test_async_iteration(self): results = [] async for m in SimpleModel.objects.order_by("pk"): results.append(m) self.assertEqual(results, [self.s1, self.s2, self.s3]) async def test_aiterator(self): qs = SimpleModel.objects.aiterator() results = [] async for m in qs: results.append(m) self.assertCountEqual(results, [self.s1, self.s2, self.s3]) async def test_aiterator_prefetch_related(self): qs = SimpleModel.objects.prefetch_related("relatedmodels").aiterator() msg = "Using QuerySet.aiterator() after prefetch_related() is not supported." with self.assertRaisesMessage(NotSupportedError, msg): async for m in qs: pass async def test_aiterator_invalid_chunk_size(self): msg = "Chunk size must be strictly positive." for size in [0, -1]: qs = SimpleModel.objects.aiterator(chunk_size=size) with self.subTest(size=size), self.assertRaisesMessage(ValueError, msg): async for m in qs: pass async def test_acount(self): count = await SimpleModel.objects.acount() self.assertEqual(count, 3) async def test_acount_cached_result(self): qs = SimpleModel.objects.all() # Evaluate the queryset to populate the query cache. [x async for x in qs] count = await qs.acount() self.assertEqual(count, 3) await sync_to_async(SimpleModel.objects.create)( field=4, created=datetime(2022, 1, 1, 0, 0, 0), ) # The query cache is used. 
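# acount() returns the length of the cached result list instead of
# issuing a new COUNT query, so the freshly created row isn't seen.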
count = await qs.acount() self.assertEqual(count, 3) async def test_aget(self): instance = await SimpleModel.objects.aget(field=1) self.assertEqual(instance, self.s1) async def test_acreate(self): await SimpleModel.objects.acreate(field=4) self.assertEqual(await SimpleModel.objects.acount(), 4) async def test_aget_or_create(self): instance, created = await SimpleModel.objects.aget_or_create(field=4) self.assertEqual(await SimpleModel.objects.acount(), 4) self.assertIs(created, True) async def test_aupdate_or_create(self): instance, created = await SimpleModel.objects.aupdate_or_create( id=self.s1.id, defaults={"field": 2} ) self.assertEqual(instance, self.s1) self.assertIs(created, False) instance, created = await SimpleModel.objects.aupdate_or_create(field=4) self.assertEqual(await SimpleModel.objects.acount(), 4) self.assertIs(created, True) @skipUnlessDBFeature("has_bulk_insert") @async_to_sync async def test_abulk_create(self): instances = [SimpleModel(field=i) for i in range(10)] qs = await SimpleModel.objects.abulk_create(instances) self.assertEqual(len(qs), 10) @skipUnlessDBFeature("has_bulk_insert", "supports_update_conflicts") @skipIfDBFeature("supports_update_conflicts_with_target") @async_to_sync async def test_update_conflicts_unique_field_unsupported(self): msg = ( "This database backend does not support updating conflicts with specifying " "unique fields that can trigger the upsert." ) with self.assertRaisesMessage(NotSupportedError, msg): await SimpleModel.objects.abulk_create( [SimpleModel(field=1), SimpleModel(field=2)], update_conflicts=True, update_fields=["field"], unique_fields=["created"], ) async def test_abulk_update(self): instances = SimpleModel.objects.all() async for instance in instances: instance.field = instance.field * 10 await SimpleModel.objects.abulk_update(instances, ["field"]) qs = [(o.pk, o.field) async for o in SimpleModel.objects.all()] self.assertCountEqual( qs, [(self.s1.pk, 10), (self.s2.pk, 20), (self.s3.pk, 30)], ) async def test_ain_bulk(self): res = await SimpleModel.objects.ain_bulk() self.assertEqual( res, {self.s1.pk: self.s1, self.s2.pk: self.s2, self.s3.pk: self.s3}, ) res = await SimpleModel.objects.ain_bulk([self.s2.pk]) self.assertEqual(res, {self.s2.pk: self.s2}) res = await SimpleModel.objects.ain_bulk([self.s2.pk], field_name="id") self.assertEqual(res, {self.s2.pk: self.s2}) async def test_alatest(self): instance = await SimpleModel.objects.alatest("created") self.assertEqual(instance, self.s3) instance = await SimpleModel.objects.alatest("-created") self.assertEqual(instance, self.s1) async def test_aearliest(self): instance = await SimpleModel.objects.aearliest("created") self.assertEqual(instance, self.s1) instance = await SimpleModel.objects.aearliest("-created") self.assertEqual(instance, self.s3) async def test_afirst(self): instance = await SimpleModel.objects.afirst() self.assertEqual(instance, self.s1) instance = await SimpleModel.objects.filter(field=4).afirst() self.assertIsNone(instance) async def test_alast(self): instance = await SimpleModel.objects.alast() self.assertEqual(instance, self.s3) instance = await SimpleModel.objects.filter(field=4).alast() self.assertIsNone(instance) async def test_aaggregate(self): total = await SimpleModel.objects.aaggregate(total=Sum("field")) self.assertEqual(total, {"total": 6}) async def test_aexists(self): check = await SimpleModel.objects.filter(field=1).aexists() self.assertIs(check, True) check = await SimpleModel.objects.filter(field=4).aexists() self.assertIs(check, False) 
async def test_acontains(self): check = await SimpleModel.objects.acontains(self.s1) self.assertIs(check, True) # Unsaved instances are not allowed, so use an ID known not to exist. check = await SimpleModel.objects.acontains( SimpleModel(id=self.s3.id + 1, field=4) ) self.assertIs(check, False) async def test_aupdate(self): await SimpleModel.objects.aupdate(field=99) qs = [o async for o in SimpleModel.objects.all()] values = [instance.field for instance in qs] self.assertEqual(set(values), {99}) async def test_adelete(self): await SimpleModel.objects.filter(field=2).adelete() qs = [o async for o in SimpleModel.objects.all()] self.assertCountEqual(qs, [self.s1, self.s3]) @skipUnlessDBFeature("supports_explaining_query_execution") @async_to_sync async def test_aexplain(self): supported_formats = await sync_to_async(self._get_db_feature)( connection, "supported_explain_formats" ) all_formats = (None, *supported_formats) for format_ in all_formats: with self.subTest(format=format_): # TODO: Check the captured query when async versions of # self.assertNumQueries/CaptureQueriesContext context # processors are available. result = await SimpleModel.objects.filter(field=1).aexplain( format=format_ ) self.assertIsInstance(result, str) self.assertTrue(result) if not format_: continue if format_.lower() == "xml": try: xml.etree.ElementTree.fromstring(result) except xml.etree.ElementTree.ParseError as e: self.fail(f"QuerySet.aexplain() result is not valid XML: {e}") elif format_.lower() == "json": try: json.loads(result) except json.JSONDecodeError as e: self.fail(f"QuerySet.aexplain() result is not valid JSON: {e}") async def test_raw(self): sql = "SELECT id, field FROM async_simplemodel WHERE created=%s" qs = SimpleModel.objects.raw(sql, [self.s1.created]) self.assertEqual([o async for o in qs], [self.s1])
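For reference, a sketch of the async queryset API outside the test harness, again assuming a configured project with SimpleModel; the function name is hypothetical.

from django.db.models import Sum

async def summarize():
    # aaggregate() awaits a dict of results, like aggregate().
    totals = await SimpleModel.objects.aaggregate(total=Sum("field"))
    # aiterator() streams rows; chunk_size must be strictly positive, and
    # it cannot be combined with prefetch_related() (NotSupportedError).
    values = [obj.field async for obj in SimpleModel.objects.aiterator(chunk_size=100)]
    return totals["total"], values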
from operator import attrgetter

from django.core.exceptions import FieldError, ValidationError
from django.db import connection, models
from django.db.models.query_utils import DeferredAttribute
from django.test import SimpleTestCase, TestCase
from django.test.utils import CaptureQueriesContext, isolate_apps

from .models import (
    Base,
    Chef,
    CommonInfo,
    CustomSupplier,
    GrandChild,
    GrandParent,
    ItalianRestaurant,
    MixinModel,
    Parent,
    ParkingLot,
    Place,
    Post,
    Restaurant,
    Student,
    SubBase,
    Supplier,
    Title,
    Worker,
)


class ModelInheritanceTests(TestCase):
    def test_abstract(self):
        # The Student and Worker models both have 'name' and 'age' fields on
        # them and inherit the __str__() method, just as with normal Python
        # subclassing. This is useful if you want to factor out common
        # information for programming purposes, but still keep completely
        # independent models at the database level.
        w1 = Worker.objects.create(name="Fred", age=35, job="Quarry worker")
        Worker.objects.create(name="Barney", age=34, job="Quarry worker")

        s = Student.objects.create(name="Pebbles", age=5, school_class="1B")

        self.assertEqual(str(w1), "Worker Fred")
        self.assertEqual(str(s), "Student Pebbles")

        # The children inherit the Meta class of their parents (if they don't
        # specify their own).
        self.assertSequenceEqual(
            Worker.objects.values("name"),
            [
                {"name": "Barney"},
                {"name": "Fred"},
            ],
        )

        # Since Student does not subclass CommonInfo's Meta, it completely
        # overrides it. So ordering by name doesn't take place for Students.
        self.assertEqual(Student._meta.ordering, [])

        # However, the CommonInfo class cannot be used as a normal model (it
        # doesn't exist as a model).
        with self.assertRaisesMessage(
            AttributeError, "'CommonInfo' has no attribute 'objects'"
        ):
            CommonInfo.objects.all()

    def test_reverse_relation_for_different_hierarchy_tree(self):
        # Even though p.supplier exists for a Place 'p' (a parent of a
        # Supplier), a Restaurant object cannot access that reverse relation,
        # since it's not part of the Place-Supplier hierarchy.
        self.assertSequenceEqual(Place.objects.filter(supplier__name="foo"), [])
        msg = (
            "Cannot resolve keyword 'supplier' into field. Choices are: "
            "address, chef, chef_id, id, italianrestaurant, lot, name, "
            "place_ptr, place_ptr_id, provider, rating, serves_hot_dogs, serves_pizza"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Restaurant.objects.filter(supplier__name="foo")

    def test_model_with_distinct_accessors(self):
        # The Post model has distinct accessors for the Comment and Link models.
        post = Post.objects.create(title="Lorem Ipsum")
        post.attached_comment_set.create(content="Save $ on V1agr@", is_spam=True)
        post.attached_link_set.create(
            content="The web framework for perfections with deadlines.",
            url="http://www.djangoproject.com/",
        )

        # The Post model doesn't have an attribute called
        # 'attached_%(class)s_set'.
        msg = "'Post' object has no attribute 'attached_%(class)s_set'"
        with self.assertRaisesMessage(AttributeError, msg):
            getattr(post, "attached_%(class)s_set")

    def test_model_with_distinct_related_query_name(self):
        self.assertSequenceEqual(
            Post.objects.filter(attached_model_inheritance_comments__is_spam=True), []
        )

        # The Post model doesn't have a related query accessor based on
        # related_name (attached_comment_set).
        msg = "Cannot resolve keyword 'attached_comment_set' into field."
with self.assertRaisesMessage(FieldError, msg): Post.objects.filter(attached_comment_set__is_spam=True) def test_meta_fields_and_ordering(self): # Make sure Restaurant and ItalianRestaurant have the right fields in # the right order. self.assertEqual( [f.name for f in Restaurant._meta.fields], [ "id", "name", "address", "place_ptr", "rating", "serves_hot_dogs", "serves_pizza", "chef", ], ) self.assertEqual( [f.name for f in ItalianRestaurant._meta.fields], [ "id", "name", "address", "place_ptr", "rating", "serves_hot_dogs", "serves_pizza", "chef", "restaurant_ptr", "serves_gnocchi", ], ) self.assertEqual(Restaurant._meta.ordering, ["-rating"]) def test_custompk_m2m(self): b = Base.objects.create() b.titles.add(Title.objects.create(title="foof")) s = SubBase.objects.create(sub_id=b.id) b = Base.objects.get(pk=s.id) self.assertNotEqual(b.pk, s.pk) # Low-level test for related_val self.assertEqual(s.titles.related_val, (s.id,)) # Higher level test for correct query values (title foof not # accidentally found). self.assertSequenceEqual(s.titles.all(), []) def test_update_parent_filtering(self): """ Updating a field of a model subclass doesn't issue an UPDATE query constrained by an inner query (#10399). """ supplier = Supplier.objects.create( name="Central market", address="610 some street", ) # Capture the expected query in a database agnostic way with CaptureQueriesContext(connection) as captured_queries: Place.objects.filter(pk=supplier.pk).update(name=supplier.name) expected_sql = captured_queries[0]["sql"] # Capture the queries executed when a subclassed model instance is saved. with CaptureQueriesContext(connection) as captured_queries: supplier.save(update_fields=("name",)) for query in captured_queries: sql = query["sql"] if "UPDATE" in sql: self.assertEqual(expected_sql, sql) def test_create_child_no_update(self): """Creating a child with non-abstract parents only issues INSERTs.""" def a(): GrandChild.objects.create( email="[email protected]", first_name="grand", last_name="parent", ) def b(): GrandChild().save() for i, test in enumerate([a, b]): with self.subTest(i=i), self.assertNumQueries(4), CaptureQueriesContext( connection ) as queries: test() for query in queries: sql = query["sql"] self.assertIn("INSERT INTO", sql, sql) def test_create_copy_with_inherited_m2m(self): restaurant = Restaurant.objects.create() supplier = CustomSupplier.objects.create( name="Central market", address="944 W. Fullerton" ) supplier.customers.set([restaurant]) old_customers = supplier.customers.all() supplier.pk = None supplier.id = None supplier._state.adding = True supplier.save() supplier.customers.set(old_customers) supplier = Supplier.objects.get(pk=supplier.pk) self.assertCountEqual(supplier.customers.all(), old_customers) self.assertSequenceEqual(supplier.customers.all(), [restaurant]) def test_eq(self): # Equality doesn't transfer in multitable inheritance. 
self.assertNotEqual(Place(id=1), Restaurant(id=1)) self.assertNotEqual(Restaurant(id=1), Place(id=1)) def test_mixin_init(self): m = MixinModel() self.assertEqual(m.other_attr, 1) @isolate_apps("model_inheritance") def test_abstract_parent_link(self): class A(models.Model): pass class B(A): a = models.OneToOneField("A", parent_link=True, on_delete=models.CASCADE) class Meta: abstract = True class C(B): pass self.assertIs(C._meta.parents[A], C._meta.get_field("a")) @isolate_apps("model_inheritance") def test_init_subclass(self): saved_kwargs = {} class A(models.Model): def __init_subclass__(cls, **kwargs): super().__init_subclass__() saved_kwargs.update(kwargs) kwargs = {"x": 1, "y": 2, "z": 3} class B(A, **kwargs): pass self.assertEqual(saved_kwargs, kwargs) @isolate_apps("model_inheritance") def test_set_name(self): class ClassAttr: called = None def __set_name__(self_, owner, name): self.assertIsNone(self_.called) self_.called = (owner, name) class A(models.Model): attr = ClassAttr() self.assertEqual(A.attr.called, (A, "attr")) def test_inherited_ordering_pk_desc(self): p1 = Parent.objects.create(first_name="Joe", email="[email protected]") p2 = Parent.objects.create(first_name="Jon", email="[email protected]") expected_order_by_sql = "ORDER BY %s.%s DESC" % ( connection.ops.quote_name(Parent._meta.db_table), connection.ops.quote_name(Parent._meta.get_field("grandparent_ptr").column), ) qs = Parent.objects.all() self.assertSequenceEqual(qs, [p2, p1]) self.assertIn(expected_order_by_sql, str(qs.query)) def test_queryset_class_getitem(self): self.assertIs(models.QuerySet[Post], models.QuerySet) self.assertIs(models.QuerySet[Post, Post], models.QuerySet) self.assertIs(models.QuerySet[Post, int, str], models.QuerySet) def test_shadow_parent_attribute_with_field(self): class ScalarParent(models.Model): foo = 1 class ScalarOverride(ScalarParent): foo = models.IntegerField() self.assertEqual(type(ScalarOverride.foo), DeferredAttribute) def test_shadow_parent_property_with_field(self): class PropertyParent(models.Model): @property def foo(self): pass class PropertyOverride(PropertyParent): foo = models.IntegerField() self.assertEqual(type(PropertyOverride.foo), DeferredAttribute) def test_shadow_parent_method_with_field(self): class MethodParent(models.Model): def foo(self): pass class MethodOverride(MethodParent): foo = models.IntegerField() self.assertEqual(type(MethodOverride.foo), DeferredAttribute) class ModelInheritanceDataTests(TestCase): @classmethod def setUpTestData(cls): cls.restaurant = Restaurant.objects.create( name="Demon Dogs", address="944 W. Fullerton", serves_hot_dogs=True, serves_pizza=False, rating=2, ) chef = Chef.objects.create(name="Albert") cls.italian_restaurant = ItalianRestaurant.objects.create( name="Ristorante Miron", address="1234 W. Ash", serves_hot_dogs=False, serves_pizza=False, serves_gnocchi=True, rating=4, chef=chef, ) def test_filter_inherited_model(self): self.assertQuerySetEqual( ItalianRestaurant.objects.filter(address="1234 W. Ash"), [ "Ristorante Miron", ], attrgetter("name"), ) def test_update_inherited_model(self): self.italian_restaurant.address = "1234 W. Elm" self.italian_restaurant.save() self.assertQuerySetEqual( ItalianRestaurant.objects.filter(address="1234 W. Elm"), [ "Ristorante Miron", ], attrgetter("name"), ) def test_parent_fields_available_for_filtering_in_child_model(self): # Parent fields can be used directly in filters on the child model. 
        self.assertQuerySetEqual(
            Restaurant.objects.filter(name="Demon Dogs"),
            [
                "Demon Dogs",
            ],
            attrgetter("name"),
        )
        self.assertQuerySetEqual(
            ItalianRestaurant.objects.filter(address="1234 W. Ash"),
            [
                "Ristorante Miron",
            ],
            attrgetter("name"),
        )

    def test_filter_on_parent_returns_object_of_parent_type(self):
        # Filters against the parent model return objects of the parent's type.
        p = Place.objects.get(name="Demon Dogs")
        self.assertIs(type(p), Place)

    def test_parent_child_one_to_one_link(self):
        # Since the parent and child are linked by an automatically created
        # OneToOneField, you can get from the parent to the child by using the
        # child's name.
        self.assertEqual(
            Place.objects.get(name="Demon Dogs").restaurant,
            Restaurant.objects.get(name="Demon Dogs"),
        )
        self.assertEqual(
            Place.objects.get(name="Ristorante Miron").restaurant.italianrestaurant,
            ItalianRestaurant.objects.get(name="Ristorante Miron"),
        )
        self.assertEqual(
            Restaurant.objects.get(name="Ristorante Miron").italianrestaurant,
            ItalianRestaurant.objects.get(name="Ristorante Miron"),
        )

    def test_parent_child_one_to_one_link_on_nonrelated_objects(self):
        # This won't work because the Demon Dogs restaurant is not an Italian
        # restaurant.
        with self.assertRaises(ItalianRestaurant.DoesNotExist):
            Place.objects.get(name="Demon Dogs").restaurant.italianrestaurant

    def test_inherited_does_not_exist_exception(self):
        # An ItalianRestaurant which does not exist is also a Place which does
        # not exist.
        with self.assertRaises(Place.DoesNotExist):
            ItalianRestaurant.objects.get(name="The Noodle Void")

    def test_inherited_multiple_objects_returned_exception(self):
        # MultipleObjectsReturned is also inherited.
        with self.assertRaises(Place.MultipleObjectsReturned):
            Restaurant.objects.get()

    def test_related_objects_for_inherited_models(self):
        # Related objects work just as they normally do.
        s1 = Supplier.objects.create(name="Joe's Chickens", address="123 Sesame St")
        s1.customers.set([self.restaurant, self.italian_restaurant])
        s2 = Supplier.objects.create(name="Luigi's Pasta", address="456 Sesame St")
        s2.customers.set([self.italian_restaurant])

        # This won't work because the Place we select is not a Restaurant (it's
        # a Supplier).
        p = Place.objects.get(name="Joe's Chickens")
        with self.assertRaises(Restaurant.DoesNotExist):
            p.restaurant

        self.assertEqual(p.supplier, s1)
        self.assertQuerySetEqual(
            self.italian_restaurant.provider.order_by("-name"),
            ["Luigi's Pasta", "Joe's Chickens"],
            attrgetter("name"),
        )
        self.assertQuerySetEqual(
            Restaurant.objects.filter(provider__name__contains="Chickens"),
            [
                "Ristorante Miron",
                "Demon Dogs",
            ],
            attrgetter("name"),
        )
        self.assertQuerySetEqual(
            ItalianRestaurant.objects.filter(provider__name__contains="Chickens"),
            [
                "Ristorante Miron",
            ],
            attrgetter("name"),
        )

        ParkingLot.objects.create(name="Main St", address="111 Main St", main_site=s1)
        ParkingLot.objects.create(
            name="Well Lit", address="124 Sesame St", main_site=self.italian_restaurant
        )

        self.assertEqual(
            Restaurant.objects.get(lot__name="Well Lit").name, "Ristorante Miron"
        )

    def test_update_works_on_parent_and_child_models_at_once(self):
        # The update() command can update fields in parent and child classes at
        # once (although it executes multiple SQL queries to do so).
        rows = Restaurant.objects.filter(
            serves_hot_dogs=True, name__contains="D"
        ).update(name="Demon Puppies", serves_hot_dogs=False)
        self.assertEqual(rows, 1)

        r1 = Restaurant.objects.get(pk=self.restaurant.pk)
        self.assertFalse(r1.serves_hot_dogs)
        self.assertEqual(r1.name, "Demon Puppies")

    def test_values_works_on_parent_model_fields(self):
        # The values() command also works on fields from parent models.
        self.assertSequenceEqual(
            ItalianRestaurant.objects.values("name", "rating"),
            [
                {"rating": 4, "name": "Ristorante Miron"},
            ],
        )

    def test_select_related_works_on_parent_model_fields(self):
        # select_related works with fields from the parent object as if they
        # were a normal part of the model.
        self.assertNumQueries(2, lambda: ItalianRestaurant.objects.all()[0].chef)
        self.assertNumQueries(
            1, lambda: ItalianRestaurant.objects.select_related("chef")[0].chef
        )

    def test_select_related_defer(self):
        """
        #23370 - Should be able to defer child fields when using
        select_related() from parent to child.
        """
        qs = (
            Restaurant.objects.select_related("italianrestaurant")
            .defer("italianrestaurant__serves_gnocchi")
            .order_by("rating")
        )

        # The field was actually deferred
        with self.assertNumQueries(2):
            objs = list(qs.all())
            self.assertTrue(objs[1].italianrestaurant.serves_gnocchi)

        # Model fields were assigned correct values
        self.assertEqual(qs[0].name, "Demon Dogs")
        self.assertEqual(qs[0].rating, 2)
        self.assertEqual(qs[1].italianrestaurant.name, "Ristorante Miron")
        self.assertEqual(qs[1].italianrestaurant.rating, 4)

    def test_parent_cache_reuse(self):
        place = Place.objects.create()
        GrandChild.objects.create(place=place)
        grand_parent = GrandParent.objects.latest("pk")
        with self.assertNumQueries(1):
            self.assertEqual(grand_parent.place, place)
        parent = grand_parent.parent
        with self.assertNumQueries(0):
            self.assertEqual(parent.place, place)
        child = parent.child
        with self.assertNumQueries(0):
            self.assertEqual(child.place, place)
        grandchild = child.grandchild
        with self.assertNumQueries(0):
            self.assertEqual(grandchild.place, place)

    def test_update_query_counts(self):
        """
        Update queries do not generate unnecessary queries (#18304).
""" with self.assertNumQueries(3): self.italian_restaurant.save() def test_filter_inherited_on_null(self): # Refs #12567 Supplier.objects.create( name="Central market", address="610 some street", ) self.assertQuerySetEqual( Place.objects.filter(supplier__isnull=False), [ "Central market", ], attrgetter("name"), ) self.assertQuerySetEqual( Place.objects.filter(supplier__isnull=True).order_by("name"), [ "Demon Dogs", "Ristorante Miron", ], attrgetter("name"), ) def test_exclude_inherited_on_null(self): # Refs #12567 Supplier.objects.create( name="Central market", address="610 some street", ) self.assertQuerySetEqual( Place.objects.exclude(supplier__isnull=False).order_by("name"), [ "Demon Dogs", "Ristorante Miron", ], attrgetter("name"), ) self.assertQuerySetEqual( Place.objects.exclude(supplier__isnull=True), [ "Central market", ], attrgetter("name"), ) @isolate_apps("model_inheritance", "model_inheritance.tests") class InheritanceSameModelNameTests(SimpleTestCase): def test_abstract_fk_related_name(self): related_name = "%(app_label)s_%(class)s_references" class Referenced(models.Model): class Meta: app_label = "model_inheritance" class AbstractReferent(models.Model): reference = models.ForeignKey( Referenced, models.CASCADE, related_name=related_name ) class Meta: app_label = "model_inheritance" abstract = True class Referent(AbstractReferent): class Meta: app_label = "model_inheritance" LocalReferent = Referent class Referent(AbstractReferent): class Meta: app_label = "tests" ForeignReferent = Referent self.assertFalse(hasattr(Referenced, related_name)) self.assertIs( Referenced.model_inheritance_referent_references.field.model, LocalReferent ) self.assertIs(Referenced.tests_referent_references.field.model, ForeignReferent) class InheritanceUniqueTests(TestCase): @classmethod def setUpTestData(cls): cls.grand_parent = GrandParent.objects.create( email="[email protected]", first_name="grand", last_name="parent", ) def test_unique(self): grand_child = GrandChild( email=self.grand_parent.email, first_name="grand", last_name="child", ) msg = "Grand parent with this Email already exists." with self.assertRaisesMessage(ValidationError, msg): grand_child.validate_unique() def test_unique_together(self): grand_child = GrandChild( email="[email protected]", first_name=self.grand_parent.first_name, last_name=self.grand_parent.last_name, ) msg = "Grand parent with this First name and Last name already exists." with self.assertRaisesMessage(ValidationError, msg): grand_child.validate_unique()
""" XX. Model inheritance Model inheritance exists in two varieties: - abstract base classes which are a way of specifying common information inherited by the subclasses. They don't exist as a separate model. - non-abstract base classes (the default), which are models in their own right with their own database tables and everything. Their subclasses have references back to them, created automatically. Both styles are demonstrated here. """ from django.db import models # # Abstract base classes # class CommonInfo(models.Model): name = models.CharField(max_length=50) age = models.PositiveIntegerField() class Meta: abstract = True ordering = ["name"] def __str__(self): return "%s %s" % (self.__class__.__name__, self.name) class Worker(CommonInfo): job = models.CharField(max_length=50) class Student(CommonInfo): school_class = models.CharField(max_length=10) class Meta: pass # # Abstract base classes with related models # class Post(models.Model): title = models.CharField(max_length=50) class Attachment(models.Model): post = models.ForeignKey( Post, models.CASCADE, related_name="attached_%(class)s_set", related_query_name="attached_%(app_label)s_%(class)ss", ) content = models.TextField() class Meta: abstract = True class Comment(Attachment): is_spam = models.BooleanField(default=False) class Link(Attachment): url = models.URLField() # # Multi-table inheritance # class Chef(models.Model): name = models.CharField(max_length=50) class Place(models.Model): name = models.CharField(max_length=50) address = models.CharField(max_length=80) class Rating(models.Model): rating = models.IntegerField(null=True, blank=True) class Meta: abstract = True ordering = ["-rating"] class Restaurant(Place, Rating): serves_hot_dogs = models.BooleanField(default=False) serves_pizza = models.BooleanField(default=False) chef = models.ForeignKey(Chef, models.SET_NULL, null=True, blank=True) class Meta(Rating.Meta): db_table = "my_restaurant" class ItalianRestaurant(Restaurant): serves_gnocchi = models.BooleanField(default=False) class Supplier(Place): customers = models.ManyToManyField(Restaurant, related_name="provider") class CustomSupplier(Supplier): pass class ParkingLot(Place): # An explicit link to the parent (we can control the attribute name). parent = models.OneToOneField( Place, models.CASCADE, primary_key=True, parent_link=True ) main_site = models.ForeignKey(Place, models.CASCADE, related_name="lot") # # Abstract base classes with related models where the sub-class has the # same name in a different app and inherits from the same abstract base # class. 
# NOTE: The actual API tests for the following classes are in # model_inheritance_same_model_name/models.py - They are defined # here in order to have the name conflict between apps # class Title(models.Model): title = models.CharField(max_length=50) class NamedURL(models.Model): title = models.ForeignKey( Title, models.CASCADE, related_name="attached_%(app_label)s_%(class)s_set" ) url = models.URLField() class Meta: abstract = True class Mixin: def __init__(self): self.other_attr = 1 super().__init__() class MixinModel(models.Model, Mixin): pass class Base(models.Model): titles = models.ManyToManyField(Title) class SubBase(Base): sub_id = models.IntegerField(primary_key=True) class GrandParent(models.Model): first_name = models.CharField(max_length=80) last_name = models.CharField(max_length=80) email = models.EmailField(unique=True) place = models.ForeignKey(Place, models.CASCADE, null=True, related_name="+") class Meta: # Ordering used by test_inherited_ordering_pk_desc. ordering = ["-pk"] unique_together = ("first_name", "last_name") class Parent(GrandParent): pass class Child(Parent): pass class GrandChild(Child): pass
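A sketch of the automatic parent links these models create, assuming a configured project with migrations applied.

restaurant = Restaurant.objects.create(
    name="Demon Dogs", address="944 W. Fullerton", rating=2
)
place = Place.objects.get(pk=restaurant.pk)
# The implicit OneToOneField is navigable in both directions.
assert place.restaurant == restaurant        # parent -> child descriptor
assert restaurant.place_ptr_id == place.pk   # child -> parent *_ptr field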
from django.forms import CharField, Form, Media, MultiWidget, TextInput
from django.template import Context, Template
from django.templatetags.static import static
from django.test import SimpleTestCase, override_settings
from django.utils.html import format_html, html_safe


@override_settings(
    STATIC_URL="http://media.example.com/static/",
)
class FormsMediaTestCase(SimpleTestCase):
    """Tests for the media handling on widgets and forms"""

    def test_construction(self):
        # Check construction of media objects
        m = Media(
            css={"all": ("path/to/css1", "/path/to/css2")},
            js=(
                "/path/to/js1",
                "http://media.other.com/path/to/js2",
                "https://secure.other.com/path/to/js3",
            ),
        )
        self.assertEqual(
            str(m),
            '<link href="http://media.example.com/static/path/to/css1" media="all" '
            'rel="stylesheet">\n'
            '<link href="/path/to/css2" media="all" rel="stylesheet">\n'
            '<script src="/path/to/js1"></script>\n'
            '<script src="http://media.other.com/path/to/js2"></script>\n'
            '<script src="https://secure.other.com/path/to/js3"></script>',
        )
        self.assertEqual(
            repr(m),
            "Media(css={'all': ['path/to/css1', '/path/to/css2']}, "
            "js=['/path/to/js1', 'http://media.other.com/path/to/js2', "
            "'https://secure.other.com/path/to/js3'])",
        )

        class Foo:
            css = {"all": ("path/to/css1", "/path/to/css2")}
            js = (
                "/path/to/js1",
                "http://media.other.com/path/to/js2",
                "https://secure.other.com/path/to/js3",
            )

        m3 = Media(Foo)
        self.assertEqual(
            str(m3),
            '<link href="http://media.example.com/static/path/to/css1" media="all" '
            'rel="stylesheet">\n'
            '<link href="/path/to/css2" media="all" rel="stylesheet">\n'
            '<script src="/path/to/js1"></script>\n'
            '<script src="http://media.other.com/path/to/js2"></script>\n'
            '<script src="https://secure.other.com/path/to/js3"></script>',
        )

        # A widget can exist without a media definition
        class MyWidget(TextInput):
            pass

        w = MyWidget()
        self.assertEqual(str(w.media), "")

    def test_media_dsl(self):
        ###############################################################
        # DSL Class-based media definitions
        ###############################################################

        # A widget can define media if it needs to.
        # Any absolute path will be preserved; relative paths are combined
        # with the value of settings.STATIC_URL
        class MyWidget1(TextInput):
            class Media:
                css = {"all": ("path/to/css1", "/path/to/css2")}
                js = (
                    "/path/to/js1",
                    "http://media.other.com/path/to/js2",
                    "https://secure.other.com/path/to/js3",
                )

        w1 = MyWidget1()
        self.assertEqual(
            str(w1.media),
            '<link href="http://media.example.com/static/path/to/css1" media="all" '
            'rel="stylesheet">\n'
            '<link href="/path/to/css2" media="all" rel="stylesheet">\n'
            '<script src="/path/to/js1"></script>\n'
            '<script src="http://media.other.com/path/to/js2"></script>\n'
            '<script src="https://secure.other.com/path/to/js3"></script>',
        )

        # Media objects can be interrogated by media type
        self.assertEqual(
            str(w1.media["css"]),
            '<link href="http://media.example.com/static/path/to/css1" media="all" '
            'rel="stylesheet">\n'
            '<link href="/path/to/css2" media="all" rel="stylesheet">',
        )

        self.assertEqual(
            str(w1.media["js"]),
            """<script src="/path/to/js1"></script>
<script src="http://media.other.com/path/to/js2"></script>
<script src="https://secure.other.com/path/to/js3"></script>""",
        )

    def test_combine_media(self):
        # Media objects can be combined. Any given media resource will appear only
        # once. Duplicated media definitions are ignored.
class MyWidget1(TextInput): class Media: css = {"all": ("path/to/css1", "/path/to/css2")} js = ( "/path/to/js1", "http://media.other.com/path/to/js2", "https://secure.other.com/path/to/js3", ) class MyWidget2(TextInput): class Media: css = {"all": ("/path/to/css2", "/path/to/css3")} js = ("/path/to/js1", "/path/to/js4") class MyWidget3(TextInput): class Media: css = {"all": ("path/to/css1", "/path/to/css3")} js = ("/path/to/js1", "/path/to/js4") w1 = MyWidget1() w2 = MyWidget2() w3 = MyWidget3() self.assertEqual( str(w1.media + w2.media + w3.media), '<link href="http://media.example.com/static/path/to/css1" media="all" ' 'rel="stylesheet">\n' '<link href="/path/to/css2" media="all" rel="stylesheet">\n' '<link href="/path/to/css3" media="all" rel="stylesheet">\n' '<script src="/path/to/js1"></script>\n' '<script src="http://media.other.com/path/to/js2"></script>\n' '<script src="/path/to/js4"></script>\n' '<script src="https://secure.other.com/path/to/js3"></script>', ) # media addition hasn't affected the original objects self.assertEqual( str(w1.media), '<link href="http://media.example.com/static/path/to/css1" media="all" ' 'rel="stylesheet">\n' '<link href="/path/to/css2" media="all" rel="stylesheet">\n' '<script src="/path/to/js1"></script>\n' '<script src="http://media.other.com/path/to/js2"></script>\n' '<script src="https://secure.other.com/path/to/js3"></script>', ) # Regression check for #12879: specifying the same CSS or JS file # multiple times in a single Media instance should result in that file # only being included once. class MyWidget4(TextInput): class Media: css = {"all": ("/path/to/css1", "/path/to/css1")} js = ("/path/to/js1", "/path/to/js1") w4 = MyWidget4() self.assertEqual( str(w4.media), """<link href="/path/to/css1" media="all" rel="stylesheet"> <script src="/path/to/js1"></script>""", ) def test_media_deduplication(self): # A deduplication test applied directly to a Media object, to confirm # that the deduplication doesn't only happen at the point of merging # two or more media objects. 
media = Media( css={"all": ("/path/to/css1", "/path/to/css1")}, js=("/path/to/js1", "/path/to/js1"), ) self.assertEqual( str(media), """<link href="/path/to/css1" media="all" rel="stylesheet"> <script src="/path/to/js1"></script>""", ) def test_media_property(self): ############################################################### # Property-based media definitions ############################################################### # Widget media can be defined as a property class MyWidget4(TextInput): def _media(self): return Media(css={"all": ("/some/path",)}, js=("/some/js",)) media = property(_media) w4 = MyWidget4() self.assertEqual( str(w4.media), """<link href="/some/path" media="all" rel="stylesheet"> <script src="/some/js"></script>""", ) # Media properties can reference the media of their parents class MyWidget5(MyWidget4): def _media(self): return super().media + Media( css={"all": ("/other/path",)}, js=("/other/js",) ) media = property(_media) w5 = MyWidget5() self.assertEqual( str(w5.media), """<link href="/some/path" media="all" rel="stylesheet"> <link href="/other/path" media="all" rel="stylesheet"> <script src="/some/js"></script> <script src="/other/js"></script>""", ) def test_media_property_parent_references(self): # Media properties can reference the media of their parents, # even if the parent media was defined using a class class MyWidget1(TextInput): class Media: css = {"all": ("path/to/css1", "/path/to/css2")} js = ( "/path/to/js1", "http://media.other.com/path/to/js2", "https://secure.other.com/path/to/js3", ) class MyWidget6(MyWidget1): def _media(self): return super().media + Media( css={"all": ("/other/path",)}, js=("/other/js",) ) media = property(_media) w6 = MyWidget6() self.assertEqual( str(w6.media), '<link href="http://media.example.com/static/path/to/css1" media="all" ' 'rel="stylesheet">\n' '<link href="/other/path" media="all" rel="stylesheet">\n' '<link href="/path/to/css2" media="all" rel="stylesheet">\n' '<script src="/path/to/js1"></script>\n' '<script src="/other/js"></script>\n' '<script src="http://media.other.com/path/to/js2"></script>\n' '<script src="https://secure.other.com/path/to/js3"></script>', ) def test_media_inheritance(self): ############################################################### # Inheritance of media ############################################################### # If a widget extends another but provides no media definition, it # inherits the parent widget's media. class MyWidget1(TextInput): class Media: css = {"all": ("path/to/css1", "/path/to/css2")} js = ( "/path/to/js1", "http://media.other.com/path/to/js2", "https://secure.other.com/path/to/js3", ) class MyWidget7(MyWidget1): pass w7 = MyWidget7() self.assertEqual( str(w7.media), '<link href="http://media.example.com/static/path/to/css1" media="all" ' 'rel="stylesheet">\n' '<link href="/path/to/css2" media="all" rel="stylesheet">\n' '<script src="/path/to/js1"></script>\n' '<script src="http://media.other.com/path/to/js2"></script>\n' '<script src="https://secure.other.com/path/to/js3"></script>', ) # If a widget extends another but defines media, it extends the parent # widget's media by default. 
        class MyWidget8(MyWidget1):
            class Media:
                css = {"all": ("/path/to/css3", "path/to/css1")}
                js = ("/path/to/js1", "/path/to/js4")

        w8 = MyWidget8()
        self.assertEqual(
            str(w8.media),
            """<link href="/path/to/css3" media="all" rel="stylesheet">
<link href="http://media.example.com/static/path/to/css1" media="all" rel="stylesheet">
<link href="/path/to/css2" media="all" rel="stylesheet">
<script src="/path/to/js1"></script>
<script src="http://media.other.com/path/to/js2"></script>
<script src="/path/to/js4"></script>
<script src="https://secure.other.com/path/to/js3"></script>""",
        )

    def test_media_inheritance_from_property(self):
        # If a widget extends another but defines media, it extends the parent
        # widget's media, even if the parent defined media using a property.
        class MyWidget1(TextInput):
            class Media:
                css = {"all": ("path/to/css1", "/path/to/css2")}
                js = (
                    "/path/to/js1",
                    "http://media.other.com/path/to/js2",
                    "https://secure.other.com/path/to/js3",
                )

        class MyWidget4(TextInput):
            def _media(self):
                return Media(css={"all": ("/some/path",)}, js=("/some/js",))

            media = property(_media)

        class MyWidget9(MyWidget4):
            class Media:
                css = {"all": ("/other/path",)}
                js = ("/other/js",)

        w9 = MyWidget9()
        self.assertEqual(
            str(w9.media),
            """<link href="/some/path" media="all" rel="stylesheet">
<link href="/other/path" media="all" rel="stylesheet">
<script src="/some/js"></script>
<script src="/other/js"></script>""",
        )

        # A widget can disable media inheritance by specifying 'extend=False'
        class MyWidget10(MyWidget1):
            class Media:
                extend = False
                css = {"all": ("/path/to/css3", "path/to/css1")}
                js = ("/path/to/js1", "/path/to/js4")

        w10 = MyWidget10()
        self.assertEqual(
            str(w10.media),
            """<link href="/path/to/css3" media="all" rel="stylesheet">
<link href="http://media.example.com/static/path/to/css1" media="all" rel="stylesheet">
<script src="/path/to/js1"></script>
<script src="/path/to/js4"></script>""",
        )

    def test_media_inheritance_extends(self):
        # A widget can explicitly enable full media inheritance by specifying
        # 'extend=True'.
        class MyWidget1(TextInput):
            class Media:
                css = {"all": ("path/to/css1", "/path/to/css2")}
                js = (
                    "/path/to/js1",
                    "http://media.other.com/path/to/js2",
                    "https://secure.other.com/path/to/js3",
                )

        class MyWidget11(MyWidget1):
            class Media:
                extend = True
                css = {"all": ("/path/to/css3", "path/to/css1")}
                js = ("/path/to/js1", "/path/to/js4")

        w11 = MyWidget11()
        self.assertEqual(
            str(w11.media),
            """<link href="/path/to/css3" media="all" rel="stylesheet">
<link href="http://media.example.com/static/path/to/css1" media="all" rel="stylesheet">
<link href="/path/to/css2" media="all" rel="stylesheet">
<script src="/path/to/js1"></script>
<script src="http://media.other.com/path/to/js2"></script>
<script src="/path/to/js4"></script>
<script src="https://secure.other.com/path/to/js3"></script>""",
        )

    def test_media_inheritance_single_type(self):
        # A widget can enable inheritance of one media type by specifying
        # extend as a tuple.
class MyWidget1(TextInput): class Media: css = {"all": ("path/to/css1", "/path/to/css2")} js = ( "/path/to/js1", "http://media.other.com/path/to/js2", "https://secure.other.com/path/to/js3", ) class MyWidget12(MyWidget1): class Media: extend = ("css",) css = {"all": ("/path/to/css3", "path/to/css1")} js = ("/path/to/js1", "/path/to/js4") w12 = MyWidget12() self.assertEqual( str(w12.media), """<link href="/path/to/css3" media="all" rel="stylesheet"> <link href="http://media.example.com/static/path/to/css1" media="all" rel="stylesheet"> <link href="/path/to/css2" media="all" rel="stylesheet"> <script src="/path/to/js1"></script> <script src="/path/to/js4"></script>""", ) def test_multi_media(self): ############################################################### # Multi-media handling for CSS ############################################################### # A widget can define CSS media for multiple output media types class MultimediaWidget(TextInput): class Media: css = { "screen, print": ("/file1", "/file2"), "screen": ("/file3",), "print": ("/file4",), } js = ("/path/to/js1", "/path/to/js4") multimedia = MultimediaWidget() self.assertEqual( str(multimedia.media), """<link href="/file4" media="print" rel="stylesheet"> <link href="/file3" media="screen" rel="stylesheet"> <link href="/file1" media="screen, print" rel="stylesheet"> <link href="/file2" media="screen, print" rel="stylesheet"> <script src="/path/to/js1"></script> <script src="/path/to/js4"></script>""", ) def test_multi_widget(self): ############################################################### # Multiwidget media handling ############################################################### class MyWidget1(TextInput): class Media: css = {"all": ("path/to/css1", "/path/to/css2")} js = ( "/path/to/js1", "http://media.other.com/path/to/js2", "https://secure.other.com/path/to/js3", ) class MyWidget2(TextInput): class Media: css = {"all": ("/path/to/css2", "/path/to/css3")} js = ("/path/to/js1", "/path/to/js4") class MyWidget3(TextInput): class Media: css = {"all": ("path/to/css1", "/path/to/css3")} js = ("/path/to/js1", "/path/to/js4") # MultiWidgets have a default media definition that gets all the # media from the component widgets class MyMultiWidget(MultiWidget): def __init__(self, attrs=None): widgets = [MyWidget1, MyWidget2, MyWidget3] super().__init__(widgets, attrs) mymulti = MyMultiWidget() self.assertEqual( str(mymulti.media), '<link href="http://media.example.com/static/path/to/css1" media="all" ' 'rel="stylesheet">\n' '<link href="/path/to/css2" media="all" rel="stylesheet">\n' '<link href="/path/to/css3" media="all" rel="stylesheet">\n' '<script src="/path/to/js1"></script>\n' '<script src="http://media.other.com/path/to/js2"></script>\n' '<script src="/path/to/js4"></script>\n' '<script src="https://secure.other.com/path/to/js3"></script>', ) def test_form_media(self): ############################################################### # Media processing for forms ############################################################### class MyWidget1(TextInput): class Media: css = {"all": ("path/to/css1", "/path/to/css2")} js = ( "/path/to/js1", "http://media.other.com/path/to/js2", "https://secure.other.com/path/to/js3", ) class MyWidget2(TextInput): class Media: css = {"all": ("/path/to/css2", "/path/to/css3")} js = ("/path/to/js1", "/path/to/js4") class MyWidget3(TextInput): class Media: css = {"all": ("path/to/css1", "/path/to/css3")} js = ("/path/to/js1", "/path/to/js4") # You can ask a form for the media required by its widgets. 
class MyForm(Form): field1 = CharField(max_length=20, widget=MyWidget1()) field2 = CharField(max_length=20, widget=MyWidget2()) f1 = MyForm() self.assertEqual( str(f1.media), '<link href="http://media.example.com/static/path/to/css1" media="all" ' 'rel="stylesheet">\n' '<link href="/path/to/css2" media="all" rel="stylesheet">\n' '<link href="/path/to/css3" media="all" rel="stylesheet">\n' '<script src="/path/to/js1"></script>\n' '<script src="http://media.other.com/path/to/js2"></script>\n' '<script src="/path/to/js4"></script>\n' '<script src="https://secure.other.com/path/to/js3"></script>', ) # Form media can be combined to produce a single media definition. class AnotherForm(Form): field3 = CharField(max_length=20, widget=MyWidget3()) f2 = AnotherForm() self.assertEqual( str(f1.media + f2.media), '<link href="http://media.example.com/static/path/to/css1" media="all" ' 'rel="stylesheet">\n' '<link href="/path/to/css2" media="all" rel="stylesheet">\n' '<link href="/path/to/css3" media="all" rel="stylesheet">\n' '<script src="/path/to/js1"></script>\n' '<script src="http://media.other.com/path/to/js2"></script>\n' '<script src="/path/to/js4"></script>\n' '<script src="https://secure.other.com/path/to/js3"></script>', ) # Forms can also define media, following the same rules as widgets. class FormWithMedia(Form): field1 = CharField(max_length=20, widget=MyWidget1()) field2 = CharField(max_length=20, widget=MyWidget2()) class Media: js = ("/some/form/javascript",) css = {"all": ("/some/form/css",)} f3 = FormWithMedia() self.assertEqual( str(f3.media), '<link href="http://media.example.com/static/path/to/css1" media="all" ' 'rel="stylesheet">\n' '<link href="/some/form/css" media="all" rel="stylesheet">\n' '<link href="/path/to/css2" media="all" rel="stylesheet">\n' '<link href="/path/to/css3" media="all" rel="stylesheet">\n' '<script src="/path/to/js1"></script>\n' '<script src="/some/form/javascript"></script>\n' '<script src="http://media.other.com/path/to/js2"></script>\n' '<script src="/path/to/js4"></script>\n' '<script src="https://secure.other.com/path/to/js3"></script>', ) # Media works in templates self.assertEqual( Template("{{ form.media.js }}{{ form.media.css }}").render( Context({"form": f3}) ), '<script src="/path/to/js1"></script>\n' '<script src="/some/form/javascript"></script>\n' '<script src="http://media.other.com/path/to/js2"></script>\n' '<script src="/path/to/js4"></script>\n' '<script src="https://secure.other.com/path/to/js3"></script>' '<link href="http://media.example.com/static/path/to/css1" media="all" ' 'rel="stylesheet">\n' '<link href="/some/form/css" media="all" rel="stylesheet">\n' '<link href="/path/to/css2" media="all" rel="stylesheet">\n' '<link href="/path/to/css3" media="all" rel="stylesheet">', ) def test_html_safe(self): media = Media(css={"all": ["/path/to/css"]}, js=["/path/to/js"]) self.assertTrue(hasattr(Media, "__html__")) self.assertEqual(str(media), media.__html__()) def test_merge(self): test_values = ( (([1, 2], [3, 4]), [1, 3, 2, 4]), (([1, 2], [2, 3]), [1, 2, 3]), (([2, 3], [1, 2]), [1, 2, 3]), (([1, 3], [2, 3]), [1, 2, 3]), (([1, 2], [1, 3]), [1, 2, 3]), (([1, 2], [3, 2]), [1, 3, 2]), (([1, 2], [1, 2]), [1, 2]), ( [[1, 2], [1, 3], [2, 3], [5, 7], [5, 6], [6, 7, 9], [8, 9]], [1, 5, 8, 2, 6, 3, 7, 9], ), ((), []), (([1, 2],), [1, 2]), ) for lists, expected in test_values: with self.subTest(lists=lists): self.assertEqual(Media.merge(*lists), expected) def test_merge_warning(self): msg = "Detected duplicate Media files in an opposite order: 
[1, 2], [2, 1]" with self.assertWarnsMessage(RuntimeWarning, msg): self.assertEqual(Media.merge([1, 2], [2, 1]), [1, 2]) def test_merge_js_three_way(self): """ The relative order of scripts is preserved in a three-way merge. """ widget1 = Media(js=["color-picker.js"]) widget2 = Media(js=["text-editor.js"]) widget3 = Media( js=["text-editor.js", "text-editor-extras.js", "color-picker.js"] ) merged = widget1 + widget2 + widget3 self.assertEqual( merged._js, ["text-editor.js", "text-editor-extras.js", "color-picker.js"] ) def test_merge_js_three_way2(self): # The merge prefers to place 'c' before 'b' and 'g' before 'h' to # preserve the original order. The preference 'c'->'b' is overridden by # widget3's media, but 'g'->'h' survives in the final ordering. widget1 = Media(js=["a", "c", "f", "g", "k"]) widget2 = Media(js=["a", "b", "f", "h", "k"]) widget3 = Media(js=["b", "c", "f", "k"]) merged = widget1 + widget2 + widget3 self.assertEqual(merged._js, ["a", "b", "c", "f", "g", "h", "k"]) def test_merge_css_three_way(self): widget1 = Media(css={"screen": ["c.css"], "all": ["d.css", "e.css"]}) widget2 = Media(css={"screen": ["a.css"]}) widget3 = Media(css={"screen": ["a.css", "b.css", "c.css"], "all": ["e.css"]}) widget4 = Media(css={"all": ["d.css", "e.css"], "screen": ["c.css"]}) merged = widget1 + widget2 # c.css comes before a.css because widget1 + widget2 establishes this # order. self.assertEqual( merged._css, {"screen": ["c.css", "a.css"], "all": ["d.css", "e.css"]} ) merged += widget3 # widget3 contains an explicit ordering of c.css and a.css. self.assertEqual( merged._css, {"screen": ["a.css", "b.css", "c.css"], "all": ["d.css", "e.css"]}, ) # Media ordering does not matter. merged = widget1 + widget4 self.assertEqual(merged._css, {"screen": ["c.css"], "all": ["d.css", "e.css"]}) def test_add_js_deduplication(self): widget1 = Media(js=["a", "b", "c"]) widget2 = Media(js=["a", "b"]) widget3 = Media(js=["a", "c", "b"]) merged = widget1 + widget1 self.assertEqual(merged._js_lists, [["a", "b", "c"]]) self.assertEqual(merged._js, ["a", "b", "c"]) merged = widget1 + widget2 self.assertEqual(merged._js_lists, [["a", "b", "c"], ["a", "b"]]) self.assertEqual(merged._js, ["a", "b", "c"]) # Lists with items in a different order are preserved when added. merged = widget1 + widget3 self.assertEqual(merged._js_lists, [["a", "b", "c"], ["a", "c", "b"]]) msg = ( "Detected duplicate Media files in an opposite order: " "['a', 'b', 'c'], ['a', 'c', 'b']" ) with self.assertWarnsMessage(RuntimeWarning, msg): merged._js def test_add_css_deduplication(self): widget1 = Media(css={"screen": ["a.css"], "all": ["b.css"]}) widget2 = Media(css={"screen": ["c.css"]}) widget3 = Media(css={"screen": ["a.css"], "all": ["b.css", "c.css"]}) widget4 = Media(css={"screen": ["a.css"], "all": ["c.css", "b.css"]}) merged = widget1 + widget1 self.assertEqual(merged._css_lists, [{"screen": ["a.css"], "all": ["b.css"]}]) self.assertEqual(merged._css, {"screen": ["a.css"], "all": ["b.css"]}) merged = widget1 + widget2 self.assertEqual( merged._css_lists, [ {"screen": ["a.css"], "all": ["b.css"]}, {"screen": ["c.css"]}, ], ) self.assertEqual(merged._css, {"screen": ["a.css", "c.css"], "all": ["b.css"]}) merged = widget3 + widget4 # Ordering within lists is preserved. 
self.assertEqual( merged._css_lists, [ {"screen": ["a.css"], "all": ["b.css", "c.css"]}, {"screen": ["a.css"], "all": ["c.css", "b.css"]}, ], ) msg = ( "Detected duplicate Media files in an opposite order: " "['b.css', 'c.css'], ['c.css', 'b.css']" ) with self.assertWarnsMessage(RuntimeWarning, msg): merged._css def test_add_empty(self): media = Media(css={"screen": ["a.css"]}, js=["a"]) empty_media = Media() merged = media + empty_media self.assertEqual(merged._css_lists, [{"screen": ["a.css"]}]) self.assertEqual(merged._js_lists, [["a"]]) @html_safe class Asset: def __init__(self, path): self.path = path def __eq__(self, other): return (self.__class__ == other.__class__ and self.path == other.path) or ( other.__class__ == str and self.path == other ) def __hash__(self): return hash(self.path) def __str__(self): return self.absolute_path(self.path) def absolute_path(self, path): """ Given a relative or absolute path to a static asset, return an absolute path. An absolute path will be returned unchanged while a relative path will be passed to django.templatetags.static.static(). """ if path.startswith(("http://", "https://", "/")): return path return static(path) def __repr__(self): return f"{self.path!r}" class CSS(Asset): def __init__(self, path, medium): super().__init__(path) self.medium = medium def __str__(self): path = super().__str__() return format_html( '<link href="{}" media="{}" rel="stylesheet">', self.absolute_path(path), self.medium, ) class JS(Asset): def __init__(self, path, integrity=None): super().__init__(path) self.integrity = integrity or "" def __str__(self, integrity=None): path = super().__str__() template = '<script src="{}"%s></script>' % ( ' integrity="{}"' if self.integrity else "{}" ) return format_html(template, self.absolute_path(path), self.integrity) @override_settings( STATIC_URL="http://media.example.com/static/", ) class FormsMediaObjectTestCase(SimpleTestCase): """Media handling when media are objects instead of raw strings.""" def test_construction(self): m = Media( css={"all": (CSS("path/to/css1", "all"), CSS("/path/to/css2", "all"))}, js=( JS("/path/to/js1"), JS("http://media.other.com/path/to/js2"), JS( "https://secure.other.com/path/to/js3", integrity="9d947b87fdeb25030d56d01f7aa75800", ), ), ) self.assertEqual( str(m), '<link href="http://media.example.com/static/path/to/css1" media="all" ' 'rel="stylesheet">\n' '<link href="/path/to/css2" media="all" rel="stylesheet">\n' '<script src="/path/to/js1"></script>\n' '<script src="http://media.other.com/path/to/js2"></script>\n' '<script src="https://secure.other.com/path/to/js3" ' 'integrity="9d947b87fdeb25030d56d01f7aa75800"></script>', ) self.assertEqual( repr(m), "Media(css={'all': ['path/to/css1', '/path/to/css2']}, " "js=['/path/to/js1', 'http://media.other.com/path/to/js2', " "'https://secure.other.com/path/to/js3'])", ) def test_simplest_class(self): @html_safe class SimpleJS: """The simplest possible asset class.""" def __str__(self): return '<script src="https://example.org/asset.js" rel="stylesheet">' m = Media(js=(SimpleJS(),)) self.assertEqual( str(m), '<script src="https://example.org/asset.js" rel="stylesheet">', ) def test_combine_media(self): class MyWidget1(TextInput): class Media: css = {"all": (CSS("path/to/css1", "all"), "/path/to/css2")} js = ( "/path/to/js1", "http://media.other.com/path/to/js2", "https://secure.other.com/path/to/js3", JS("/path/to/js4", integrity="9d947b87fdeb25030d56d01f7aa75800"), ) class MyWidget2(TextInput): class Media: css = {"all": (CSS("/path/to/css2", 
"all"), "/path/to/css3")} js = (JS("/path/to/js1"), "/path/to/js4") w1 = MyWidget1() w2 = MyWidget2() self.assertEqual( str(w1.media + w2.media), '<link href="http://media.example.com/static/path/to/css1" media="all" ' 'rel="stylesheet">\n' '<link href="/path/to/css2" media="all" rel="stylesheet">\n' '<link href="/path/to/css3" media="all" rel="stylesheet">\n' '<script src="/path/to/js1"></script>\n' '<script src="http://media.other.com/path/to/js2"></script>\n' '<script src="https://secure.other.com/path/to/js3"></script>\n' '<script src="/path/to/js4" integrity="9d947b87fdeb25030d56d01f7aa75800">' "</script>", ) def test_media_deduplication(self): # The deduplication doesn't only happen at the point of merging two or # more media objects. media = Media( css={ "all": ( CSS("/path/to/css1", "all"), CSS("/path/to/css1", "all"), "/path/to/css1", ) }, js=(JS("/path/to/js1"), JS("/path/to/js1"), "/path/to/js1"), ) self.assertEqual( str(media), '<link href="/path/to/css1" media="all" rel="stylesheet">\n' '<script src="/path/to/js1"></script>', )
""" Global Django exception and warning classes. """ import operator from django.utils.hashable import make_hashable class FieldDoesNotExist(Exception): """The requested model field does not exist""" pass class AppRegistryNotReady(Exception): """The django.apps registry is not populated yet""" pass class ObjectDoesNotExist(Exception): """The requested object does not exist""" silent_variable_failure = True class MultipleObjectsReturned(Exception): """The query returned multiple objects when only one was expected.""" pass class SuspiciousOperation(Exception): """The user did something suspicious""" class SuspiciousMultipartForm(SuspiciousOperation): """Suspect MIME request in multipart form data""" pass class SuspiciousFileOperation(SuspiciousOperation): """A Suspicious filesystem operation was attempted""" pass class DisallowedHost(SuspiciousOperation): """HTTP_HOST header contains invalid value""" pass class DisallowedRedirect(SuspiciousOperation): """Redirect to scheme not in allowed list""" pass class TooManyFieldsSent(SuspiciousOperation): """ The number of fields in a GET or POST request exceeded settings.DATA_UPLOAD_MAX_NUMBER_FIELDS. """ pass class RequestDataTooBig(SuspiciousOperation): """ The size of the request (excluding any file uploads) exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE. """ pass class RequestAborted(Exception): """The request was closed before it was completed, or timed out.""" pass class BadRequest(Exception): """The request is malformed and cannot be processed.""" pass class PermissionDenied(Exception): """The user did not have permission to do that""" pass class ViewDoesNotExist(Exception): """The requested view does not exist""" pass class MiddlewareNotUsed(Exception): """This middleware is not used in this server configuration""" pass class ImproperlyConfigured(Exception): """Django is somehow improperly configured""" pass class FieldError(Exception): """Some kind of problem with a model field.""" pass NON_FIELD_ERRORS = "__all__" class ValidationError(Exception): """An error while validating data.""" def __init__(self, message, code=None, params=None): """ The `message` argument can be a single error, a list of errors, or a dictionary that maps field names to lists of errors. What we define as an "error" can be either a simple string or an instance of ValidationError with its message attribute set, and what we define as list or dictionary can be an actual `list` or `dict` or an instance of ValidationError with its `error_list` or `error_dict` attribute set. """ super().__init__(message, code, params) if isinstance(message, ValidationError): if hasattr(message, "error_dict"): message = message.error_dict elif not hasattr(message, "message"): message = message.error_list else: message, code, params = message.message, message.code, message.params if isinstance(message, dict): self.error_dict = {} for field, messages in message.items(): if not isinstance(messages, ValidationError): messages = ValidationError(messages) self.error_dict[field] = messages.error_list elif isinstance(message, list): self.error_list = [] for message in message: # Normalize plain strings to instances of ValidationError. 
                if not isinstance(message, ValidationError):
                    message = ValidationError(message)
                if hasattr(message, "error_dict"):
                    self.error_list.extend(sum(message.error_dict.values(), []))
                else:
                    self.error_list.extend(message.error_list)

        else:
            self.message = message
            self.code = code
            self.params = params
            self.error_list = [self]

    @property
    def message_dict(self):
        # Trigger an AttributeError if this ValidationError
        # doesn't have an error_dict.
        getattr(self, "error_dict")

        return dict(self)

    @property
    def messages(self):
        if hasattr(self, "error_dict"):
            return sum(dict(self).values(), [])
        return list(self)

    def update_error_dict(self, error_dict):
        if hasattr(self, "error_dict"):
            for field, error_list in self.error_dict.items():
                error_dict.setdefault(field, []).extend(error_list)
        else:
            error_dict.setdefault(NON_FIELD_ERRORS, []).extend(self.error_list)
        return error_dict

    def __iter__(self):
        if hasattr(self, "error_dict"):
            for field, errors in self.error_dict.items():
                yield field, list(ValidationError(errors))
        else:
            for error in self.error_list:
                message = error.message
                if error.params:
                    message %= error.params
                yield str(message)

    def __str__(self):
        if hasattr(self, "error_dict"):
            return repr(dict(self))
        return repr(list(self))

    def __repr__(self):
        return "ValidationError(%s)" % self

    def __eq__(self, other):
        if not isinstance(other, ValidationError):
            return NotImplemented
        return hash(self) == hash(other)

    def __hash__(self):
        if hasattr(self, "message"):
            return hash(
                (
                    self.message,
                    self.code,
                    make_hashable(self.params),
                )
            )
        if hasattr(self, "error_dict"):
            return hash(make_hashable(self.error_dict))
        return hash(tuple(sorted(self.error_list, key=operator.attrgetter("message"))))


class EmptyResultSet(Exception):
    """A database query predicate is impossible."""

    pass


class FullResultSet(Exception):
    """A database query predicate matches everything."""

    pass


class SynchronousOnlyOperation(Exception):
    """The user tried to call a sync-only function from an async context."""

    pass
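The normalization rules in ValidationError.__init__ can be exercised directly; this snippet runs without any Django settings.

from django.core.exceptions import NON_FIELD_ERRORS, ValidationError

# A dict message becomes error_dict; message_dict renders it to strings.
err = ValidationError({"name": ["This field is required."]})
assert err.message_dict == {"name": ["This field is required."]}

# params are interpolated when messages are rendered, not at construction.
plain = ValidationError(
    "Invalid value: %(value)s", code="invalid", params={"value": 42}
)
assert plain.messages == ["Invalid value: 42"]

# Errors without an error_dict merge under NON_FIELD_ERRORS ("__all__").
assert list(plain.update_error_dict({})) == [NON_FIELD_ERRORS]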
""" Classes to represent the definitions of aggregate functions. """ from django.core.exceptions import FieldError, FullResultSet from django.db.models.expressions import Case, Func, Star, When from django.db.models.fields import IntegerField from django.db.models.functions.comparison import Coalesce from django.db.models.functions.mixins import ( FixDurationInputMixin, NumericOutputFieldMixin, ) __all__ = [ "Aggregate", "Avg", "Count", "Max", "Min", "StdDev", "Sum", "Variance", ] class Aggregate(Func): template = "%(function)s(%(distinct)s%(expressions)s)" contains_aggregate = True name = None filter_template = "%s FILTER (WHERE %%(filter)s)" window_compatible = True allow_distinct = False empty_result_set_value = None def __init__( self, *expressions, distinct=False, filter=None, default=None, **extra ): if distinct and not self.allow_distinct: raise TypeError("%s does not allow distinct." % self.__class__.__name__) if default is not None and self.empty_result_set_value is not None: raise TypeError(f"{self.__class__.__name__} does not allow default.") self.distinct = distinct self.filter = filter self.default = default super().__init__(*expressions, **extra) def get_source_fields(self): # Don't return the filter expression since it's not a source field. return [e._output_field_or_none for e in super().get_source_expressions()] def get_source_expressions(self): source_expressions = super().get_source_expressions() if self.filter: return source_expressions + [self.filter] return source_expressions def set_source_expressions(self, exprs): self.filter = self.filter and exprs.pop() return super().set_source_expressions(exprs) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # Aggregates are not allowed in UPDATE queries, so ignore for_save c = super().resolve_expression(query, allow_joins, reuse, summarize) c.filter = c.filter and c.filter.resolve_expression( query, allow_joins, reuse, summarize ) if not summarize: # Call Aggregate.get_source_expressions() to avoid # returning self.filter and including that in this loop. expressions = super(Aggregate, c).get_source_expressions() for index, expr in enumerate(expressions): if expr.contains_aggregate: before_resolved = self.get_source_expressions()[index] name = ( before_resolved.name if hasattr(before_resolved, "name") else repr(before_resolved) ) raise FieldError( "Cannot compute %s('%s'): '%s' is an aggregate" % (c.name, name, name) ) if (default := c.default) is None: return c if hasattr(default, "resolve_expression"): default = default.resolve_expression(query, allow_joins, reuse, summarize) c.default = None # Reset the default argument before wrapping. 
coalesce = Coalesce(c, default, output_field=c._output_field_or_none) coalesce.is_summary = c.is_summary return coalesce @property def default_alias(self): expressions = self.get_source_expressions() if len(expressions) == 1 and hasattr(expressions[0], "name"): return "%s__%s" % (expressions[0].name, self.name.lower()) raise TypeError("Complex expressions require an alias") def get_group_by_cols(self): return [] def as_sql(self, compiler, connection, **extra_context): extra_context["distinct"] = "DISTINCT " if self.distinct else "" if self.filter: if connection.features.supports_aggregate_filter_clause: try: filter_sql, filter_params = self.filter.as_sql(compiler, connection) except FullResultSet: pass else: template = self.filter_template % extra_context.get( "template", self.template ) sql, params = super().as_sql( compiler, connection, template=template, filter=filter_sql, **extra_context, ) return sql, (*params, *filter_params) else: copy = self.copy() copy.filter = None source_expressions = copy.get_source_expressions() condition = When(self.filter, then=source_expressions[0]) copy.set_source_expressions([Case(condition)] + source_expressions[1:]) return super(Aggregate, copy).as_sql( compiler, connection, **extra_context ) return super().as_sql(compiler, connection, **extra_context) def _get_repr_options(self): options = super()._get_repr_options() if self.distinct: options["distinct"] = self.distinct if self.filter: options["filter"] = self.filter return options class Avg(FixDurationInputMixin, NumericOutputFieldMixin, Aggregate): function = "AVG" name = "Avg" allow_distinct = True class Count(Aggregate): function = "COUNT" name = "Count" output_field = IntegerField() allow_distinct = True empty_result_set_value = 0 def __init__(self, expression, filter=None, **extra): if expression == "*": expression = Star() if isinstance(expression, Star) and filter is not None: raise ValueError("Star cannot be used with filter. Please specify a field.") super().__init__(expression, filter=filter, **extra) class Max(Aggregate): function = "MAX" name = "Max" class Min(Aggregate): function = "MIN" name = "Min" class StdDev(NumericOutputFieldMixin, Aggregate): name = "StdDev" def __init__(self, expression, sample=False, **extra): self.function = "STDDEV_SAMP" if sample else "STDDEV_POP" super().__init__(expression, **extra) def _get_repr_options(self): return {**super()._get_repr_options(), "sample": self.function == "STDDEV_SAMP"} class Sum(FixDurationInputMixin, Aggregate): function = "SUM" name = "Sum" allow_distinct = True class Variance(NumericOutputFieldMixin, Aggregate): name = "Variance" def __init__(self, expression, sample=False, **extra): self.function = "VAR_SAMP" if sample else "VAR_POP" super().__init__(expression, **extra) def _get_repr_options(self): return {**super()._get_repr_options(), "sample": self.function == "VAR_SAMP"}
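# ---------------------------------------------------------------------------
# Editor's sketch (not part of the module above): typical use of the
# ``filter`` and ``default`` arguments defined by Aggregate. ``Book`` is a
# hypothetical model with ``rating`` and ``price`` fields, and the queries
# assume a configured Django project, so they are only built inside a helper.

from django.db.models import Avg, Count, Q


def example_aggregates(Book):
    # A plain count next to a filtered count in one query. On backends
    # without FILTER (WHERE ...) support, Aggregate.as_sql() above rewrites
    # the filtered count as COUNT(CASE WHEN ... THEN id END).
    totals = Book.objects.aggregate(
        total=Count("id"),
        highly_rated=Count("id", filter=Q(rating__gte=4)),
    )
    # default= wraps the aggregate in Coalesce(Avg("price"), 0), as
    # resolve_expression() above shows, so an empty queryset yields 0
    # instead of None.
    price = Book.objects.aggregate(avg_price=Avg("price", default=0))
    return totals, price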
import copy import inspect import warnings from functools import partialmethod from itertools import chain from asgiref.sync import sync_to_async import django from django.apps import apps from django.conf import settings from django.core import checks from django.core.exceptions import ( NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned, ObjectDoesNotExist, ValidationError, ) from django.db import ( DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection, connections, router, transaction, ) from django.db.models import NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value from django.db.models.constants import LOOKUP_SEP from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, Collector from django.db.models.expressions import RawSQL from django.db.models.fields.related import ( ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation, ) from django.db.models.functions import Coalesce from django.db.models.manager import Manager from django.db.models.options import Options from django.db.models.query import F, Q from django.db.models.signals import ( class_prepared, post_init, post_save, pre_init, pre_save, ) from django.db.models.utils import AltersData, make_model_tuple from django.utils.encoding import force_str from django.utils.hashable import make_hashable from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext_lazy as _ class Deferred: def __repr__(self): return "<Deferred field>" def __str__(self): return "<Deferred field>" DEFERRED = Deferred() def subclass_exception(name, bases, module, attached_to): """ Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class. """ return type( name, bases, { "__module__": module, "__qualname__": "%s.%s" % (attached_to.__qualname__, name), }, ) def _has_contribute_to_class(value): # Only call contribute_to_class() if it's bound. return not inspect.isclass(value) and hasattr(value, "contribute_to_class") class ModelBase(type): """Metaclass for all models.""" def __new__(cls, name, bases, attrs, **kwargs): super_new = super().__new__ # Also ensure initialization is only performed for subclasses of Model # (excluding Model class itself). parents = [b for b in bases if isinstance(b, ModelBase)] if not parents: return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop("__module__") new_attrs = {"__module__": module} classcell = attrs.pop("__classcell__", None) if classcell is not None: new_attrs["__classcell__"] = classcell attr_meta = attrs.pop("Meta", None) # Pass all attrs without a (Django-specific) contribute_to_class() # method to type.__new__() so that they're properly initialized # (i.e. __set_name__()). contributable_attrs = {} for obj_name, obj in attrs.items(): if _has_contribute_to_class(obj): contributable_attrs[obj_name] = obj else: new_attrs[obj_name] = obj new_class = super_new(cls, name, bases, new_attrs, **kwargs) abstract = getattr(attr_meta, "abstract", False) meta = attr_meta or getattr(new_class, "Meta", None) base_meta = getattr(new_class, "_meta", None) app_label = None # Look for an application configuration to attach the model to. 
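        # An explicit Meta.app_label, when present, takes precedence over the
        # containing app's label in the branch below.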
app_config = apps.get_containing_app_config(module) if getattr(meta, "app_label", None) is None: if app_config is None: if not abstract: raise RuntimeError( "Model class %s.%s doesn't declare an explicit " "app_label and isn't in an application in " "INSTALLED_APPS." % (module, name) ) else: app_label = app_config.label new_class.add_to_class("_meta", Options(meta, app_label)) if not abstract: new_class.add_to_class( "DoesNotExist", subclass_exception( "DoesNotExist", tuple( x.DoesNotExist for x in parents if hasattr(x, "_meta") and not x._meta.abstract ) or (ObjectDoesNotExist,), module, attached_to=new_class, ), ) new_class.add_to_class( "MultipleObjectsReturned", subclass_exception( "MultipleObjectsReturned", tuple( x.MultipleObjectsReturned for x in parents if hasattr(x, "_meta") and not x._meta.abstract ) or (MultipleObjectsReturned,), module, attached_to=new_class, ), ) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). if not hasattr(meta, "ordering"): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, "get_latest_by"): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError( "%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped) ) # Add remaining attributes (those with a contribute_to_class() method) # to the class. for obj_name, obj in contributable_attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = chain( new_class._meta.local_fields, new_class._meta.local_many_to_many, new_class._meta.private_fields, ) field_names = {f.name for f in new_fields} # Basic setup for proxy models. if is_proxy: base = None for parent in [kls for kls in parents if hasattr(kls, "_meta")]: if parent._meta.abstract: if parent._meta.fields: raise TypeError( "Abstract base class containing model fields not " "permitted for proxy model '%s'." % name ) else: continue if base is None: base = parent elif parent._meta.concrete_model is not base._meta.concrete_model: raise TypeError( "Proxy model '%s' has more than one non-abstract model base " "class." % name ) if base is None: raise TypeError( "Proxy model '%s' has no non-abstract model base class." % name ) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Collect the parent links for multi-table inheritance. parent_links = {} for base in reversed([new_class] + parents): # Conceptually equivalent to `if base is Model`. if not hasattr(base, "_meta"): continue # Skip concrete parent classes. if base != new_class and not base._meta.abstract: continue # Locate OneToOneField instances. for field in base._meta.local_fields: if isinstance(field, OneToOneField) and field.remote_field.parent_link: related = resolve_relation(new_class, field.remote_field.model) parent_links[make_model_tuple(related)] = field # Track fields inherited from base models. inherited_attributes = set() # Do the appropriate setup for any model parents. for base in new_class.mro(): if base not in parents or not hasattr(base, "_meta"): # Things without _meta aren't functional models, so they're # uninteresting parents. 
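                # Their attributes are still recorded so that abstract field
                # inheritance below can skip names the class already defines.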
inherited_attributes.update(base.__dict__) continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many if not base._meta.abstract: # Check for clashes between locally declared fields and those # on the base classes. for field in parent_fields: if field.name in field_names: raise FieldError( "Local field %r in class %r clashes with field of " "the same name from base class %r." % ( field.name, name, base.__name__, ) ) else: inherited_attributes.add(field.name) # Concrete classes... base = base._meta.concrete_model base_key = make_model_tuple(base) if base_key in parent_links: field = parent_links[base_key] elif not is_proxy: attr_name = "%s_ptr" % base._meta.model_name field = OneToOneField( base, on_delete=CASCADE, name=attr_name, auto_created=True, parent_link=True, ) if attr_name in field_names: raise FieldError( "Auto-generated field '%s' in class %r for " "parent_link to base class %r clashes with " "declared field of the same name." % ( attr_name, name, base.__name__, ) ) # Only add the ptr field if it's not already present; # e.g. migrations will already have it specified if not hasattr(new_class, attr_name): new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: base_parents = base._meta.parents.copy() # Add fields from abstract base class if it wasn't overridden. for field in parent_fields: if ( field.name not in field_names and field.name not in new_class.__dict__ and field.name not in inherited_attributes ): new_field = copy.deepcopy(field) new_class.add_to_class(field.name, new_field) # Replace parent links defined on this base by the new # field. It will be appropriately resolved if required. if field.one_to_one: for parent, parent_link in base_parents.items(): if field == parent_link: base_parents[parent] = new_field # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base_parents) # Inherit private fields (like GenericForeignKey) from the parent # class for field in base._meta.private_fields: if field.name in field_names: if not base._meta.abstract: raise FieldError( "Local field %r in class %r clashes with field of " "the same name from base class %r." % ( field.name, name, base.__name__, ) ) else: field = copy.deepcopy(field) if not base._meta.abstract: field.mti_inherited = True new_class.add_to_class(field.name, field) # Copy indexes so that index names are unique when models extend an # abstract model. new_class._meta.indexes = [ copy.deepcopy(idx) for idx in new_class._meta.indexes ] if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() new_class._meta.apps.register_model(new_class._meta.app_label, new_class) return new_class def add_to_class(cls, name, value): if _has_contribute_to_class(value): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """Create some methods once self._meta has been populated.""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = partialmethod( cls._get_next_or_previous_in_order, is_next=True ) cls.get_previous_in_order = partialmethod( cls._get_next_or_previous_in_order, is_next=False ) # Defer creating accessors on the foreign class until it has been # created and registered. 
If remote_field is None, we're ordering # with respect to a GenericForeignKey and don't know what the # foreign class is - we'll add those accessors later in # contribute_to_class(). if opts.order_with_respect_to.remote_field: wrt = opts.order_with_respect_to remote = wrt.remote_field.model lazy_related_operation(make_foreign_order_accessors, cls, remote) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % ( cls.__name__, ", ".join(f.name for f in opts.fields), ) get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get( opts.label_lower ) if get_absolute_url_override: setattr(cls, "get_absolute_url", get_absolute_url_override) if not opts.managers: if any(f.name == "objects" for f in opts.fields): raise ValueError( "Model %s must specify a custom Manager, because it has a " "field named 'objects'." % cls.__name__ ) manager = Manager() manager.auto_created = True cls.add_to_class("objects", manager) # Set the name of _meta.indexes. This can't be done in # Options.contribute_to_class() because fields haven't been added to # the model at that point. for index in cls._meta.indexes: if not index.name: index.set_name_with_model(cls) class_prepared.send(sender=cls) @property def _base_manager(cls): return cls._meta.base_manager @property def _default_manager(cls): return cls._meta.default_manager class ModelStateFieldsCacheDescriptor: def __get__(self, instance, cls=None): if instance is None: return self res = instance.fields_cache = {} return res class ModelState: """Store model instance state.""" db = None # If true, uniqueness validation checks will consider this a new, unsaved # object. Necessary for correct validation of new instances of objects with # explicit (non-auto) PKs. This impacts validation only; it has no effect # on the actual save. adding = True fields_cache = ModelStateFieldsCacheDescriptor() class Model(AltersData, metaclass=ModelBase): def __init__(self, *args, **kwargs): # Alias some things as locals to avoid repeat global lookups cls = self.__class__ opts = self._meta _setattr = setattr _DEFERRED = DEFERRED if opts.abstract: raise TypeError("Abstract models cannot be instantiated.") pre_init.send(sender=cls, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration # when an iter throws it. So if the first iter throws it, the second # is *not* consumed. We rely on this, so don't change the order # without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) else: # Slower, kwargs-ready version. fields_iter = iter(opts.fields) for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) if kwargs.pop(field.name, NOT_PROVIDED) is not NOT_PROVIDED: raise TypeError( f"{cls.__qualname__}() got both positional and " f"keyword arguments for field '{field.name}'." 
) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. for field in fields_iter: is_related_object = False # Virtual field if field.attname not in kwargs and field.column is None: continue if kwargs: if isinstance(field.remote_field, ForeignObjectRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. if rel_obj is not _DEFERRED: _setattr(self, field.name, rel_obj) else: if val is not _DEFERRED: _setattr(self, field.attname, val) if kwargs: property_names = opts._property_names unexpected = () for prop, value in kwargs.items(): # Any remaining kwargs must correspond to properties or virtual # fields. if prop in property_names: if value is not _DEFERRED: _setattr(self, prop, value) else: try: opts.get_field(prop) except FieldDoesNotExist: unexpected += (prop,) else: if value is not _DEFERRED: _setattr(self, prop, value) if unexpected: unexpected_names = ", ".join(repr(n) for n in unexpected) raise TypeError( f"{cls.__name__}() got unexpected keyword arguments: " f"{unexpected_names}" ) super().__init__() post_init.send(sender=cls, instance=self) @classmethod def from_db(cls, db, field_names, values): if len(values) != len(cls._meta.concrete_fields): values_iter = iter(values) values = [ next(values_iter) if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields ] new = cls(*values) new._state.adding = False new._state.db = db return new def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def __str__(self): return "%s object (%s)" % (self.__class__.__name__, self.pk) def __eq__(self, other): if not isinstance(other, Model): return NotImplemented if self._meta.concrete_model != other._meta.concrete_model: return False my_pk = self.pk if my_pk is None: return self is other return my_pk == other.pk def __hash__(self): if self.pk is None: raise TypeError("Model instances without primary key value are unhashable") return hash(self.pk) def __reduce__(self): data = self.__getstate__() data[DJANGO_VERSION_PICKLE_KEY] = django.__version__ class_id = self._meta.app_label, self._meta.object_name return model_unpickle, (class_id,), data def __getstate__(self): """Hook to allow choosing the attributes to pickle.""" state = self.__dict__.copy() state["_state"] = copy.copy(state["_state"]) state["_state"].fields_cache = state["_state"].fields_cache.copy() # memoryview cannot be pickled, so cast it to bytes and store # separately. 
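        # __setstate__() below converts these bytes back to memoryview
        # objects when the instance is unpickled.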
_memoryview_attrs = [] for attr, value in state.items(): if isinstance(value, memoryview): _memoryview_attrs.append((attr, bytes(value))) if _memoryview_attrs: state["_memoryview_attrs"] = _memoryview_attrs for attr, value in _memoryview_attrs: state.pop(attr) return state def __setstate__(self, state): pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: if pickled_version != django.__version__: warnings.warn( "Pickled model instance's Django version %s does not " "match the current version %s." % (pickled_version, django.__version__), RuntimeWarning, stacklevel=2, ) else: warnings.warn( "Pickled model instance's Django version is not specified.", RuntimeWarning, stacklevel=2, ) if "_memoryview_attrs" in state: for attr, value in state.pop("_memoryview_attrs"): state[attr] = memoryview(value) self.__dict__.update(state) def _get_pk_val(self, meta=None): meta = meta or self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): for parent_link in self._meta.parents.values(): if parent_link and parent_link != self._meta.pk: setattr(self, parent_link.target_field.attname, value) return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def get_deferred_fields(self): """ Return a set containing names of deferred fields for this instance. """ return { f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__ } def refresh_from_db(self, using=None, fields=None): """ Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. """ if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, "_prefetched_objects_cache", ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' "are not allowed in fields." % LOOKUP_SEP ) hints = {"instance": self} db_instance_qs = self.__class__._base_manager.db_manager( using, hints=hints ).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [ f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields ] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) # Clear cached private relations. 
for field in self._meta.private_fields: if field.is_relation and field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db async def arefresh_from_db(self, using=None, fields=None): return await sync_to_async(self.refresh_from_db)(using=using, fields=fields) def serializable_value(self, field_name): """ Return the value of the field name for this instance. If the field is a foreign key, return the id value instead of the object. If there's no Field object with this name on the model, return the model attribute's value. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field(field_name) except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save( self, force_insert=False, force_update=False, using=None, update_fields=None ): """ Save the current instance. Override this in a subclass if you want to control the saving process. The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ self._prepare_related_fields_for_save(operation_name="save") using = using or router.db_for_write(self.__class__, instance=self) if force_insert and (force_update or update_fields): raise ValueError("Cannot force both insert and updating in model saving.") deferred_fields = self.get_deferred_fields() if update_fields is not None: # If update_fields is empty, skip the save. We do also check for # no-op saves later on for inheritance cases. This bailout is # still needed for skipping signal sending. if not update_fields: return update_fields = frozenset(update_fields) field_names = self._meta._non_pk_concrete_field_names non_model_fields = update_fields.difference(field_names) if non_model_fields: raise ValueError( "The following fields do not exist in this model, are m2m " "fields, or are non-concrete fields: %s" % ", ".join(non_model_fields) ) # If saving to the same database, and this model is deferred, then # automatically do an "update_fields" save on the loaded fields. elif not force_insert and deferred_fields and using == self._state.db: field_names = set() for field in self._meta.concrete_fields: if not field.primary_key and not hasattr(field, "through"): field_names.add(field.attname) loaded_fields = field_names.difference(deferred_fields) if loaded_fields: update_fields = frozenset(loaded_fields) self.save_base( using=using, force_insert=force_insert, force_update=force_update, update_fields=update_fields, ) save.alters_data = True async def asave( self, force_insert=False, force_update=False, using=None, update_fields=None ): return await sync_to_async(self.save)( force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields, ) asave.alters_data = True def save_base( self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None, ): """ Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading. 
""" using = using or router.db_for_write(self.__class__, instance=self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or update_fields cls = origin = self.__class__ # Skip proxies, but keep the origin as the proxy model. if cls._meta.proxy: cls = cls._meta.concrete_model meta = cls._meta if not meta.auto_created: pre_save.send( sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields, ) # A transaction isn't needed if one query is issued. if meta.parents: context_manager = transaction.atomic(using=using, savepoint=False) else: context_manager = transaction.mark_for_rollback_on_error(using=using) with context_manager: parent_inserted = False if not raw: parent_inserted = self._save_parents(cls, using, update_fields) updated = self._save_table( raw, cls, force_insert or parent_inserted, force_update, using, update_fields, ) # Store the database on which the object was saved self._state.db = using # Once saved, this is no longer a to-be-added instance. self._state.adding = False # Signal that the save is complete if not meta.auto_created: post_save.send( sender=origin, instance=self, created=(not updated), update_fields=update_fields, raw=raw, using=using, ) save_base.alters_data = True def _save_parents(self, cls, using, update_fields): """Save all the parents of cls using values from self.""" meta = cls._meta inserted = False for parent, field in meta.parents.items(): # Make sure the link fields are synced between parent and self. if ( field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None ): setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) parent_inserted = self._save_parents( cls=parent, using=using, update_fields=update_fields ) updated = self._save_table( cls=parent, using=using, update_fields=update_fields, force_insert=parent_inserted, ) if not updated: inserted = True # Set the parent's PK value to self. if field: setattr(self, field.attname, self._get_pk_val(parent._meta)) # Since we didn't have an instance of the parent handy set # attname directly, bypassing the descriptor. Invalidate # the related object cache, in case it's been accidentally # populated. A fresh instance will be re-built from the # database if necessary. if field.is_cached(self): field.delete_cached_value(self) return inserted def _save_table( self, raw=False, cls=None, force_insert=False, force_update=False, using=None, update_fields=None, ): """ Do the heavy-lifting involved in saving. Update or insert the data for a single table. """ meta = cls._meta non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] if update_fields: non_pks = [ f for f in non_pks if f.name in update_fields or f.attname in update_fields ] pk_val = self._get_pk_val(meta) if pk_val is None: pk_val = meta.pk.get_pk_value_on_save(self) setattr(self, meta.pk.attname, pk_val) pk_set = pk_val is not None if not pk_set and (force_update or update_fields): raise ValueError("Cannot force an update in save() with no primary key.") updated = False # Skip an UPDATE when adding an instance and primary key has a default. if ( not raw and not force_insert and self._state.adding and meta.pk.default and meta.pk.default is not NOT_PROVIDED ): force_insert = True # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. 
if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [ ( f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)), ) for f in non_pks ] forced_update = update_fields or force_update updated = self._do_update( base_qs, using, pk_val, values, update_fields, forced_update ) if force_update and not updated: raise DatabaseError("Forced update did not affect any rows.") if update_fields and not updated: raise DatabaseError("Save with update_fields did not affect any rows.") if not updated: if meta.order_with_respect_to: # If this is a model with an order_with_respect_to # autopopulate the _order field field = meta.order_with_respect_to filter_args = field.get_filter_kwargs_for_object(self) self._order = ( cls._base_manager.using(using) .filter(**filter_args) .aggregate( _order__max=Coalesce( ExpressionWrapper( Max("_order") + Value(1), output_field=IntegerField() ), Value(0), ), )["_order__max"] ) fields = meta.local_concrete_fields if not pk_set: fields = [f for f in fields if f is not meta.auto_field] returning_fields = meta.db_returning_fields results = self._do_insert( cls._base_manager, using, fields, returning_fields, raw ) if results: for value, field in zip(results[0], returning_fields): setattr(self, field.attname, value) return updated def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): """ Try to update the model. Return True if the model was updated (if an update query was done and a matching row was found in the DB). """ filtered = base_qs.filter(pk=pk_val) if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that # case we just say the update succeeded. Another case ending up here # is a model with just PK - in that case check that the PK still # exists. return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and # It may happen that the object is deleted from the DB right after # this check, causing the subsequent UPDATE to return zero matching # rows. The same result can occur in some rare cases when the # database returns zero despite the UPDATE being executed # successfully (a row is matched and updated). In order to # distinguish these two cases, the object's existence in the # database is again checked for if the UPDATE query returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 def _do_insert(self, manager, using, fields, returning_fields, raw): """ Do an INSERT. If returning_fields is defined then this method should return the newly created data for the model. """ return manager._insert( [self], fields=fields, returning_fields=returning_fields, using=using, raw=raw, ) def _prepare_related_fields_for_save(self, operation_name, fields=None): # Ensure that a model instance without a PK hasn't been assigned to # a ForeignKey, GenericForeignKey or OneToOneField on this model. If # the field is nullable, allowing the save would result in silent data # loss. for field in self._meta.concrete_fields: if fields and field not in fields: continue # If the related field isn't cached, then an instance hasn't been # assigned and there's no need to worry about this check. 
if field.is_relation and field.is_cached(self): obj = getattr(self, field.name, None) if not obj: continue # A pk may have been assigned manually to a model instance not # saved to the database (or auto-generated in a case like # UUIDField), but we allow the save to proceed and rely on the # database to raise an IntegrityError if applicable. If # constraints aren't supported by the database, there's the # unavoidable risk of data corruption. if obj.pk is None: # Remove the object from a related instance cache. if not field.remote_field.multiple: field.remote_field.delete_cached_value(obj) raise ValueError( "%s() prohibited to prevent data loss due to unsaved " "related object '%s'." % (operation_name, field.name) ) elif getattr(self, field.attname) in field.empty_values: # Set related object if it has been saved after an # assignment. setattr(self, field.name, obj) # If the relationship's pk/to_field was changed, clear the # cached relationship. if getattr(obj, field.target_field.attname) != getattr( self, field.attname ): field.delete_cached_value(self) # GenericForeignKeys are private. for field in self._meta.private_fields: if fields and field not in fields: continue if ( field.is_relation and field.is_cached(self) and hasattr(field, "fk_field") ): obj = field.get_cached_value(self, default=None) if obj and obj.pk is None: raise ValueError( f"{operation_name}() prohibited to prevent data loss due to " f"unsaved related object '{field.name}'." ) def delete(self, using=None, keep_parents=False): if self.pk is None: raise ValueError( "%s object can't be deleted because its %s attribute is set " "to None." % (self._meta.object_name, self._meta.pk.attname) ) using = using or router.db_for_write(self.__class__, instance=self) collector = Collector(using=using, origin=self) collector.collect([self], keep_parents=keep_parents) return collector.delete() delete.alters_data = True async def adelete(self, using=None, keep_parents=False): return await sync_to_async(self.delete)( using=using, keep_parents=keep_parents, ) adelete.alters_data = True def _get_FIELD_display(self, field): value = getattr(self, field.attname) choices_dict = dict(make_hashable(field.flatchoices)) # force_str() to coerce lazy strings. return force_str( choices_dict.get(make_hashable(value), value), strings_only=True ) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = "gt" if is_next else "lt" order = "" if is_next else "-" param = getattr(self, field.attname) q = Q.create([(field.name, param), (f"pk__{op}", self.pk)], connector=Q.AND) q = Q.create([q, (f"{field.name}__{op}", param)], connector=Q.OR) qs = ( self.__class__._default_manager.using(self._state.db) .filter(**kwargs) .filter(q) .order_by("%s%s" % (order, field.name), "%spk" % order) ) try: return qs[0] except IndexError: raise self.DoesNotExist( "%s matching query does not exist." 
% self.__class__._meta.object_name ) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = "gt" if is_next else "lt" order = "_order" if is_next else "-_order" order_field = self._meta.order_with_respect_to filter_args = order_field.get_filter_kwargs_for_object(self) obj = ( self.__class__._default_manager.filter(**filter_args) .filter( **{ "_order__%s" % op: self.__class__._default_manager.values("_order").filter( **{self._meta.pk.name: self.pk} ) } ) .order_by(order)[:1] .get() ) setattr(self, cachename, obj) return getattr(self, cachename) def _get_field_value_map(self, meta, exclude=None): if exclude is None: exclude = set() meta = meta or self._meta return { field.name: Value(getattr(self, field.attname), field) for field in meta.local_concrete_fields if field.name not in exclude } def prepare_database_save(self, field): if self.pk is None: raise ValueError( "Unsaved model instance %r cannot be used in an ORM query." % self ) return getattr(self, field.remote_field.get_related_field().attname) def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def validate_unique(self, exclude=None): """ Check unique constraints on the model and raise ValidationError if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None, include_meta_constraints=False): """ Return a list of checks to perform. Since validate_unique() could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. """ if exclude is None: exclude = set() unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] constraints = [] if include_meta_constraints: constraints = [(self.__class__, self._meta.total_unique_constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.unique_together: unique_togethers.append( (parent_class, parent_class._meta.unique_together) ) if include_meta_constraints and parent_class._meta.total_unique_constraints: constraints.append( (parent_class, parent_class._meta.total_unique_constraints) ) for model_class, unique_together in unique_togethers: for check in unique_together: if not any(name in exclude for name in check): # Add the check if the field isn't excluded. unique_checks.append((model_class, tuple(check))) if include_meta_constraints: for model_class, model_constraints in constraints: for constraint in model_constraints: if not any(name in exclude for name in constraint.fields): unique_checks.append((model_class, constraint.fields)) # These are checks for the unique_for_<date/year/month>. date_checks = [] # Gather a list of checks for fields declared as unique and add them to # the list of checks. 
        fields_with_class = [(self.__class__, self._meta.local_fields)]
        for parent_class in self._meta.get_parent_list():
            fields_with_class.append((parent_class, parent_class._meta.local_fields))

        for model_class, fields in fields_with_class:
            for f in fields:
                name = f.name
                if name in exclude:
                    continue
                if f.unique:
                    unique_checks.append((model_class, (name,)))
                if f.unique_for_date and f.unique_for_date not in exclude:
                    date_checks.append((model_class, "date", name, f.unique_for_date))
                if f.unique_for_year and f.unique_for_year not in exclude:
                    date_checks.append((model_class, "year", name, f.unique_for_year))
                if f.unique_for_month and f.unique_for_month not in exclude:
                    date_checks.append((model_class, "month", name, f.unique_for_month))
        return unique_checks, date_checks

    def _perform_unique_checks(self, unique_checks):
        errors = {}

        for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique fields.
            lookup_kwargs = {}
            for field_name in unique_check:
                f = self._meta.get_field(field_name)
                lookup_value = getattr(self, f.attname)
                # TODO: Handle multiple backends with different feature flags.
                if lookup_value is None or (
                    lookup_value == ""
                    and connection.features.interprets_empty_strings_as_nulls
                ):
                    # No value, skip the lookup.
                    continue
                if f.primary_key and not self._state.adding:
                    # No need to check for a unique primary key when editing.
                    continue
                lookup_kwargs[str(field_name)] = lookup_value

            # Some fields were skipped; no reason to do the check.
            if len(unique_check) != len(lookup_kwargs):
                continue

            qs = model_class._default_manager.filter(**lookup_kwargs)

            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one).
            # Note that we need to use the pk as defined by model_class, not
            # self.pk. These can be different fields because model inheritance
            # allows a single model to have effectively multiple primary keys.
            # Refs #17615.
            model_class_pk = self._get_pk_val(model_class._meta)
            if not self._state.adding and model_class_pk is not None:
                qs = qs.exclude(pk=model_class_pk)
            if qs.exists():
                if len(unique_check) == 1:
                    key = unique_check[0]
                else:
                    key = NON_FIELD_ERRORS
                errors.setdefault(key, []).append(
                    self.unique_error_message(model_class, unique_check)
                )

        return errors

    def _perform_date_checks(self, date_checks):
        errors = {}
        for model_class, lookup_type, field, unique_for in date_checks:
            lookup_kwargs = {}
            # There's a ticket to add a date lookup; we can remove this
            # special case once that makes its way in.
            date = getattr(self, unique_for)
            if date is None:
                continue
            if lookup_type == "date":
                lookup_kwargs["%s__day" % unique_for] = date.day
                lookup_kwargs["%s__month" % unique_for] = date.month
                lookup_kwargs["%s__year" % unique_for] = date.year
            else:
                lookup_kwargs["%s__%s" % (unique_for, lookup_type)] = getattr(
                    date, lookup_type
                )
            lookup_kwargs[field] = getattr(self, field)

            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one).
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)

            if qs.exists():
                errors.setdefault(field, []).append(
                    self.date_error_message(lookup_type, field, unique_for)
                )
        return errors

    def date_error_message(self, lookup_type, field_name, unique_for):
        opts = self._meta
        field = opts.get_field(field_name)
        return ValidationError(
            message=field.error_messages["unique_for_date"],
            code="unique_for_date",
            params={
                "model": self,
                "model_name": capfirst(opts.verbose_name),
                "lookup_type": lookup_type,
                "field": field_name,
                "field_label": capfirst(field.verbose_name),
                "date_field": unique_for,
                "date_field_label": capfirst(opts.get_field(unique_for).verbose_name),
            },
        )

    def unique_error_message(self, model_class, unique_check):
        opts = model_class._meta

        params = {
            "model": self,
            "model_class": model_class,
            "model_name": capfirst(opts.verbose_name),
            "unique_check": unique_check,
        }

        # A unique field.
        if len(unique_check) == 1:
            field = opts.get_field(unique_check[0])
            params["field_label"] = capfirst(field.verbose_name)
            return ValidationError(
                message=field.error_messages["unique"],
                code="unique",
                params=params,
            )

        # unique_together
        else:
            field_labels = [
                capfirst(opts.get_field(f).verbose_name) for f in unique_check
            ]
            params["field_labels"] = get_text_list(field_labels, _("and"))
            return ValidationError(
                message=_("%(model_name)s with this %(field_labels)s already exists."),
                code="unique_together",
                params=params,
            )

    def get_constraints(self):
        constraints = [(self.__class__, self._meta.constraints)]
        for parent_class in self._meta.get_parent_list():
            if parent_class._meta.constraints:
                constraints.append((parent_class, parent_class._meta.constraints))
        return constraints

    def validate_constraints(self, exclude=None):
        constraints = self.get_constraints()
        using = router.db_for_write(self.__class__, instance=self)

        errors = {}
        for model_class, model_constraints in constraints:
            for constraint in model_constraints:
                try:
                    constraint.validate(model_class, self, exclude=exclude, using=using)
                except ValidationError as e:
                    if e.code == "unique" and len(constraint.fields) == 1:
                        errors.setdefault(constraint.fields[0], []).append(e)
                    else:
                        errors = e.update_error_dict(errors)
        if errors:
            raise ValidationError(errors)

    def full_clean(self, exclude=None, validate_unique=True, validate_constraints=True):
        """
        Call clean_fields(), clean(), validate_unique(), and
        validate_constraints() on the model.
Raise a ValidationError for any errors that occur. """ errors = {} if exclude is None: exclude = set() else: exclude = set(exclude) try: self.clean_fields(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Form.clean() is run even if other validation fails, so do the # same with Model.clean() for consistency. try: self.clean() except ValidationError as e: errors = e.update_error_dict(errors) # Run unique checks, but only for fields that passed validation. if validate_unique: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.add(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Run constraints checks, but only for fields that passed validation. if validate_constraints: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.add(name) try: self.validate_constraints(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Clean all fields and raise a ValidationError containing a dict of all validation errors if any occur. """ if exclude is None: exclude = set() errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in f.empty_values: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.error_list if errors: raise ValidationError(errors) @classmethod def check(cls, **kwargs): errors = [ *cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs), ] if not cls._meta.swapped: databases = kwargs.get("databases") or [] errors += [ *cls._check_fields(**kwargs), *cls._check_m2m_through_same_relationship(), *cls._check_long_column_names(databases), ] clash_errors = ( *cls._check_id_field(), *cls._check_field_name_clashes(), *cls._check_model_name_db_lookup_clashes(), *cls._check_property_name_related_field_accessor_clashes(), *cls._check_single_primary_key(), ) errors.extend(clash_errors) # If there are field name clashes, hide consequent column name # clashes. if not clash_errors: errors.extend(cls._check_column_name_clashes()) errors += [ *cls._check_index_together(), *cls._check_unique_together(), *cls._check_indexes(databases), *cls._check_ordering(), *cls._check_constraints(databases), *cls._check_default_pk(), ] return errors @classmethod def _check_default_pk(cls): if ( not cls._meta.abstract and cls._meta.pk.auto_created and # Inherited PKs are checked in parents models. not ( isinstance(cls._meta.pk, OneToOneField) and cls._meta.pk.remote_field.parent_link ) and not settings.is_overridden("DEFAULT_AUTO_FIELD") and cls._meta.app_config and not cls._meta.app_config._is_default_auto_field_overridden ): return [ checks.Warning( f"Auto-created primary key used when not defining a " f"primary key type, by default " f"'{settings.DEFAULT_AUTO_FIELD}'.", hint=( f"Configure the DEFAULT_AUTO_FIELD setting or the " f"{cls._meta.app_config.__class__.__qualname__}." f"default_auto_field attribute to point to a subclass " f"of AutoField, e.g. 'django.db.models.BigAutoField'." 
), obj=cls, id="models.W042", ), ] return [] @classmethod def _check_swappable(cls): """Check if the swapped model exists.""" errors = [] if cls._meta.swapped: try: apps.get_model(cls._meta.swapped) except ValueError: errors.append( checks.Error( "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, id="models.E001", ) ) except LookupError: app_label, model_name = cls._meta.swapped.split(".") errors.append( checks.Error( "'%s' references '%s.%s', which has not been " "installed, or is abstract." % (cls._meta.swappable, app_label, model_name), id="models.E002", ) ) return errors @classmethod def _check_model(cls): errors = [] if cls._meta.proxy: if cls._meta.local_fields or cls._meta.local_many_to_many: errors.append( checks.Error( "Proxy model '%s' contains model fields." % cls.__name__, id="models.E017", ) ) return errors @classmethod def _check_managers(cls, **kwargs): """Perform all manager checks.""" errors = [] for manager in cls._meta.managers: errors.extend(manager.check(**kwargs)) return errors @classmethod def _check_fields(cls, **kwargs): """Perform all field checks.""" errors = [] for field in cls._meta.local_fields: errors.extend(field.check(**kwargs)) for field in cls._meta.local_many_to_many: errors.extend(field.check(from_model=cls, **kwargs)) return errors @classmethod def _check_m2m_through_same_relationship(cls): """Check if no relationship model is used by more than one m2m field.""" errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = ( f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields, ) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id="models.E003", ) ) else: seen_intermediary_signatures.append(signature) return errors @classmethod def _check_id_field(cls): """Check if `id` field is a primary key.""" fields = [ f for f in cls._meta.local_fields if f.name == "id" and f != cls._meta.pk ] # fields is empty or consists of the invalid "id" field if fields and not fields[0].primary_key and cls._meta.pk.name == "id": return [ checks.Error( "'id' can only be used as a field name if the field also " "sets 'primary_key=True'.", obj=cls, id="models.E004", ) ] else: return [] @classmethod def _check_field_name_clashes(cls): """Forbid field shadowing in multi-table inheritance.""" errors = [] used_fields = {} # name or attname -> field # Check that multi-inheritance doesn't cause field name shadowing. for parent in cls._meta.get_parent_list(): for f in parent._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None if clash: errors.append( checks.Error( "The field '%s' from parent model " "'%s' clashes with the field '%s' " "from parent model '%s'." % (clash.name, clash.model._meta, f.name, f.model._meta), obj=cls, id="models.E005", ) ) used_fields[f.name] = f used_fields[f.attname] = f # Check that fields defined in the model don't clash with fields from # parents, including auto-generated fields like multi-table inheritance # child accessors. 
for parent in cls._meta.get_parent_list(): for f in parent._meta.get_fields(): if f not in used_fields: used_fields[f.name] = f for f in cls._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None # Note that we may detect clash between user-defined non-unique # field "id" and automatically added unique field "id", both # defined at the same model. This special case is considered in # _check_id_field and here we ignore it. id_conflict = ( f.name == "id" and clash and clash.name == "id" and clash.model == cls ) if clash and not id_conflict: errors.append( checks.Error( "The field '%s' clashes with the field '%s' " "from model '%s'." % (f.name, clash.name, clash.model._meta), obj=f, id="models.E006", ) ) used_fields[f.name] = f used_fields[f.attname] = f return errors @classmethod def _check_column_name_clashes(cls): # Store a list of column names which have already been used by other fields. used_column_names = [] errors = [] for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Ensure the column name is not already in use. if column_name and column_name in used_column_names: errors.append( checks.Error( "Field '%s' has column name '%s' that is used by " "another field." % (f.name, column_name), hint="Specify a 'db_column' for the field.", obj=cls, id="models.E007", ) ) else: used_column_names.append(column_name) return errors @classmethod def _check_model_name_db_lookup_clashes(cls): errors = [] model_name = cls.__name__ if model_name.startswith("_") or model_name.endswith("_"): errors.append( checks.Error( "The model name '%s' cannot start or end with an underscore " "as it collides with the query lookup syntax." % model_name, obj=cls, id="models.E023", ) ) elif LOOKUP_SEP in model_name: errors.append( checks.Error( "The model name '%s' cannot contain double underscores as " "it collides with the query lookup syntax." % model_name, obj=cls, id="models.E024", ) ) return errors @classmethod def _check_property_name_related_field_accessor_clashes(cls): errors = [] property_names = cls._meta._property_names related_field_accessors = ( f.get_attname() for f in cls._meta._get_fields(reverse=False) if f.is_relation and f.related_model is not None ) for accessor in related_field_accessors: if accessor in property_names: errors.append( checks.Error( "The property '%s' clashes with a related field " "accessor." % accessor, obj=cls, id="models.E025", ) ) return errors @classmethod def _check_single_primary_key(cls): errors = [] if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: errors.append( checks.Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=cls, id="models.E026", ) ) return errors # RemovedInDjango51Warning. 
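    # Meta.index_together is deprecated in favor of Meta.indexes; this check
    # is slated for removal along with it in Django 5.1.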
@classmethod def _check_index_together(cls): """Check the value of "index_together" option.""" if not isinstance(cls._meta.index_together, (tuple, list)): return [ checks.Error( "'index_together' must be a list or tuple.", obj=cls, id="models.E008", ) ] elif any( not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together ): return [ checks.Error( "All 'index_together' elements must be lists or tuples.", obj=cls, id="models.E009", ) ] else: errors = [] for fields in cls._meta.index_together: errors.extend(cls._check_local_fields(fields, "index_together")) return errors @classmethod def _check_unique_together(cls): """Check the value of "unique_together" option.""" if not isinstance(cls._meta.unique_together, (tuple, list)): return [ checks.Error( "'unique_together' must be a list or tuple.", obj=cls, id="models.E010", ) ] elif any( not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together ): return [ checks.Error( "All 'unique_together' elements must be lists or tuples.", obj=cls, id="models.E011", ) ] else: errors = [] for fields in cls._meta.unique_together: errors.extend(cls._check_local_fields(fields, "unique_together")) return errors @classmethod def _check_indexes(cls, databases): """Check fields, names, and conditions of indexes.""" errors = [] references = set() for index in cls._meta.indexes: # Index name can't start with an underscore or a number, restricted # for cross-database compatibility with Oracle. if index.name[0] == "_" or index.name[0].isdigit(): errors.append( checks.Error( "The index name '%s' cannot start with an underscore " "or a number." % index.name, obj=cls, id="models.E033", ), ) if len(index.name) > index.max_name_length: errors.append( checks.Error( "The index name '%s' cannot be longer than %d " "characters." % (index.name, index.max_name_length), obj=cls, id="models.E034", ), ) if index.contains_expressions: for expression in index.expressions: references.update( ref[0] for ref in cls._get_expr_references(expression) ) for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_partial_indexes or "supports_partial_indexes" in cls._meta.required_db_features ) and any(index.condition is not None for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes with conditions." % connection.display_name, hint=( "Conditions will be ignored. Silence this warning " "if you don't care about it." ), obj=cls, id="models.W037", ) ) if not ( connection.features.supports_covering_indexes or "supports_covering_indexes" in cls._meta.required_db_features ) and any(index.include for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes with non-key columns." % connection.display_name, hint=( "Non-key columns will be ignored. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W040", ) ) if not ( connection.features.supports_expression_indexes or "supports_expression_indexes" in cls._meta.required_db_features ) and any(index.contains_expressions for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes on expressions." % connection.display_name, hint=( "An index won't be created. Silence this warning " "if you don't care about it." 
), obj=cls, id="models.W043", ) ) fields = [ field for index in cls._meta.indexes for field, _ in index.fields_orders ] fields += [include for index in cls._meta.indexes for include in index.include] fields += references errors.extend(cls._check_local_fields(fields, "indexes")) return errors @classmethod def _check_local_fields(cls, fields, option): from django.db import models # In order to avoid hitting the relation tree prematurely, we use our # own fields_map instead of using get_field() forward_fields_map = {} for field in cls._meta._get_fields(reverse=False): forward_fields_map[field.name] = field if hasattr(field, "attname"): forward_fields_map[field.attname] = field errors = [] for field_name in fields: try: field = forward_fields_map[field_name] except KeyError: errors.append( checks.Error( "'%s' refers to the nonexistent field '%s'." % ( option, field_name, ), obj=cls, id="models.E012", ) ) else: if isinstance(field.remote_field, models.ManyToManyRel): errors.append( checks.Error( "'%s' refers to a ManyToManyField '%s', but " "ManyToManyFields are not permitted in '%s'." % ( option, field_name, option, ), obj=cls, id="models.E013", ) ) elif field not in cls._meta.local_fields: errors.append( checks.Error( "'%s' refers to field '%s' which is not local to model " "'%s'." % (option, field_name, cls._meta.object_name), hint="This issue may be caused by multi-table inheritance.", obj=cls, id="models.E016", ) ) return errors @classmethod def _check_ordering(cls): """ Check "ordering" option -- is it a list of strings and do all fields exist? """ if cls._meta._ordering_clash: return [ checks.Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=cls, id="models.E021", ), ] if cls._meta.order_with_respect_to or not cls._meta.ordering: return [] if not isinstance(cls._meta.ordering, (list, tuple)): return [ checks.Error( "'ordering' must be a tuple or list (even if you want to order by " "only one field).", obj=cls, id="models.E014", ) ] errors = [] fields = cls._meta.ordering # Skip expressions and '?' fields. fields = (f for f in fields if isinstance(f, str) and f != "?") # Convert "-field" to "field". fields = ((f[1:] if f.startswith("-") else f) for f in fields) # Separate related fields and non-related fields. _fields = [] related_fields = [] for f in fields: if LOOKUP_SEP in f: related_fields.append(f) else: _fields.append(f) fields = _fields # Check related fields. for field in related_fields: _cls = cls fld = None for part in field.split(LOOKUP_SEP): try: # pk is an alias that won't be found by opts.get_field. if part == "pk": fld = _cls._meta.pk else: fld = _cls._meta.get_field(part) if fld.is_relation: _cls = fld.path_infos[-1].to_opts.model else: _cls = None except (FieldDoesNotExist, AttributeError): if fld is None or ( fld.get_transform(part) is None and fld.get_lookup(part) is None ): errors.append( checks.Error( "'ordering' refers to the nonexistent field, " "related field, or lookup '%s'." % field, obj=cls, id="models.E015", ) ) # Skip ordering on pk. This is always a valid order_by field # but is an alias and therefore won't be found by opts.get_field. fields = {f for f in fields if f != "pk"} # Check for invalid or nonexistent fields in ordering. invalid_fields = [] # Any field name that is not present in field_names does not exist. # Also, ordering by m2m fields is not allowed. 
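        # Hedged illustration: Meta.ordering = ["typo"] falls through to
        # invalid_fields below and is reported as models.E015, while
        # ordering = ["-name", "pk", "?"] passes (assuming "name" exists):
        # the "-" prefix, the "pk" alias, and "?" were all handled above.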
opts = cls._meta valid_fields = set( chain.from_iterable( (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) for f in chain(opts.fields, opts.related_objects) ) ) invalid_fields.extend(fields - valid_fields) for invalid_field in invalid_fields: errors.append( checks.Error( "'ordering' refers to the nonexistent field, related " "field, or lookup '%s'." % invalid_field, obj=cls, id="models.E015", ) ) return errors @classmethod def _check_long_column_names(cls, databases): """ Check that any auto-generated column names are shorter than the limits for each database in which the model will be created. """ if not databases: return [] errors = [] allowed_len = None db_alias = None # Find the minimum max allowed length among all specified db_aliases. for db in databases: # skip databases where the model won't be created if not router.allow_migrate_model(db, cls): continue connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is None or connection.features.truncates_names: continue else: if allowed_len is None: allowed_len = max_name_length db_alias = db elif max_name_length < allowed_len: allowed_len = max_name_length db_alias = db if allowed_len is None: return errors for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Check if auto-generated name for the field is too long # for the database. if ( f.db_column is None and column_name is not None and len(column_name) > allowed_len ): errors.append( checks.Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (column_name, allowed_len, db_alias), hint="Set the column name manually using 'db_column'.", obj=cls, id="models.E018", ) ) for f in cls._meta.local_many_to_many: # Skip nonexistent models. if isinstance(f.remote_field.through, str): continue # Check if auto-generated name for the M2M field is too long # for the database. for m2m in f.remote_field.through._meta.local_fields: _, rel_name = m2m.get_attname_column() if ( m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len ): errors.append( checks.Error( "Autogenerated column name too long for M2M field " '"%s". Maximum length is "%s" for database "%s".' % (rel_name, allowed_len, db_alias), hint=( "Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'." ), obj=cls, id="models.E019", ) ) return errors @classmethod def _get_expr_references(cls, expr): if isinstance(expr, Q): for child in expr.children: if isinstance(child, tuple): lookup, value = child yield tuple(lookup.split(LOOKUP_SEP)) yield from cls._get_expr_references(value) else: yield from cls._get_expr_references(child) elif isinstance(expr, F): yield tuple(expr.name.split(LOOKUP_SEP)) elif hasattr(expr, "get_source_expressions"): for src_expr in expr.get_source_expressions(): yield from cls._get_expr_references(src_expr) @classmethod def _check_constraints(cls, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_table_check_constraints or "supports_table_check_constraints" in cls._meta.required_db_features ) and any( isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support check constraints." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." 
), obj=cls, id="models.W027", ) ) if not ( connection.features.supports_partial_indexes or "supports_partial_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.condition is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints with " "conditions." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W036", ) ) if not ( connection.features.supports_deferrable_unique_constraints or "supports_deferrable_unique_constraints" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.deferrable is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support deferrable unique constraints." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W038", ) ) if not ( connection.features.supports_covering_indexes or "supports_covering_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.include for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints with non-key " "columns." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W039", ) ) if not ( connection.features.supports_expression_indexes or "supports_expression_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.contains_expressions for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints on " "expressions." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W044", ) ) fields = set( chain.from_iterable( (*constraint.fields, *constraint.include) for constraint in cls._meta.constraints if isinstance(constraint, UniqueConstraint) ) ) references = set() for constraint in cls._meta.constraints: if isinstance(constraint, UniqueConstraint): if ( connection.features.supports_partial_indexes or "supports_partial_indexes" not in cls._meta.required_db_features ) and isinstance(constraint.condition, Q): references.update( cls._get_expr_references(constraint.condition) ) if ( connection.features.supports_expression_indexes or "supports_expression_indexes" not in cls._meta.required_db_features ) and constraint.contains_expressions: for expression in constraint.expressions: references.update(cls._get_expr_references(expression)) elif isinstance(constraint, CheckConstraint): if ( connection.features.supports_table_check_constraints or "supports_table_check_constraints" not in cls._meta.required_db_features ): if isinstance(constraint.check, Q): references.update( cls._get_expr_references(constraint.check) ) if any( isinstance(expr, RawSQL) for expr in constraint.check.flatten() ): errors.append( checks.Warning( f"Check constraint {constraint.name!r} contains " f"RawSQL() expression and won't be validated " f"during the model full_clean().", hint=( "Silence this warning if you don't care about " "it." ), obj=cls, id="models.W045", ), ) for field_name, *lookups in references: # pk is an alias that won't be found by opts.get_field. 
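                # Hedged example: a UniqueConstraint condition such as
                # Q(parent__name="draft") yields the reference
                # ("parent", "name"). "name" is neither a transform nor a
                # lookup on the foreign key, so the code below reports the
                # implied JOIN as models.E041.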
if field_name != "pk": fields.add(field_name) if not lookups: # If it has no lookups it cannot result in a JOIN. continue try: if field_name == "pk": field = cls._meta.pk else: field = cls._meta.get_field(field_name) if not field.is_relation or field.many_to_many or field.one_to_many: continue except FieldDoesNotExist: continue # JOIN must happen at the first lookup. first_lookup = lookups[0] if ( hasattr(field, "get_transform") and hasattr(field, "get_lookup") and field.get_transform(first_lookup) is None and field.get_lookup(first_lookup) is None ): errors.append( checks.Error( "'constraints' refers to the joined field '%s'." % LOOKUP_SEP.join([field_name] + lookups), obj=cls, id="models.E041", ) ) errors.extend(cls._check_local_fields(fields, "constraints")) return errors ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(self, ordered_obj, id_list, using=None): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update( [ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list)], ["_order"], ) def method_get_order(self, ordered_obj): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) pk_name = ordered_obj._meta.pk.name return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) def make_foreign_order_accessors(model, related_model): setattr( related_model, "get_%s_order" % model.__name__.lower(), partialmethod(method_get_order, model), ) setattr( related_model, "set_%s_order" % model.__name__.lower(), partialmethod(method_set_order, model), ) ######## # MISC # ######## def model_unpickle(model_id): """Used to unpickle Model subclasses with deferred fields.""" if isinstance(model_id, tuple): model = apps.get_model(*model_id) else: # Backwards compat - the model was cached directly in earlier versions. model = model_id return model.__new__(model) model_unpickle.__safe_for_unpickle__ = True
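# Hedged usage sketch (illustrative model names, not part of this module):
#
#     class Question(models.Model):
#         text = models.CharField(max_length=200)
#
#     class Answer(models.Model):
#         question = models.ForeignKey(Question, on_delete=models.CASCADE)
#
#         class Meta:
#             order_with_respect_to = "question"
#
# make_foreign_order_accessors() then attaches get_answer_order() and
# set_answer_order() to Question, backed by the hidden "_order" column that
# method_get_order()/method_set_order() above read and write.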
d587eaab02c1a25d8b57096e9ac85f74b52de6a9e1375710b30ade4ca92bcadc
import copy import datetime import functools import inspect import warnings from collections import defaultdict from decimal import Decimal from uuid import UUID from django.core.exceptions import EmptyResultSet, FieldError, FullResultSet from django.db import DatabaseError, NotSupportedError, connection from django.db.models import fields from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import Q from django.utils.deconstruct import deconstructible from django.utils.deprecation import RemovedInDjango50Warning from django.utils.functional import cached_property from django.utils.hashable import make_hashable class SQLiteNumericMixin: """ Some expressions with output_field=DecimalField() must be cast to numeric to be properly filtered. """ def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) try: if self.output_field.get_internal_type() == "DecimalField": sql = "CAST(%s AS NUMERIC)" % sql except FieldError: pass return sql, params class Combinable: """ Provide the ability to combine one or two objects with some connector. For example F('foo') + F('bar'). """ # Arithmetic connectors ADD = "+" SUB = "-" MUL = "*" DIV = "/" POW = "^" # The following is a quoted % operator - it is quoted because it can be # used in strings that also have parameter substitution. MOD = "%%" # Bitwise operators - note that these are generated by .bitand() # and .bitor(), the '&' and '|' are reserved for boolean operator # usage. BITAND = "&" BITOR = "|" BITLEFTSHIFT = "<<" BITRIGHTSHIFT = ">>" BITXOR = "#" def _combine(self, other, connector, reversed): if not hasattr(other, "resolve_expression"): # everything must be resolvable to an expression other = Value(other) if reversed: return CombinedExpression(other, connector, self) return CombinedExpression(self, connector, other) ############# # OPERATORS # ############# def __neg__(self): return self._combine(-1, self.MUL, False) def __add__(self, other): return self._combine(other, self.ADD, False) def __sub__(self, other): return self._combine(other, self.SUB, False) def __mul__(self, other): return self._combine(other, self.MUL, False) def __truediv__(self, other): return self._combine(other, self.DIV, False) def __mod__(self, other): return self._combine(other, self.MOD, False) def __pow__(self, other): return self._combine(other, self.POW, False) def __and__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) & Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def bitand(self, other): return self._combine(other, self.BITAND, False) def bitleftshift(self, other): return self._combine(other, self.BITLEFTSHIFT, False) def bitrightshift(self, other): return self._combine(other, self.BITRIGHTSHIFT, False) def __xor__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) ^ Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def bitxor(self, other): return self._combine(other, self.BITXOR, False) def __or__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) | Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." 
        )

    def bitor(self, other):
        return self._combine(other, self.BITOR, False)

    def __radd__(self, other):
        return self._combine(other, self.ADD, True)

    def __rsub__(self, other):
        return self._combine(other, self.SUB, True)

    def __rmul__(self, other):
        return self._combine(other, self.MUL, True)

    def __rtruediv__(self, other):
        return self._combine(other, self.DIV, True)

    def __rmod__(self, other):
        return self._combine(other, self.MOD, True)

    def __rpow__(self, other):
        return self._combine(other, self.POW, True)

    def __rand__(self, other):
        raise NotImplementedError(
            "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
        )

    def __ror__(self, other):
        raise NotImplementedError(
            "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
        )

    def __rxor__(self, other):
        raise NotImplementedError(
            "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
        )

    def __invert__(self):
        return NegatedExpression(self)


class BaseExpression:
    """Base class for all query expressions."""

    empty_result_set_value = NotImplemented
    # aggregate specific fields
    is_summary = False
    _output_field_resolved_to_none = False
    # Can the expression be used in a WHERE clause?
    filterable = True
    # Can the expression be used as a source expression in Window?
    window_compatible = False

    def __init__(self, output_field=None):
        if output_field is not None:
            self.output_field = output_field

    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop("convert_value", None)
        return state

    def get_db_converters(self, connection):
        return (
            []
            if self.convert_value is self._convert_value_noop
            else [self.convert_value]
        ) + self.output_field.get_db_converters(connection)

    def get_source_expressions(self):
        return []

    def set_source_expressions(self, exprs):
        assert not exprs

    def _parse_expressions(self, *expressions):
        return [
            arg
            if hasattr(arg, "resolve_expression")
            else (F(arg) if isinstance(arg, str) else Value(arg))
            for arg in expressions
        ]

    def as_sql(self, compiler, connection):
        """
        Responsible for returning a (sql, [params]) tuple to be included
        in the current query.

        Different backends can provide their own implementation, by
        providing an `as_{vendor}` method and patching the Expression:

        ```
        def override_as_sql(self, compiler, connection):
            # custom logic
            return super().as_sql(compiler, connection)
        setattr(Expression, 'as_' + connection.vendor, override_as_sql)
        ```

        Arguments:
         * compiler: the query compiler responsible for generating the query.
           Must have a compile method, returning a (sql, [params]) tuple.
           Calling compiler(value) will return a quoted `value`.

         * connection: the database connection used for the current query.

        Return: (sql, params)
          Where `sql` is a string containing ordered sql parameters to be
          replaced with the elements of the list `params`.
        """
        raise NotImplementedError("Subclasses must implement as_sql()")

    @cached_property
    def contains_aggregate(self):
        return any(
            expr and expr.contains_aggregate for expr in self.get_source_expressions()
        )

    @cached_property
    def contains_over_clause(self):
        return any(
            expr and expr.contains_over_clause
            for expr in self.get_source_expressions()
        )

    @cached_property
    def contains_column_references(self):
        return any(
            expr and expr.contains_column_references
            for expr in self.get_source_expressions()
        )

    def resolve_expression(
        self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
    ):
        """
        Provide the chance to do any preprocessing or validation before being
        added to the query.

        Arguments:
         * query: the backend query implementation
         * allow_joins: boolean allowing or denying use of joins
           in this query
         * reuse: a set of reusable joins for multijoins
         * summarize: a terminal aggregate clause
         * for_save: whether this expression is about to be used in a save
           or update

        Return: an Expression to be added to the query.
        """
        c = self.copy()
        c.is_summary = summarize
        c.set_source_expressions(
            [
                expr.resolve_expression(query, allow_joins, reuse, summarize)
                if expr
                else None
                for expr in c.get_source_expressions()
            ]
        )
        return c

    @property
    def conditional(self):
        return isinstance(self.output_field, fields.BooleanField)

    @property
    def field(self):
        return self.output_field

    @cached_property
    def output_field(self):
        """Return the output type of this expression."""
        output_field = self._resolve_output_field()
        if output_field is None:
            self._output_field_resolved_to_none = True
            raise FieldError("Cannot resolve expression type, unknown output_field")
        return output_field

    @cached_property
    def _output_field_or_none(self):
        """
        Return the output field of this expression, or None if
        _resolve_output_field() didn't return an output type.
        """
        try:
            return self.output_field
        except FieldError:
            if not self._output_field_resolved_to_none:
                raise

    def _resolve_output_field(self):
        """
        Attempt to infer the output type of the expression.

        As a guess, if the output fields of all source fields match then
        simply infer the same type here.

        If a source's output field resolves to None, exclude it from this
        check. If all sources are None, then an error is raised higher up the
        stack in the output_field property.
        """
        # This guess is mostly a bad idea, but there is quite a lot of code
        # (especially 3rd party Func subclasses) that depends on it; we'd need
        # a deprecation path to fix it.
        sources_iter = (
            source for source in self.get_source_fields() if source is not None
        )
        for output_field in sources_iter:
            for source in sources_iter:
                if not isinstance(output_field, source.__class__):
                    raise FieldError(
                        "Expression contains mixed types: %s, %s. You must "
                        "set output_field."
                        % (
                            output_field.__class__.__name__,
                            source.__class__.__name__,
                        )
                    )
            return output_field

    @staticmethod
    def _convert_value_noop(value, expression, connection):
        return value

    @cached_property
    def convert_value(self):
        """
        Expressions provide their own converters because users have the option
        of manually specifying the output_field which may be a different type
        from the one the database returns.
""" field = self.output_field internal_type = field.get_internal_type() if internal_type == "FloatField": return ( lambda value, expression, connection: None if value is None else float(value) ) elif internal_type.endswith("IntegerField"): return ( lambda value, expression, connection: None if value is None else int(value) ) elif internal_type == "DecimalField": return ( lambda value, expression, connection: None if value is None else Decimal(value) ) return self._convert_value_noop def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def get_transform(self, name): return self.output_field.get_transform(name) def relabeled_clone(self, change_map): clone = self.copy() clone.set_source_expressions( [ e.relabeled_clone(change_map) if e is not None else None for e in self.get_source_expressions() ] ) return clone def replace_expressions(self, replacements): if replacement := replacements.get(self): return replacement clone = self.copy() source_expressions = clone.get_source_expressions() clone.set_source_expressions( [ expr.replace_expressions(replacements) if expr else None for expr in source_expressions ] ) return clone def copy(self): return copy.copy(self) def prefix_references(self, prefix): clone = self.copy() clone.set_source_expressions( [ F(f"{prefix}{expr.name}") if isinstance(expr, F) else expr.prefix_references(prefix) for expr in self.get_source_expressions() ] ) return clone def get_group_by_cols(self): if not self.contains_aggregate: return [self] cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def get_source_fields(self): """Return the underlying field types used by this aggregate.""" return [e._output_field_or_none for e in self.get_source_expressions()] def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def reverse_ordering(self): return self def flatten(self): """ Recursively yield this expression and all subexpressions, in depth-first order. """ yield self for expr in self.get_source_expressions(): if expr: if hasattr(expr, "flatten"): yield from expr.flatten() else: yield expr def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle. """ if hasattr(self.output_field, "select_format"): return self.output_field.select_format(compiler, sql, params) return sql, params @deconstructible class Expression(BaseExpression, Combinable): """An expression that can be combined with other expressions.""" @cached_property def identity(self): constructor_signature = inspect.signature(self.__init__) args, kwargs = self._constructor_args signature = constructor_signature.bind_partial(*args, **kwargs) signature.apply_defaults() arguments = signature.arguments.items() identity = [self.__class__] for arg, value in arguments: if isinstance(value, fields.Field): if value.name and value.model: value = (value.model._meta.label, value.name) else: value = type(value) else: value = make_hashable(value) identity.append((arg, value)) return tuple(identity) def __eq__(self, other): if not isinstance(other, Expression): return NotImplemented return other.identity == self.identity def __hash__(self): return hash(self.identity) # Type inference for CombinedExpression.output_field. # Missing items will result in FieldError, by design. # # The current approach for NULL is based on lowest common denominator behavior # i.e. 
if one of the supported databases is raising an error (rather than # return NULL) for `val <op> NULL`, then Django raises FieldError. NoneType = type(None) _connector_combinations = [ # Numeric operations - operands of same type. { connector: [ (fields.IntegerField, fields.IntegerField, fields.IntegerField), (fields.FloatField, fields.FloatField, fields.FloatField), (fields.DecimalField, fields.DecimalField, fields.DecimalField), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, # Behavior for DIV with integer arguments follows Postgres/SQLite, # not MySQL/Oracle. Combinable.DIV, Combinable.MOD, Combinable.POW, ) }, # Numeric operations - operands of different type. { connector: [ (fields.IntegerField, fields.DecimalField, fields.DecimalField), (fields.DecimalField, fields.IntegerField, fields.DecimalField), (fields.IntegerField, fields.FloatField, fields.FloatField), (fields.FloatField, fields.IntegerField, fields.FloatField), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV, Combinable.MOD, ) }, # Bitwise operators. { connector: [ (fields.IntegerField, fields.IntegerField, fields.IntegerField), ] for connector in ( Combinable.BITAND, Combinable.BITOR, Combinable.BITLEFTSHIFT, Combinable.BITRIGHTSHIFT, Combinable.BITXOR, ) }, # Numeric with NULL. { connector: [ (field_type, NoneType, field_type), (NoneType, field_type, field_type), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV, Combinable.MOD, Combinable.POW, ) for field_type in (fields.IntegerField, fields.DecimalField, fields.FloatField) }, # Date/DateTimeField/DurationField/TimeField. { Combinable.ADD: [ # Date/DateTimeField. (fields.DateField, fields.DurationField, fields.DateTimeField), (fields.DateTimeField, fields.DurationField, fields.DateTimeField), (fields.DurationField, fields.DateField, fields.DateTimeField), (fields.DurationField, fields.DateTimeField, fields.DateTimeField), # DurationField. (fields.DurationField, fields.DurationField, fields.DurationField), # TimeField. (fields.TimeField, fields.DurationField, fields.TimeField), (fields.DurationField, fields.TimeField, fields.TimeField), ], }, { Combinable.SUB: [ # Date/DateTimeField. (fields.DateField, fields.DurationField, fields.DateTimeField), (fields.DateTimeField, fields.DurationField, fields.DateTimeField), (fields.DateField, fields.DateField, fields.DurationField), (fields.DateField, fields.DateTimeField, fields.DurationField), (fields.DateTimeField, fields.DateField, fields.DurationField), (fields.DateTimeField, fields.DateTimeField, fields.DurationField), # DurationField. (fields.DurationField, fields.DurationField, fields.DurationField), # TimeField. (fields.TimeField, fields.DurationField, fields.TimeField), (fields.TimeField, fields.TimeField, fields.DurationField), ], }, ] _connector_combinators = defaultdict(list) def register_combinable_fields(lhs, connector, rhs, result): """ Register combinable types: lhs <connector> rhs -> result e.g. 
register_combinable_fields( IntegerField, Combinable.ADD, FloatField, FloatField ) """ _connector_combinators[connector].append((lhs, rhs, result)) for d in _connector_combinations: for connector, field_types in d.items(): for lhs, rhs, result in field_types: register_combinable_fields(lhs, connector, rhs, result) @functools.lru_cache(maxsize=128) def _resolve_combined_type(connector, lhs_type, rhs_type): combinators = _connector_combinators.get(connector, ()) for combinator_lhs_type, combinator_rhs_type, combined_type in combinators: if issubclass(lhs_type, combinator_lhs_type) and issubclass( rhs_type, combinator_rhs_type ): return combined_type class CombinedExpression(SQLiteNumericMixin, Expression): def __init__(self, lhs, connector, rhs, output_field=None): super().__init__(output_field=output_field) self.connector = connector self.lhs = lhs self.rhs = rhs def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, self) def __str__(self): return "{} {} {}".format(self.lhs, self.connector, self.rhs) def get_source_expressions(self): return [self.lhs, self.rhs] def set_source_expressions(self, exprs): self.lhs, self.rhs = exprs def _resolve_output_field(self): # We avoid using super() here for reasons given in # Expression._resolve_output_field() combined_type = _resolve_combined_type( self.connector, type(self.lhs._output_field_or_none), type(self.rhs._output_field_or_none), ) if combined_type is None: raise FieldError( f"Cannot infer type of {self.connector!r} expression involving these " f"types: {self.lhs.output_field.__class__.__name__}, " f"{self.rhs.output_field.__class__.__name__}. You must set " f"output_field." ) return combined_type() def as_sql(self, compiler, connection): expressions = [] expression_params = [] sql, params = compiler.compile(self.lhs) expressions.append(sql) expression_params.extend(params) sql, params = compiler.compile(self.rhs) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = "(%s)" sql = connection.ops.combine_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): lhs = self.lhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) rhs = self.rhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) if not isinstance(self, (DurationExpression, TemporalSubtraction)): try: lhs_type = lhs.output_field.get_internal_type() except (AttributeError, FieldError): lhs_type = None try: rhs_type = rhs.output_field.get_internal_type() except (AttributeError, FieldError): rhs_type = None if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type: return DurationExpression( self.lhs, self.connector, self.rhs ).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) datetime_fields = {"DateField", "DateTimeField", "TimeField"} if ( self.connector == self.SUB and lhs_type in datetime_fields and lhs_type == rhs_type ): return TemporalSubtraction(self.lhs, self.rhs).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) c = self.copy() c.is_summary = summarize c.lhs = lhs c.rhs = rhs return c class DurationExpression(CombinedExpression): def compile(self, side, compiler, connection): try: output = side.output_field except FieldError: pass else: if output.get_internal_type() == "DurationField": sql, params = compiler.compile(side) return connection.ops.format_for_duration_arithmetic(sql), params return 
compiler.compile(side) def as_sql(self, compiler, connection): if connection.features.has_native_duration_field: return super().as_sql(compiler, connection) connection.ops.check_expression_support(self) expressions = [] expression_params = [] sql, params = self.compile(self.lhs, compiler, connection) expressions.append(sql) expression_params.extend(params) sql, params = self.compile(self.rhs, compiler, connection) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = "(%s)" sql = connection.ops.combine_duration_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) if self.connector in {Combinable.MUL, Combinable.DIV}: try: lhs_type = self.lhs.output_field.get_internal_type() rhs_type = self.rhs.output_field.get_internal_type() except (AttributeError, FieldError): pass else: allowed_fields = { "DecimalField", "DurationField", "FloatField", "IntegerField", } if lhs_type not in allowed_fields or rhs_type not in allowed_fields: raise DatabaseError( f"Invalid arguments for operator {self.connector}." ) return sql, params class TemporalSubtraction(CombinedExpression): output_field = fields.DurationField() def __init__(self, lhs, rhs): super().__init__(lhs, self.SUB, rhs) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) lhs = compiler.compile(self.lhs) rhs = compiler.compile(self.rhs) return connection.ops.subtract_temporals( self.lhs.output_field.get_internal_type(), lhs, rhs ) @deconstructible(path="django.db.models.F") class F(Combinable): """An object capable of resolving references to existing query objects.""" def __init__(self, name): """ Arguments: * name: the name of the field this expression references """ self.name = name def __repr__(self): return "{}({})".format(self.__class__.__name__, self.name) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): return query.resolve_ref(self.name, allow_joins, reuse, summarize) def replace_expressions(self, replacements): return replacements.get(self, self) def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def __eq__(self, other): return self.__class__ == other.__class__ and self.name == other.name def __hash__(self): return hash(self.name) def copy(self): return copy.copy(self) class ResolvedOuterRef(F): """ An object that contains a reference to an outer query. In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery. """ contains_aggregate = False contains_over_clause = False def as_sql(self, *args, **kwargs): raise ValueError( "This queryset contains a reference to an outer query and may " "only be used in a subquery." ) def resolve_expression(self, *args, **kwargs): col = super().resolve_expression(*args, **kwargs) # FIXME: Rename possibly_multivalued to multivalued and fix detection # for non-multivalued JOINs (e.g. foreign key fields). This should take # into account only many-to-many and one-to-many relationships. 
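        # Hedged note: a name such as OuterRef("post__authors__name")
        # contains LOOKUP_SEP, so possibly_multivalued is set below and the
        # reference is treated as potentially spanning a multi-valued JOIN.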
col.possibly_multivalued = LOOKUP_SEP in self.name return col def relabeled_clone(self, relabels): return self def get_group_by_cols(self): return [] class OuterRef(F): contains_aggregate = False def resolve_expression(self, *args, **kwargs): if isinstance(self.name, self.__class__): return self.name return ResolvedOuterRef(self.name) def relabeled_clone(self, relabels): return self @deconstructible(path="django.db.models.Func") class Func(SQLiteNumericMixin, Expression): """An SQL function call.""" function = None template = "%(function)s(%(expressions)s)" arg_joiner = ", " arity = None # The number of arguments the function accepts. def __init__(self, *expressions, output_field=None, **extra): if self.arity is not None and len(expressions) != self.arity: raise TypeError( "'%s' takes exactly %s %s (%s given)" % ( self.__class__.__name__, self.arity, "argument" if self.arity == 1 else "arguments", len(expressions), ) ) super().__init__(output_field=output_field) self.source_expressions = self._parse_expressions(*expressions) self.extra = extra def __repr__(self): args = self.arg_joiner.join(str(arg) for arg in self.source_expressions) extra = {**self.extra, **self._get_repr_options()} if extra: extra = ", ".join( str(key) + "=" + str(val) for key, val in sorted(extra.items()) ) return "{}({}, {})".format(self.__class__.__name__, args, extra) return "{}({})".format(self.__class__.__name__, args) def _get_repr_options(self): """Return a dict of extra __init__() options to include in the repr.""" return {} def get_source_expressions(self): return self.source_expressions def set_source_expressions(self, exprs): self.source_expressions = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize for pos, arg in enumerate(c.source_expressions): c.source_expressions[pos] = arg.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def as_sql( self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context, ): connection.ops.check_expression_support(self) sql_parts = [] params = [] for arg in self.source_expressions: try: arg_sql, arg_params = compiler.compile(arg) except EmptyResultSet: empty_result_set_value = getattr( arg, "empty_result_set_value", NotImplemented ) if empty_result_set_value is NotImplemented: raise arg_sql, arg_params = compiler.compile(Value(empty_result_set_value)) except FullResultSet: arg_sql, arg_params = compiler.compile(Value(True)) sql_parts.append(arg_sql) params.extend(arg_params) data = {**self.extra, **extra_context} # Use the first supplied value in this order: the parameter to this # method, a value supplied in __init__()'s **extra (the value in # `data`), or the value defined on the class. 
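        # Hedged illustration: Func(F("name"), function="LOWER") renders as
        # LOWER("name") -- an explicit function argument to as_sql() wins,
        # then a "function" key from __init__()'s **extra, and finally the
        # class attribute; template and arg_joiner resolve the same way.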
        if function is not None:
            data["function"] = function
        else:
            data.setdefault("function", self.function)
        template = template or data.get("template", self.template)
        arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner)
        data["expressions"] = data["field"] = arg_joiner.join(sql_parts)
        return template % data, params

    def copy(self):
        copy = super().copy()
        copy.source_expressions = self.source_expressions[:]
        copy.extra = self.extra.copy()
        return copy


@deconstructible(path="django.db.models.Value")
class Value(SQLiteNumericMixin, Expression):
    """Represent a wrapped value as a node within an expression."""

    # Provide a default value for `for_save` in order to allow unresolved
    # instances to be compiled until a decision is taken in #25425.
    for_save = False

    def __init__(self, value, output_field=None):
        """
        Arguments:
         * value: the value this expression represents. The value will be
           added into the sql parameter list and properly quoted.

         * output_field: an instance of the model field type that this
           expression will return, such as IntegerField() or CharField().
        """
        super().__init__(output_field=output_field)
        self.value = value

    def __repr__(self):
        return f"{self.__class__.__name__}({self.value!r})"

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        val = self.value
        output_field = self._output_field_or_none
        if output_field is not None:
            if self.for_save:
                val = output_field.get_db_prep_save(val, connection=connection)
            else:
                val = output_field.get_db_prep_value(val, connection=connection)
            if hasattr(output_field, "get_placeholder"):
                return output_field.get_placeholder(val, compiler, connection), [val]
        if val is None:
            # cx_Oracle does not always convert None to the appropriate
            # NULL type (like in case expressions using numbers), so we
            # use a literal SQL NULL
            return "NULL", []
        return "%s", [val]

    def resolve_expression(
        self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
    ):
        c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.for_save = for_save
        return c

    def get_group_by_cols(self):
        return []

    def _resolve_output_field(self):
        if isinstance(self.value, str):
            return fields.CharField()
        if isinstance(self.value, bool):
            return fields.BooleanField()
        if isinstance(self.value, int):
            return fields.IntegerField()
        if isinstance(self.value, float):
            return fields.FloatField()
        if isinstance(self.value, datetime.datetime):
            return fields.DateTimeField()
        if isinstance(self.value, datetime.date):
            return fields.DateField()
        if isinstance(self.value, datetime.time):
            return fields.TimeField()
        if isinstance(self.value, datetime.timedelta):
            return fields.DurationField()
        if isinstance(self.value, Decimal):
            return fields.DecimalField()
        if isinstance(self.value, bytes):
            return fields.BinaryField()
        if isinstance(self.value, UUID):
            return fields.UUIDField()

    @property
    def empty_result_set_value(self):
        return self.value


class RawSQL(Expression):
    def __init__(self, sql, params, output_field=None):
        if output_field is None:
            output_field = fields.Field()
        self.sql, self.params = sql, params
        super().__init__(output_field=output_field)

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)

    def as_sql(self, compiler, connection):
        return "(%s)" % self.sql, self.params

    def get_group_by_cols(self):
        return [self]

    def resolve_expression(
        self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
    ):
        # Resolve parent fields used in raw SQL.
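        # Hedged example: with Restaurant inheriting from Place, annotating
        # RawSQL("name = %s", ["Joe's"]) mentions the parent column "name",
        # so the loop below calls query.resolve_ref() to set up the JOIN to
        # the parent table before the raw SQL is embedded.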
if query.model: for parent in query.model._meta.get_parent_list(): for parent_field in parent._meta.local_fields: _, column_name = parent_field.get_attname_column() if column_name.lower() in self.sql.lower(): query.resolve_ref( parent_field.name, allow_joins, reuse, summarize ) break return super().resolve_expression( query, allow_joins, reuse, summarize, for_save ) class Star(Expression): def __repr__(self): return "'*'" def as_sql(self, compiler, connection): return "*", [] class Col(Expression): contains_column_references = True possibly_multivalued = False def __init__(self, alias, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.alias, self.target = alias, target def __repr__(self): alias, target = self.alias, self.target identifiers = (alias, str(target)) if alias else (str(target),) return "{}({})".format(self.__class__.__name__, ", ".join(identifiers)) def as_sql(self, compiler, connection): alias, column = self.alias, self.target.column identifiers = (alias, column) if alias else (column,) sql = ".".join(map(compiler.quote_name_unless_alias, identifiers)) return sql, [] def relabeled_clone(self, relabels): if self.alias is None: return self return self.__class__( relabels.get(self.alias, self.alias), self.target, self.output_field ) def get_group_by_cols(self): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return self.output_field.get_db_converters( connection ) + self.target.get_db_converters(connection) class Ref(Expression): """ Reference to column alias of the query. For example, Ref('sum_cost') in qs.annotate(sum_cost=Sum('cost')) query. """ def __init__(self, refs, source): super().__init__() self.refs, self.source = refs, source def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) def get_source_expressions(self): return [self.source] def set_source_expressions(self, exprs): (self.source,) = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # The sub-expression `source` has already been resolved, as this is # just a reference to the name of `source`. return self def relabeled_clone(self, relabels): return self def as_sql(self, compiler, connection): return connection.ops.quote_name(self.refs), [] def get_group_by_cols(self): return [self] class ExpressionList(Func): """ An expression containing multiple expressions. Can be used to provide a list of expressions as an argument to another expression, like a partition clause. """ template = "%(expressions)s" def __init__(self, *expressions, **extra): if not expressions: raise ValueError( "%s requires at least one expression." % self.__class__.__name__ ) super().__init__(*expressions, **extra) def __str__(self): return self.arg_joiner.join(str(arg) for arg in self.source_expressions) def as_sqlite(self, compiler, connection, **extra_context): # Casting to numeric is unnecessary. 
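        # Hedged note: an ExpressionList only renders its members joined by
        # arg_joiner (e.g. inside OVER (PARTITION BY ...)), where the result
        # is never compared or filtered, so the DecimalField CAST added by
        # SQLiteNumericMixin would be pointless here.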
return self.as_sql(compiler, connection, **extra_context) class OrderByList(Func): template = "ORDER BY %(expressions)s" def __init__(self, *expressions, **extra): expressions = ( ( OrderBy(F(expr[1:]), descending=True) if isinstance(expr, str) and expr[0] == "-" else expr ) for expr in expressions ) super().__init__(*expressions, **extra) def as_sql(self, *args, **kwargs): if not self.source_expressions: return "", () return super().as_sql(*args, **kwargs) def get_group_by_cols(self): group_by_cols = [] for order_by in self.get_source_expressions(): group_by_cols.extend(order_by.get_group_by_cols()) return group_by_cols @deconstructible(path="django.db.models.ExpressionWrapper") class ExpressionWrapper(SQLiteNumericMixin, Expression): """ An expression that can wrap another expression so that it can provide extra context to the inner expression, such as the output_field. """ def __init__(self, expression, output_field): super().__init__(output_field=output_field) self.expression = expression def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def get_group_by_cols(self): if isinstance(self.expression, Expression): expression = self.expression.copy() expression.output_field = self.output_field return expression.get_group_by_cols() # For non-expressions e.g. an SQL WHERE clause, the entire # `expression` must be included in the GROUP BY clause. return super().get_group_by_cols() def as_sql(self, compiler, connection): return compiler.compile(self.expression) def __repr__(self): return "{}({})".format(self.__class__.__name__, self.expression) class NegatedExpression(ExpressionWrapper): """The logical negation of a conditional expression.""" def __init__(self, expression): super().__init__(expression, output_field=fields.BooleanField()) def __invert__(self): return self.expression.copy() def as_sql(self, compiler, connection): try: sql, params = super().as_sql(compiler, connection) except EmptyResultSet: features = compiler.connection.features if not features.supports_boolean_expr_in_select_clause: return "1=1", () return compiler.compile(Value(True)) ops = compiler.connection.ops # Some database backends (e.g. Oracle) don't allow EXISTS() and filters # to be compared to another expression unless they're wrapped in a CASE # WHEN. if not ops.conditional_expression_supported_in_where_clause(self.expression): return f"CASE WHEN {sql} = 0 THEN 1 ELSE 0 END", params return f"NOT {sql}", params def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): resolved = super().resolve_expression( query, allow_joins, reuse, summarize, for_save ) if not getattr(resolved.expression, "conditional", False): raise TypeError("Cannot negate non-conditional expressions.") return resolved def select_format(self, compiler, sql, params): # Wrap boolean expressions with a CASE WHEN expression if a database # backend (e.g. Oracle) doesn't support boolean expression in SELECT or # GROUP BY list. expression_supported_in_where_clause = ( compiler.connection.ops.conditional_expression_supported_in_where_clause ) if ( not compiler.connection.features.supports_boolean_expr_in_select_clause # Avoid double wrapping. 
and expression_supported_in_where_clause(self.expression) ): sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql) return sql, params @deconstructible(path="django.db.models.When") class When(Expression): template = "WHEN %(condition)s THEN %(result)s" # This isn't a complete conditional expression, must be used in Case(). conditional = False def __init__(self, condition=None, then=None, **lookups): if lookups: if condition is None: condition, lookups = Q(**lookups), None elif getattr(condition, "conditional", False): condition, lookups = Q(condition, **lookups), None if condition is None or not getattr(condition, "conditional", False) or lookups: raise TypeError( "When() supports a Q object, a boolean expression, or lookups " "as a condition." ) if isinstance(condition, Q) and not condition: raise ValueError("An empty Q() can't be used as a When() condition.") super().__init__(output_field=None) self.condition = condition self.result = self._parse_expressions(then)[0] def __str__(self): return "WHEN %r THEN %r" % (self.condition, self.result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return [self.condition, self.result] def set_source_expressions(self, exprs): self.condition, self.result = exprs def get_source_fields(self): # We're only interested in the fields of the result expressions. return [self.result._output_field_or_none] def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize if hasattr(c.condition, "resolve_expression"): c.condition = c.condition.resolve_expression( query, allow_joins, reuse, summarize, False ) c.result = c.result.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = extra_context sql_params = [] condition_sql, condition_params = compiler.compile(self.condition) template_params["condition"] = condition_sql result_sql, result_params = compiler.compile(self.result) template_params["result"] = result_sql template = template or self.template return template % template_params, ( *sql_params, *condition_params, *result_params, ) def get_group_by_cols(self): # This is not a complete expression and cannot be used in GROUP BY. 
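        # Hedged example: for When(price__gt=10, then=F("price")), the
        # group-by columns of both the condition and the result are
        # collected below; the WHEN fragment itself is never grouped on.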
cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols @deconstructible(path="django.db.models.Case") class Case(SQLiteNumericMixin, Expression): """ An SQL searched CASE expression: CASE WHEN n > 0 THEN 'positive' WHEN n < 0 THEN 'negative' ELSE 'zero' END """ template = "CASE %(cases)s ELSE %(default)s END" case_joiner = " " def __init__(self, *cases, default=None, output_field=None, **extra): if not all(isinstance(case, When) for case in cases): raise TypeError("Positional arguments must all be When objects.") super().__init__(output_field) self.cases = list(cases) self.default = self._parse_expressions(default)[0] self.extra = extra def __str__(self): return "CASE %s, ELSE %r" % ( ", ".join(str(c) for c in self.cases), self.default, ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return self.cases + [self.default] def set_source_expressions(self, exprs): *self.cases, self.default = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize for pos, case in enumerate(c.cases): c.cases[pos] = case.resolve_expression( query, allow_joins, reuse, summarize, for_save ) c.default = c.default.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def copy(self): c = super().copy() c.cases = c.cases[:] return c def as_sql( self, compiler, connection, template=None, case_joiner=None, **extra_context ): connection.ops.check_expression_support(self) if not self.cases: return compiler.compile(self.default) template_params = {**self.extra, **extra_context} case_parts = [] sql_params = [] default_sql, default_params = compiler.compile(self.default) for case in self.cases: try: case_sql, case_params = compiler.compile(case) except EmptyResultSet: continue except FullResultSet: default_sql, default_params = compiler.compile(case.result) break case_parts.append(case_sql) sql_params.extend(case_params) if not case_parts: return default_sql, default_params case_joiner = case_joiner or self.case_joiner template_params["cases"] = case_joiner.join(case_parts) template_params["default"] = default_sql sql_params.extend(default_params) template = template or template_params.get("template", self.template) sql = template % template_params if self._output_field_or_none is not None: sql = connection.ops.unification_cast_sql(self.output_field) % sql return sql, sql_params def get_group_by_cols(self): if not self.cases: return self.default.get_group_by_cols() return super().get_group_by_cols() class Subquery(BaseExpression, Combinable): """ An explicit subquery. It may contain OuterRef() references to the outer query which will be resolved when it is applied to that query. """ template = "(%(subquery)s)" contains_aggregate = False empty_result_set_value = None def __init__(self, queryset, output_field=None, **extra): # Allow the usage of both QuerySet and sql.Query objects. 
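        # Hedged usage sketch (model names are illustrative):
        #     newest = Comment.objects.filter(post=OuterRef("pk"))
        #     Post.objects.annotate(
        #         newest_email=Subquery(newest.values("email")[:1])
        #     )
        # Either the queryset itself or its underlying sql.Query may be
        # passed, since getattr(queryset, "query", queryset) accepts both.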
self.query = getattr(queryset, "query", queryset).clone() self.query.subquery = True self.extra = extra super().__init__(output_field) def get_source_expressions(self): return [self.query] def set_source_expressions(self, exprs): self.query = exprs[0] def _resolve_output_field(self): return self.query.output_field def copy(self): clone = super().copy() clone.query = clone.query.clone() return clone @property def external_aliases(self): return self.query.external_aliases def get_external_cols(self): return self.query.get_external_cols() def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = {**self.extra, **extra_context} subquery_sql, sql_params = self.query.as_sql(compiler, connection) template_params["subquery"] = subquery_sql[1:-1] template = template or template_params.get("template", self.template) sql = template % template_params return sql, sql_params def get_group_by_cols(self): return self.query.get_group_by_cols(wrapper=self) class Exists(Subquery): template = "EXISTS(%(subquery)s)" output_field = fields.BooleanField() def __init__(self, queryset, **kwargs): super().__init__(queryset, **kwargs) self.query = self.query.exists() def select_format(self, compiler, sql, params): # Wrap EXISTS() with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql) return sql, params @deconstructible(path="django.db.models.OrderBy") class OrderBy(Expression): template = "%(expression)s %(ordering)s" conditional = False def __init__(self, expression, descending=False, nulls_first=None, nulls_last=None): if nulls_first and nulls_last: raise ValueError("nulls_first and nulls_last are mutually exclusive") if nulls_first is False or nulls_last is False: # When the deprecation ends, replace with: # raise ValueError( # "nulls_first and nulls_last values must be True or None." 
# ) warnings.warn( "Passing nulls_first=False or nulls_last=False is deprecated, use None " "instead.", RemovedInDjango50Warning, stacklevel=2, ) self.nulls_first = nulls_first self.nulls_last = nulls_last self.descending = descending if not hasattr(expression, "resolve_expression"): raise ValueError("expression must be an expression type") self.expression = expression def __repr__(self): return "{}({}, descending={})".format( self.__class__.__name__, self.expression, self.descending ) def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection, template=None, **extra_context): template = template or self.template if connection.features.supports_order_by_nulls_modifier: if self.nulls_last: template = "%s NULLS LAST" % template elif self.nulls_first: template = "%s NULLS FIRST" % template else: if self.nulls_last and not ( self.descending and connection.features.order_by_nulls_first ): template = "%%(expression)s IS NULL, %s" % template elif self.nulls_first and not ( not self.descending and connection.features.order_by_nulls_first ): template = "%%(expression)s IS NOT NULL, %s" % template connection.ops.check_expression_support(self) expression_sql, params = compiler.compile(self.expression) placeholders = { "expression": expression_sql, "ordering": "DESC" if self.descending else "ASC", **extra_context, } params *= template.count("%(expression)s") return (template % placeholders).rstrip(), params def as_oracle(self, compiler, connection): # Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped # in a CASE WHEN. if connection.ops.conditional_expression_supported_in_where_clause( self.expression ): copy = self.copy() copy.expression = Case( When(self.expression, then=True), default=False, ) return copy.as_sql(compiler, connection) return self.as_sql(compiler, connection) def get_group_by_cols(self): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def reverse_ordering(self): self.descending = not self.descending if self.nulls_first: self.nulls_last = True self.nulls_first = None elif self.nulls_last: self.nulls_first = True self.nulls_last = None return self def asc(self): self.descending = False def desc(self): self.descending = True class Window(SQLiteNumericMixin, Expression): template = "%(expression)s OVER (%(window)s)" # Although the main expression may either be an aggregate or an # expression with an aggregate function, the GROUP BY that will # be introduced in the query as a result is not desired. contains_aggregate = False contains_over_clause = True def __init__( self, expression, partition_by=None, order_by=None, frame=None, output_field=None, ): self.partition_by = partition_by self.order_by = order_by self.frame = frame if not getattr(expression, "window_compatible", False): raise ValueError( "Expression '%s' isn't compatible with OVER clauses." % expression.__class__.__name__ ) if self.partition_by is not None: if not isinstance(self.partition_by, (tuple, list)): self.partition_by = (self.partition_by,) self.partition_by = ExpressionList(*self.partition_by) if self.order_by is not None: if isinstance(self.order_by, (list, tuple)): self.order_by = OrderByList(*self.order_by) elif isinstance(self.order_by, (BaseExpression, str)): self.order_by = OrderByList(self.order_by) else: raise ValueError( "Window.order_by must be either a string reference to a " "field, an expression, or a list or tuple of them." 
) super().__init__(output_field=output_field) self.source_expression = self._parse_expressions(expression)[0] def _resolve_output_field(self): return self.source_expression.output_field def get_source_expressions(self): return [self.source_expression, self.partition_by, self.order_by, self.frame] def set_source_expressions(self, exprs): self.source_expression, self.partition_by, self.order_by, self.frame = exprs def as_sql(self, compiler, connection, template=None): connection.ops.check_expression_support(self) if not connection.features.supports_over_clause: raise NotSupportedError("This backend does not support window expressions.") expr_sql, params = compiler.compile(self.source_expression) window_sql, window_params = [], () if self.partition_by is not None: sql_expr, sql_params = self.partition_by.as_sql( compiler=compiler, connection=connection, template="PARTITION BY %(expressions)s", ) window_sql.append(sql_expr) window_params += tuple(sql_params) if self.order_by is not None: order_sql, order_params = compiler.compile(self.order_by) window_sql.append(order_sql) window_params += tuple(order_params) if self.frame: frame_sql, frame_params = compiler.compile(self.frame) window_sql.append(frame_sql) window_params += tuple(frame_params) template = template or self.template return ( template % {"expression": expr_sql, "window": " ".join(window_sql).strip()}, (*params, *window_params), ) def as_sqlite(self, compiler, connection): if isinstance(self.output_field, fields.DecimalField): # Casting to numeric must be outside of the window expression. copy = self.copy() source_expressions = copy.get_source_expressions() source_expressions[0].output_field = fields.FloatField() copy.set_source_expressions(source_expressions) return super(Window, copy).as_sqlite(compiler, connection) return self.as_sql(compiler, connection) def __str__(self): return "{} OVER ({}{}{})".format( str(self.source_expression), "PARTITION BY " + str(self.partition_by) if self.partition_by else "", str(self.order_by or ""), str(self.frame or ""), ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_group_by_cols(self): group_by_cols = [] if self.partition_by: group_by_cols.extend(self.partition_by.get_group_by_cols()) if self.order_by is not None: group_by_cols.extend(self.order_by.get_group_by_cols()) return group_by_cols class WindowFrame(Expression): """ Model the frame clause in window expressions. There are two types of frame clauses which are subclasses, however, all processing and validation (by no means intended to be complete) is done here. Thus, providing an end for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the last row in the frame). 
""" template = "%(frame_type)s BETWEEN %(start)s AND %(end)s" def __init__(self, start=None, end=None): self.start = Value(start) self.end = Value(end) def set_source_expressions(self, exprs): self.start, self.end = exprs def get_source_expressions(self): return [self.start, self.end] def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) start, end = self.window_frame_start_end( connection, self.start.value, self.end.value ) return ( self.template % { "frame_type": self.frame_type, "start": start, "end": end, }, [], ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_group_by_cols(self): return [] def __str__(self): if self.start.value is not None and self.start.value < 0: start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING) elif self.start.value is not None and self.start.value == 0: start = connection.ops.CURRENT_ROW else: start = connection.ops.UNBOUNDED_PRECEDING if self.end.value is not None and self.end.value > 0: end = "%d %s" % (self.end.value, connection.ops.FOLLOWING) elif self.end.value is not None and self.end.value == 0: end = connection.ops.CURRENT_ROW else: end = connection.ops.UNBOUNDED_FOLLOWING return self.template % { "frame_type": self.frame_type, "start": start, "end": end, } def window_frame_start_end(self, connection, start, end): raise NotImplementedError("Subclasses must implement window_frame_start_end().") class RowRange(WindowFrame): frame_type = "ROWS" def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_rows_start_end(start, end) class ValueRange(WindowFrame): frame_type = "RANGE" def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_range_start_end(start, end)
import collections.abc import copy import datetime import decimal import operator import uuid import warnings from base64 import b64decode, b64encode from functools import partialmethod, total_ordering from django import forms from django.apps import apps from django.conf import settings from django.core import checks, exceptions, validators from django.db import connection, connections, router from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin from django.utils import timezone from django.utils.datastructures import DictWrapper from django.utils.dateparse import ( parse_date, parse_datetime, parse_duration, parse_time, ) from django.utils.duration import duration_microseconds, duration_string from django.utils.functional import Promise, cached_property from django.utils.ipv6 import clean_ipv6_address from django.utils.itercompat import is_iterable from django.utils.text import capfirst from django.utils.translation import gettext_lazy as _ __all__ = [ "AutoField", "BLANK_CHOICE_DASH", "BigAutoField", "BigIntegerField", "BinaryField", "BooleanField", "CharField", "CommaSeparatedIntegerField", "DateField", "DateTimeField", "DecimalField", "DurationField", "EmailField", "Empty", "Field", "FilePathField", "FloatField", "GenericIPAddressField", "IPAddressField", "IntegerField", "NOT_PROVIDED", "NullBooleanField", "PositiveBigIntegerField", "PositiveIntegerField", "PositiveSmallIntegerField", "SlugField", "SmallAutoField", "SmallIntegerField", "TextField", "TimeField", "URLField", "UUIDField", ] class Empty: pass class NOT_PROVIDED: pass # The values to use for "blank" in SelectFields. Will be appended to the start # of most "choices" lists. BLANK_CHOICE_DASH = [("", "---------")] def _load_field(app_label, model_name, field_name): return apps.get_model(app_label, model_name)._meta.get_field(field_name) # A guide to Field parameters: # # * name: The name of the field specified in the model. # * attname: The attribute to use on the model object. This is the same as # "name", except in the case of ForeignKeys, where "_id" is # appended. # * db_column: The db_column specified in the model (or None). # * column: The database column for this field. This is the same as # "attname", except if db_column is specified. # # Code that introspects values, or does other dynamic things, should use # attname. For example, this gets the primary key value of object "obj": # # getattr(obj, opts.pk.attname) def _empty(of_cls): new = Empty() new.__class__ = of_cls return new def return_None(): return None @total_ordering class Field(RegisterLookupMixin): """Base class for all field types""" # Designates whether empty strings fundamentally are allowed at the # database level. empty_strings_allowed = True empty_values = list(validators.EMPTY_VALUES) # These track each time a Field instance is created. Used to retain order. # The auto_creation_counter is used for fields that Django implicitly # creates, creation_counter is used for all user-specified fields. creation_counter = 0 auto_creation_counter = -1 default_validators = [] # Default set of validators default_error_messages = { "invalid_choice": _("Value %(value)r is not a valid choice."), "null": _("This field cannot be null."), "blank": _("This field cannot be blank."), "unique": _("%(model_name)s with this %(field_label)s already exists."), "unique_for_date": _( # Translators: The 'lookup_type' is one of 'date', 'year' or # 'month'. 
Eg: "Title must be unique for pub_date year" "%(field_label)s must be unique for " "%(date_field_label)s %(lookup_type)s." ), } system_check_deprecated_details = None system_check_removed_details = None # Attributes that don't affect a column definition. # These attributes are ignored when altering the field. non_db_attrs = ( "blank", "choices", "db_column", "editable", "error_messages", "help_text", "limit_choices_to", # Database-level options are not supported, see #21961. "on_delete", "related_name", "related_query_name", "validators", "verbose_name", ) # Field flags hidden = False many_to_many = None many_to_one = None one_to_many = None one_to_one = None related_model = None descriptor_class = DeferredAttribute # Generic field type description, usually overridden by subclasses def _description(self): return _("Field of type: %(field_type)s") % { "field_type": self.__class__.__name__ } description = property(_description) def __init__( self, verbose_name=None, name=None, primary_key=False, max_length=None, unique=False, blank=False, null=False, db_index=False, rel=None, default=NOT_PROVIDED, editable=True, serialize=True, unique_for_date=None, unique_for_month=None, unique_for_year=None, choices=None, help_text="", db_column=None, db_tablespace=None, auto_created=False, validators=(), error_messages=None, ): self.name = name self.verbose_name = verbose_name # May be set by set_attributes_from_name self._verbose_name = verbose_name # Store original for deconstruction self.primary_key = primary_key self.max_length, self._unique = max_length, unique self.blank, self.null = blank, null self.remote_field = rel self.is_relation = self.remote_field is not None self.default = default self.editable = editable self.serialize = serialize self.unique_for_date = unique_for_date self.unique_for_month = unique_for_month self.unique_for_year = unique_for_year if isinstance(choices, collections.abc.Iterator): choices = list(choices) self.choices = choices self.help_text = help_text self.db_index = db_index self.db_column = db_column self._db_tablespace = db_tablespace self.auto_created = auto_created # Adjust the appropriate creation counter, and save our local copy. if auto_created: self.creation_counter = Field.auto_creation_counter Field.auto_creation_counter -= 1 else: self.creation_counter = Field.creation_counter Field.creation_counter += 1 self._validators = list(validators) # Store for deconstruction later self._error_messages = error_messages # Store for deconstruction later def __str__(self): """ Return "app_label.model_label.field_name" for fields attached to models. """ if not hasattr(self, "model"): return super().__str__() model = self.model return "%s.%s" % (model._meta.label, self.name) def __repr__(self): """Display the module, class, and name of the field.""" path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) name = getattr(self, "name", None) if name is not None: return "<%s: %s>" % (path, name) return "<%s>" % path def check(self, **kwargs): return [ *self._check_field_name(), *self._check_choices(), *self._check_db_index(), *self._check_null_allowed_for_primary_keys(), *self._check_backend_specific_checks(**kwargs), *self._check_validators(), *self._check_deprecation_details(), ] def _check_field_name(self): """ Check if field name is valid, i.e. 1) does not end with an underscore, 2) does not contain "__" and 3) is not "pk". 
""" if self.name.endswith("_"): return [ checks.Error( "Field names must not end with an underscore.", obj=self, id="fields.E001", ) ] elif LOOKUP_SEP in self.name: return [ checks.Error( 'Field names must not contain "%s".' % LOOKUP_SEP, obj=self, id="fields.E002", ) ] elif self.name == "pk": return [ checks.Error( "'pk' is a reserved word that cannot be used as a field name.", obj=self, id="fields.E003", ) ] else: return [] @classmethod def _choices_is_value(cls, value): return isinstance(value, (str, Promise)) or not is_iterable(value) def _check_choices(self): if not self.choices: return [] if not is_iterable(self.choices) or isinstance(self.choices, str): return [ checks.Error( "'choices' must be an iterable (e.g., a list or tuple).", obj=self, id="fields.E004", ) ] choice_max_length = 0 # Expect [group_name, [value, display]] for choices_group in self.choices: try: group_name, group_choices = choices_group except (TypeError, ValueError): # Containing non-pairs break try: if not all( self._choices_is_value(value) and self._choices_is_value(human_name) for value, human_name in group_choices ): break if self.max_length is not None and group_choices: choice_max_length = max( [ choice_max_length, *( len(value) for value, _ in group_choices if isinstance(value, str) ), ] ) except (TypeError, ValueError): # No groups, choices in the form [value, display] value, human_name = group_name, group_choices if not self._choices_is_value(value) or not self._choices_is_value( human_name ): break if self.max_length is not None and isinstance(value, str): choice_max_length = max(choice_max_length, len(value)) # Special case: choices=['ab'] if isinstance(choices_group, str): break else: if self.max_length is not None and choice_max_length > self.max_length: return [ checks.Error( "'max_length' is too small to fit the longest value " "in 'choices' (%d characters)." % choice_max_length, obj=self, id="fields.E009", ), ] return [] return [ checks.Error( "'choices' must be an iterable containing " "(actual value, human readable name) tuples.", obj=self, id="fields.E005", ) ] def _check_db_index(self): if self.db_index not in (None, True, False): return [ checks.Error( "'db_index' must be None, True or False.", obj=self, id="fields.E006", ) ] else: return [] def _check_null_allowed_for_primary_keys(self): if ( self.primary_key and self.null and not connection.features.interprets_empty_strings_as_nulls ): # We cannot reliably check this for backends like Oracle which # consider NULL and '' to be equal (and thus set up # character-based fields a little differently). return [ checks.Error( "Primary keys must not have null=True.", hint=( "Set null=False on the field, or " "remove primary_key=True argument." 
), obj=self, id="fields.E007", ) ] else: return [] def _check_backend_specific_checks(self, databases=None, **kwargs): if databases is None: return [] app_label = self.model._meta.app_label errors = [] for alias in databases: if router.allow_migrate( alias, app_label, model_name=self.model._meta.model_name ): errors.extend(connections[alias].validation.check_field(self, **kwargs)) return errors def _check_validators(self): errors = [] for i, validator in enumerate(self.validators): if not callable(validator): errors.append( checks.Error( "All 'validators' must be callable.", hint=( "validators[{i}] ({repr}) isn't a function or " "instance of a validator class.".format( i=i, repr=repr(validator), ) ), obj=self, id="fields.E008", ) ) return errors def _check_deprecation_details(self): if self.system_check_removed_details is not None: return [ checks.Error( self.system_check_removed_details.get( "msg", "%s has been removed except for support in historical " "migrations." % self.__class__.__name__, ), hint=self.system_check_removed_details.get("hint"), obj=self, id=self.system_check_removed_details.get("id", "fields.EXXX"), ) ] elif self.system_check_deprecated_details is not None: return [ checks.Warning( self.system_check_deprecated_details.get( "msg", "%s has been deprecated." % self.__class__.__name__ ), hint=self.system_check_deprecated_details.get("hint"), obj=self, id=self.system_check_deprecated_details.get("id", "fields.WXXX"), ) ] return [] def get_col(self, alias, output_field=None): if alias == self.model._meta.db_table and ( output_field is None or output_field == self ): return self.cached_col from django.db.models.expressions import Col return Col(alias, self, output_field) @cached_property def cached_col(self): from django.db.models.expressions import Col return Col(self.model._meta.db_table, self) def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, GIS columns need to be selected as AsText(table.col) on MySQL as the table.col data can't be used by Django. """ return sql, params def deconstruct(self): """ Return enough information to recreate the field as a 4-tuple: * The name of the field on the model, if contribute_to_class() has been run. * The import path of the field, including the class, e.g. django.db.models.IntegerField. This should be the most portable version, so less specific may be better. * A list of positional arguments. * A dict of keyword arguments. Note that the positional or keyword arguments must contain values of the following types (including inner values of collection types): * None, bool, str, int, float, complex, set, frozenset, list, tuple, dict * UUID * datetime.datetime (naive), datetime.date * top-level classes, top-level functions - will be referenced by their full import path * Storage instances - these have their own deconstruct() method This is because the values here must be serialized into a text format (possibly new Python code, possibly JSON) and these are the only types with encoding handlers defined. There's no need to return the exact way the field was instantiated this time, just ensure that the resulting field is the same - prefer keyword arguments over positional ones, and omit parameters with their default values. 
""" # Short-form way of fetching all the default parameters keywords = {} possibles = { "verbose_name": None, "primary_key": False, "max_length": None, "unique": False, "blank": False, "null": False, "db_index": False, "default": NOT_PROVIDED, "editable": True, "serialize": True, "unique_for_date": None, "unique_for_month": None, "unique_for_year": None, "choices": None, "help_text": "", "db_column": None, "db_tablespace": None, "auto_created": False, "validators": [], "error_messages": None, } attr_overrides = { "unique": "_unique", "error_messages": "_error_messages", "validators": "_validators", "verbose_name": "_verbose_name", "db_tablespace": "_db_tablespace", } equals_comparison = {"choices", "validators"} for name, default in possibles.items(): value = getattr(self, attr_overrides.get(name, name)) # Unroll anything iterable for choices into a concrete list if name == "choices" and isinstance(value, collections.abc.Iterable): value = list(value) # Do correct kind of comparison if name in equals_comparison: if value != default: keywords[name] = value else: if value is not default: keywords[name] = value # Work out path - we shorten it for known Django core fields path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) if path.startswith("django.db.models.fields.related"): path = path.replace("django.db.models.fields.related", "django.db.models") elif path.startswith("django.db.models.fields.files"): path = path.replace("django.db.models.fields.files", "django.db.models") elif path.startswith("django.db.models.fields.json"): path = path.replace("django.db.models.fields.json", "django.db.models") elif path.startswith("django.db.models.fields.proxy"): path = path.replace("django.db.models.fields.proxy", "django.db.models") elif path.startswith("django.db.models.fields"): path = path.replace("django.db.models.fields", "django.db.models") # Return basic info - other fields should override this. return (self.name, path, [], keywords) def clone(self): """ Uses deconstruct() to clone a new copy of this Field. Will not preserve any class attachments/attribute names. """ name, path, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) def __eq__(self, other): # Needed for @total_ordering if isinstance(other, Field): return self.creation_counter == other.creation_counter and getattr( self, "model", None ) == getattr(other, "model", None) return NotImplemented def __lt__(self, other): # This is needed because bisect does not take a comparison function. # Order by creation_counter first for backward compatibility. if isinstance(other, Field): if ( self.creation_counter != other.creation_counter or not hasattr(self, "model") and not hasattr(other, "model") ): return self.creation_counter < other.creation_counter elif hasattr(self, "model") != hasattr(other, "model"): return not hasattr(self, "model") # Order no-model fields first else: # creation_counter's are equal, compare only models. return (self.model._meta.app_label, self.model._meta.model_name) < ( other.model._meta.app_label, other.model._meta.model_name, ) return NotImplemented def __hash__(self): return hash(self.creation_counter) def __deepcopy__(self, memodict): # We don't have to deepcopy very much here, since most things are not # intended to be altered after initial creation. 
obj = copy.copy(self) if self.remote_field: obj.remote_field = copy.copy(self.remote_field) if hasattr(self.remote_field, "field") and self.remote_field.field is self: obj.remote_field.field = obj memodict[id(self)] = obj return obj def __copy__(self): # We need to avoid hitting __reduce__, so define this # slightly weird copy construct. obj = Empty() obj.__class__ = self.__class__ obj.__dict__ = self.__dict__.copy() return obj def __reduce__(self): """ Pickling should return the model._meta.fields instance of the field, not a new copy of that field. So, use the app registry to load the model and then the field back. """ if not hasattr(self, "model"): # Fields are sometimes used without attaching them to models (for # example in aggregation). In this case give back a plain field # instance. The code below will create a new empty instance of # class self.__class__, then update its dict with self.__dict__ # values - so, this is very close to normal pickle. state = self.__dict__.copy() # The _get_default cached_property can't be pickled due to lambda # usage. state.pop("_get_default", None) return _empty, (self.__class__,), state return _load_field, ( self.model._meta.app_label, self.model._meta.object_name, self.name, ) def get_pk_value_on_save(self, instance): """ Hook to generate new PK values on save. This method is called when saving instances with no primary key value set. If this method returns something else than None, then the returned value is used when saving the new instance. """ if self.default: return self.get_default() return None def to_python(self, value): """ Convert the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Return the converted value. Subclasses should override this. """ return value @cached_property def error_messages(self): messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, "default_error_messages", {})) messages.update(self._error_messages or {}) return messages @cached_property def validators(self): """ Some validators can't be created at field initialization time. This method provides a way to delay their creation until required. """ return [*self.default_validators, *self._validators] def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except exceptions.ValidationError as e: if hasattr(e, "code") and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise exceptions.ValidationError(errors) def validate(self, value, model_instance): """ Validate value and raise ValidationError if necessary. Subclasses should override this to provide validation logic. """ if not self.editable: # Skip validation for non-editable fields. return if self.choices is not None and value not in self.empty_values: for option_key, option_value in self.choices: if isinstance(option_value, (list, tuple)): # This is an optgroup, so look inside the group for # options. 
for optgroup_key, optgroup_value in option_value: if value == optgroup_key: return elif value == option_key: return raise exceptions.ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": value}, ) if value is None and not self.null: raise exceptions.ValidationError(self.error_messages["null"], code="null") if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages["blank"], code="blank") def clean(self, value, model_instance): """ Convert the value's type and run validation. Validation errors from to_python() and validate() are propagated. Return the correct value if no error is raised. """ value = self.to_python(value) self.validate(value, model_instance) self.run_validators(value) return value def db_type_parameters(self, connection): return DictWrapper(self.__dict__, connection.ops.quote_name, "qn_") def db_check(self, connection): """ Return the database column check constraint for this field, for the provided connection. Works the same way as db_type() for the case that get_internal_type() does not map to a preexisting model field. """ data = self.db_type_parameters(connection) try: return ( connection.data_type_check_constraints[self.get_internal_type()] % data ) except KeyError: return None def db_type(self, connection): """ Return the database column data type for this field, for the provided connection. """ # The default implementation of this method looks at the # backend-specific data_types dictionary, looking up the field by its # "internal type". # # A Field class can implement the get_internal_type() method to specify # which *preexisting* Django Field class it's most similar to -- i.e., # a custom field might be represented by a TEXT column type, which is # the same as the TextField Django field type, which means the custom # field's get_internal_type() returns 'TextField'. # # But the limitation of the get_internal_type() / data_types approach # is that it cannot handle database column types that aren't already # mapped to one of the built-in Django field types. In this case, you # can implement db_type() instead of get_internal_type() to specify # exactly which wacky database column type you want to use. data = self.db_type_parameters(connection) try: return connection.data_types[self.get_internal_type()] % data except KeyError: return None def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. For example, this method is called by ForeignKey and OneToOneField to determine its data type. """ return self.db_type(connection) def cast_db_type(self, connection): """Return the data type to use in the Cast() function.""" db_type = connection.ops.cast_data_types.get(self.get_internal_type()) if db_type: return db_type % self.db_type_parameters(connection) return self.db_type(connection) def db_parameters(self, connection): """ Extension of db_type(), providing a range of different return values (type, checks). This will look at db_type(), allowing custom model fields to override it. 
""" type_string = self.db_type(connection) check_string = self.db_check(connection) return { "type": type_string, "check": check_string, } def db_type_suffix(self, connection): return connection.data_types_suffix.get(self.get_internal_type()) def get_db_converters(self, connection): if hasattr(self, "from_db_value"): return [self.from_db_value] return [] @property def unique(self): return self._unique or self.primary_key @property def db_tablespace(self): return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE @property def db_returning(self): """ Private API intended only to be used by Django itself. Currently only the PostgreSQL backend supports returning multiple fields on a model. """ return False def set_attributes_from_name(self, name): self.name = self.name or name self.attname, self.column = self.get_attname_column() self.concrete = self.column is not None if self.verbose_name is None and self.name: self.verbose_name = self.name.replace("_", " ") def contribute_to_class(self, cls, name, private_only=False): """ Register the field with the model class it belongs to. If private_only is True, create a separate instance of this field for every subclass of cls, even if cls is not an abstract model. """ self.set_attributes_from_name(name) self.model = cls cls._meta.add_field(self, private=private_only) if self.column: setattr(cls, self.attname, self.descriptor_class(self)) if self.choices is not None: # Don't override a get_FOO_display() method defined explicitly on # this class, but don't check methods derived from inheritance, to # allow overriding inherited choices. For more complex inheritance # structures users should override contribute_to_class(). if "get_%s_display" % self.name not in cls.__dict__: setattr( cls, "get_%s_display" % self.name, partialmethod(cls._get_FIELD_display, field=self), ) def get_filter_kwargs_for_object(self, obj): """ Return a dict that when passed as kwargs to self.model.filter(), would yield all instances having the same value for this field as obj has. """ return {self.name: getattr(obj, self.attname)} def get_attname(self): return self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_internal_type(self): return self.__class__.__name__ def pre_save(self, model_instance, add): """Return field's value just before saving.""" return getattr(model_instance, self.attname) def get_prep_value(self, value): """Perform preliminary non-db specific value checks and conversions.""" if isinstance(value, Promise): value = value._proxy____cast() return value def get_db_prep_value(self, value, connection, prepared=False): """ Return field's value prepared for interacting with the database backend. Used by the default implementations of get_db_prep_save(). 
""" if not prepared: value = self.get_prep_value(value) return value def get_db_prep_save(self, value, connection): """Return field's value prepared for saving into a database.""" return self.get_db_prep_value(value, connection=connection, prepared=False) def has_default(self): """Return a boolean of whether this field has a default value.""" return self.default is not NOT_PROVIDED def get_default(self): """Return the default value for this field.""" return self._get_default() @cached_property def _get_default(self): if self.has_default(): if callable(self.default): return self.default return lambda: self.default if ( not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls ): return return_None return str # return empty string def get_choices( self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=(), ): """ Return choices with a default blank choices included, for use as <select> choices for this field. """ if self.choices is not None: choices = list(self.choices) if include_blank: blank_defined = any( choice in ("", None) for choice, _ in self.flatchoices ) if not blank_defined: choices = blank_choice + choices return choices rel_model = self.remote_field.model limit_choices_to = limit_choices_to or self.get_limit_choices_to() choice_func = operator.attrgetter( self.remote_field.get_related_field().attname if hasattr(self.remote_field, "get_related_field") else "pk" ) qs = rel_model._default_manager.complex_filter(limit_choices_to) if ordering: qs = qs.order_by(*ordering) return (blank_choice if include_blank else []) + [ (choice_func(x), str(x)) for x in qs ] def value_to_string(self, obj): """ Return a string value of this field from the passed obj. This is used by the serialization framework. """ return str(self.value_from_object(obj)) def _get_flatchoices(self): """Flattened version of choices tuple.""" if self.choices is None: return [] flat = [] for choice, value in self.choices: if isinstance(value, (list, tuple)): flat.extend(value) else: flat.append((choice, value)) return flat flatchoices = property(_get_flatchoices) def save_form_data(self, instance, data): setattr(instance, self.name, data) def formfield(self, form_class=None, choices_form_class=None, **kwargs): """Return a django.forms.Field instance for this field.""" defaults = { "required": not self.blank, "label": capfirst(self.verbose_name), "help_text": self.help_text, } if self.has_default(): if callable(self.default): defaults["initial"] = self.default defaults["show_hidden_initial"] = True else: defaults["initial"] = self.get_default() if self.choices is not None: # Fields with choices get special treatment. include_blank = self.blank or not ( self.has_default() or "initial" in kwargs ) defaults["choices"] = self.get_choices(include_blank=include_blank) defaults["coerce"] = self.to_python if self.null: defaults["empty_value"] = None if choices_form_class is not None: form_class = choices_form_class else: form_class = forms.TypedChoiceField # Many of the subclass-specific formfield arguments (min_value, # max_value) don't apply for choice fields, so be sure to only pass # the values that TypedChoiceField will understand. 
for k in list(kwargs): if k not in ( "coerce", "empty_value", "choices", "required", "widget", "label", "initial", "help_text", "error_messages", "show_hidden_initial", "disabled", ): del kwargs[k] defaults.update(kwargs) if form_class is None: form_class = forms.CharField return form_class(**defaults) def value_from_object(self, obj): """Return the value of this field in the given model instance.""" return getattr(obj, self.attname) class BooleanField(Field): empty_strings_allowed = False default_error_messages = { "invalid": _("“%(value)s” value must be either True or False."), "invalid_nullable": _("“%(value)s” value must be either True, False, or None."), } description = _("Boolean (Either True or False)") def get_internal_type(self): return "BooleanField" def to_python(self, value): if self.null and value in self.empty_values: return None if value in (True, False): # 1/0 are equal to True/False. bool() converts former to latter. return bool(value) if value in ("t", "True", "1"): return True if value in ("f", "False", "0"): return False raise exceptions.ValidationError( self.error_messages["invalid_nullable" if self.null else "invalid"], code="invalid", params={"value": value}, ) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return self.to_python(value) def formfield(self, **kwargs): if self.choices is not None: include_blank = not (self.has_default() or "initial" in kwargs) defaults = {"choices": self.get_choices(include_blank=include_blank)} else: form_class = forms.NullBooleanField if self.null else forms.BooleanField # In HTML checkboxes, 'required' means "must be checked" which is # different from the choices case ("must select some value"). # required=False allows unchecked checkboxes. defaults = {"form_class": form_class, "required": False} return super().formfield(**{**defaults, **kwargs}) class CharField(Field): description = _("String (up to %(max_length)s)") def __init__(self, *args, db_collation=None, **kwargs): super().__init__(*args, **kwargs) self.db_collation = db_collation if self.max_length is not None: self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): databases = kwargs.get("databases") or [] return [ *super().check(**kwargs), *self._check_db_collation(databases), *self._check_max_length_attribute(**kwargs), ] def _check_max_length_attribute(self, **kwargs): if self.max_length is None: return [ checks.Error( "CharFields must define a 'max_length' attribute.", obj=self, id="fields.E120", ) ] elif ( not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or self.max_length <= 0 ): return [ checks.Error( "'max_length' must be a positive integer.", obj=self, id="fields.E121", ) ] else: return [] def _check_db_collation(self, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if not ( self.db_collation is None or "supports_collation_on_charfield" in self.model._meta.required_db_features or connection.features.supports_collation_on_charfield ): errors.append( checks.Error( "%s does not support a database collation on " "CharFields." 
% connection.display_name, obj=self, id="fields.E190", ), ) return errors def cast_db_type(self, connection): if self.max_length is None: return connection.ops.cast_char_field_without_max_length return super().cast_db_type(connection) def db_parameters(self, connection): db_params = super().db_parameters(connection) db_params["collation"] = self.db_collation return db_params def get_internal_type(self): return "CharField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). defaults = {"max_length": self.max_length} # TODO: Handle multiple backends with different feature flags. if self.null and not connection.features.interprets_empty_strings_as_nulls: defaults["empty_value"] = None defaults.update(kwargs) return super().formfield(**defaults) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.db_collation: kwargs["db_collation"] = self.db_collation return name, path, args, kwargs class CommaSeparatedIntegerField(CharField): default_validators = [validators.validate_comma_separated_integer_list] description = _("Comma-separated integers") system_check_removed_details = { "msg": ( "CommaSeparatedIntegerField is removed except for support in " "historical migrations." ), "hint": ( "Use CharField(validators=[validate_comma_separated_integer_list]) " "instead." ), "id": "fields.E901", } def _to_naive(value): if timezone.is_aware(value): value = timezone.make_naive(value, datetime.timezone.utc) return value def _get_naive_now(): return _to_naive(timezone.now()) class DateTimeCheckMixin: def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_mutually_exclusive_options(), *self._check_fix_default_value(), ] def _check_mutually_exclusive_options(self): # auto_now, auto_now_add, and default are mutually exclusive # options. The use of more than one of these options together # will trigger an Error mutually_exclusive_options = [ self.auto_now_add, self.auto_now, self.has_default(), ] enabled_options = [ option not in (None, False) for option in mutually_exclusive_options ].count(True) if enabled_options > 1: return [ checks.Error( "The options auto_now, auto_now_add, and default " "are mutually exclusive. Only one of these options " "may be present.", obj=self, id="fields.E160", ) ] else: return [] def _check_fix_default_value(self): return [] # Concrete subclasses use this in their implementations of # _check_fix_default_value(). def _check_if_value_fixed(self, value, now=None): """ Check if the given value appears to have been provided as a "fixed" time value, and include a warning in the returned list if it does. The value argument must be a date object or aware/naive datetime object. If now is provided, it must be a naive datetime object. 
""" if now is None: now = _get_naive_now() offset = datetime.timedelta(seconds=10) lower = now - offset upper = now + offset if isinstance(value, datetime.datetime): value = _to_naive(value) else: assert isinstance(value, datetime.date) lower = lower.date() upper = upper.date() if lower <= value <= upper: return [ checks.Warning( "Fixed default value provided.", hint=( "It seems you set a fixed date / time / datetime " "value as default for this field. This may not be " "what you want. If you want to have the current date " "as default, use `django.utils.timezone.now`" ), obj=self, id="fields.W161", ) ] return [] class DateField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid date format. It must be " "in YYYY-MM-DD format." ), "invalid_date": _( "“%(value)s” value has the correct format (YYYY-MM-DD) " "but it is an invalid date." ), } description = _("Date (without time)") def __init__( self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs ): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs["editable"] = False kwargs["blank"] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, datetime.datetime): value = _to_naive(value).date() elif isinstance(value, datetime.date): pass else: # No explicit date / datetime value -- no checks necessary return [] # At this point, value is a date object. return self._check_if_value_fixed(value) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now: kwargs["auto_now"] = True if self.auto_now_add: kwargs["auto_now_add"] = True if self.auto_now or self.auto_now_add: del kwargs["editable"] del kwargs["blank"] return name, path, args, kwargs def get_internal_type(self): return "DateField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): if settings.USE_TZ and timezone.is_aware(value): # Convert aware datetimes to the default time zone # before casting them to dates (#17742). 
default_timezone = timezone.get_default_timezone() value = timezone.make_naive(value, default_timezone) return value.date() if isinstance(value, datetime.date): return value try: parsed = parse_date(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_date"], code="invalid_date", params={"value": value}, ) raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.date.today() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) if not self.null: setattr( cls, "get_next_by_%s" % self.name, partialmethod( cls._get_next_or_previous_by_FIELD, field=self, is_next=True ), ) setattr( cls, "get_previous_by_%s" % self.name, partialmethod( cls._get_next_or_previous_by_FIELD, field=self, is_next=False ), ) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts dates into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.DateField, **kwargs, } ) class DateTimeField(DateField): empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid format. It must be in " "YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format." ), "invalid_date": _( "“%(value)s” value has the correct format " "(YYYY-MM-DD) but it is an invalid date." ), "invalid_datetime": _( "“%(value)s” value has the correct format " "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) " "but it is an invalid date/time." ), } description = _("Date (with time)") # __init__ is inherited from DateField def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, (datetime.datetime, datetime.date)): return self._check_if_value_fixed(value) # No explicit date / datetime value -- no checks necessary. return [] def get_internal_type(self): return "DateTimeField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): return value if isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) if settings.USE_TZ: # For backwards compatibility, interpret naive datetimes in # local time. This won't work during DST change, but we can't # do much about it, so we let the exceptions percolate up the # call stack. warnings.warn( "DateTimeField %s.%s received a naive datetime " "(%s) while time zone support is active." 
% (self.model.__name__, self.name, value), RuntimeWarning, ) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value try: parsed = parse_datetime(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_datetime"], code="invalid_datetime", params={"value": value}, ) try: parsed = parse_date(value) if parsed is not None: return datetime.datetime(parsed.year, parsed.month, parsed.day) except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_date"], code="invalid_date", params={"value": value}, ) raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = timezone.now() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) # contribute_to_class is inherited from DateField, it registers # get_next_by_FOO and get_prev_by_FOO def get_prep_value(self, value): value = super().get_prep_value(value) value = self.to_python(value) if value is not None and settings.USE_TZ and timezone.is_naive(value): # For backwards compatibility, interpret naive datetimes in local # time. This won't work during DST change, but we can't do much # about it, so we let the exceptions percolate up the call stack. try: name = "%s.%s" % (self.model.__name__, self.name) except AttributeError: name = "(unbound)" warnings.warn( "DateTimeField %s received a naive datetime (%s)" " while time zone support is active." % (name, value), RuntimeWarning, ) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value def get_db_prep_value(self, value, connection, prepared=False): # Casts datetimes into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datetimefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.DateTimeField, **kwargs, } ) class DecimalField(Field): empty_strings_allowed = False default_error_messages = { "invalid": _("“%(value)s” value must be a decimal number."), } description = _("Decimal number") def __init__( self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs, ): self.max_digits, self.decimal_places = max_digits, decimal_places super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super().check(**kwargs) digits_errors = [ *self._check_decimal_places(), *self._check_max_digits(), ] if not digits_errors: errors.extend(self._check_decimal_places_and_max_digits(**kwargs)) else: errors.extend(digits_errors) return errors def _check_decimal_places(self): try: decimal_places = int(self.decimal_places) if decimal_places < 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'decimal_places' attribute.", obj=self, id="fields.E130", ) ] except ValueError: return [ checks.Error( "'decimal_places' must be a non-negative integer.", obj=self, id="fields.E131", ) ] else: return [] def _check_max_digits(self): try: max_digits = int(self.max_digits) if max_digits <= 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'max_digits' attribute.", obj=self, 
id="fields.E132", ) ] except ValueError: return [ checks.Error( "'max_digits' must be a positive integer.", obj=self, id="fields.E133", ) ] else: return [] def _check_decimal_places_and_max_digits(self, **kwargs): if int(self.decimal_places) > int(self.max_digits): return [ checks.Error( "'max_digits' must be greater or equal to 'decimal_places'.", obj=self, id="fields.E134", ) ] return [] @cached_property def validators(self): return super().validators + [ validators.DecimalValidator(self.max_digits, self.decimal_places) ] @cached_property def context(self): return decimal.Context(prec=self.max_digits) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.max_digits is not None: kwargs["max_digits"] = self.max_digits if self.decimal_places is not None: kwargs["decimal_places"] = self.decimal_places return name, path, args, kwargs def get_internal_type(self): return "DecimalField" def to_python(self, value): if value is None: return value try: if isinstance(value, float): decimal_value = self.context.create_decimal_from_float(value) else: decimal_value = decimal.Decimal(value) except (decimal.InvalidOperation, TypeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) if not decimal_value.is_finite(): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) return decimal_value def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_decimalfield_value( value, self.max_digits, self.decimal_places ) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): return super().formfield( **{ "max_digits": self.max_digits, "decimal_places": self.decimal_places, "form_class": forms.DecimalField, **kwargs, } ) class DurationField(Field): """ Store timedelta objects. Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint of microseconds on other databases. """ empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid format. It must be in " "[DD] [[HH:]MM:]ss[.uuuuuu] format." 
) } description = _("Duration") def get_internal_type(self): return "DurationField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.timedelta): return value try: parsed = parse_duration(value) except ValueError: pass else: if parsed is not None: return parsed raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def get_db_prep_value(self, value, connection, prepared=False): if connection.features.has_native_duration_field: return value if value is None: return None return duration_microseconds(value) def get_db_converters(self, connection): converters = [] if not connection.features.has_native_duration_field: converters.append(connection.ops.convert_durationfield_value) return converters + super().get_db_converters(connection) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else duration_string(val) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.DurationField, **kwargs, } ) class EmailField(CharField): default_validators = [validators.validate_email] description = _("Email address") def __init__(self, *args, **kwargs): # max_length=254 to be compliant with RFCs 3696 and 5321 kwargs.setdefault("max_length", 254) super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() # We do not exclude max_length if it matches default as we want to change # the default in future. return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause email validation to be performed # twice. return super().formfield( **{ "form_class": forms.EmailField, **kwargs, } ) class FilePathField(Field): description = _("File path") def __init__( self, verbose_name=None, name=None, path="", match=None, recursive=False, allow_files=True, allow_folders=False, **kwargs, ): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders kwargs.setdefault("max_length", 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_allowing_files_or_folders(**kwargs), ] def _check_allowing_files_or_folders(self, **kwargs): if not self.allow_files and not self.allow_folders: return [ checks.Error( "FilePathFields must have either 'allow_files' or 'allow_folders' " "set to True.", obj=self, id="fields.E140", ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.path != "": kwargs["path"] = self.path if self.match is not None: kwargs["match"] = self.match if self.recursive is not False: kwargs["recursive"] = self.recursive if self.allow_files is not True: kwargs["allow_files"] = self.allow_files if self.allow_folders is not False: kwargs["allow_folders"] = self.allow_folders if kwargs.get("max_length") == 100: del kwargs["max_length"] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def formfield(self, **kwargs): return super().formfield( **{ "path": self.path() if callable(self.path) else self.path, "match": self.match, "recursive": self.recursive, "form_class": forms.FilePathField, "allow_files": self.allow_files, "allow_folders": self.allow_folders, **kwargs, } ) def get_internal_type(self): return "FilePathField" class FloatField(Field): empty_strings_allowed = False default_error_messages = { "invalid": 
_("“%(value)s” value must be a float."), } description = _("Floating point number") def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return float(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "FloatField" def to_python(self, value): if value is None: return value try: return float(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.FloatField, **kwargs, } ) class IntegerField(Field): empty_strings_allowed = False default_error_messages = { "invalid": _("“%(value)s” value must be an integer."), } description = _("Integer") def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_max_length_warning(), ] def _check_max_length_warning(self): if self.max_length is not None: return [ checks.Warning( "'max_length' is ignored when used with %s." % self.__class__.__name__, hint="Remove 'max_length' from field", obj=self, id="fields.W122", ) ] return [] @cached_property def validators(self): # These validators can't be added at field initialization time since # they're based on values retrieved from `connection`. validators_ = super().validators internal_type = self.get_internal_type() min_value, max_value = connection.ops.integer_field_range(internal_type) if min_value is not None and not any( ( isinstance(validator, validators.MinValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) >= min_value ) for validator in validators_ ): validators_.append(validators.MinValueValidator(min_value)) if max_value is not None and not any( ( isinstance(validator, validators.MaxValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) <= max_value ) for validator in validators_ ): validators_.append(validators.MaxValueValidator(max_value)) return validators_ def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return int(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "IntegerField" def to_python(self, value): if value is None: return value try: return int(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.IntegerField, **kwargs, } ) class BigIntegerField(IntegerField): description = _("Big (8 byte) integer") MAX_BIGINT = 9223372036854775807 def get_internal_type(self): return "BigIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": -BigIntegerField.MAX_BIGINT - 1, "max_value": BigIntegerField.MAX_BIGINT, **kwargs, } ) class SmallIntegerField(IntegerField): description = _("Small integer") def get_internal_type(self): return "SmallIntegerField" class IPAddressField(Field): empty_strings_allowed = False description = _("IPv4 address") system_check_removed_details = { "msg": ( "IPAddressField has been removed except for support in " "historical migrations." 
), "hint": "Use GenericIPAddressField instead.", "id": "fields.E900", } def __init__(self, *args, **kwargs): kwargs["max_length"] = 15 super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["max_length"] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def get_internal_type(self): return "IPAddressField" class GenericIPAddressField(Field): empty_strings_allowed = False description = _("IP address") default_error_messages = {} def __init__( self, verbose_name=None, name=None, protocol="both", unpack_ipv4=False, *args, **kwargs, ): self.unpack_ipv4 = unpack_ipv4 self.protocol = protocol ( self.default_validators, invalid_error_message, ) = validators.ip_address_validators(protocol, unpack_ipv4) self.default_error_messages["invalid"] = invalid_error_message kwargs["max_length"] = 39 super().__init__(verbose_name, name, *args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_blank_and_null_values(**kwargs), ] def _check_blank_and_null_values(self, **kwargs): if not getattr(self, "null", False) and getattr(self, "blank", False): return [ checks.Error( "GenericIPAddressFields cannot have blank=True if null=False, " "as blank values are stored as nulls.", obj=self, id="fields.E150", ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.unpack_ipv4 is not False: kwargs["unpack_ipv4"] = self.unpack_ipv4 if self.protocol != "both": kwargs["protocol"] = self.protocol if kwargs.get("max_length") == 39: del kwargs["max_length"] return name, path, args, kwargs def get_internal_type(self): return "GenericIPAddressField" def to_python(self, value): if value is None: return None if not isinstance(value, str): value = str(value) value = value.strip() if ":" in value: return clean_ipv6_address( value, self.unpack_ipv4, self.error_messages["invalid"] ) return value def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_ipaddressfield_value(value) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None if value and ":" in value: try: return clean_ipv6_address(value, self.unpack_ipv4) except exceptions.ValidationError: pass return str(value) def formfield(self, **kwargs): return super().formfield( **{ "protocol": self.protocol, "form_class": forms.GenericIPAddressField, **kwargs, } ) class NullBooleanField(BooleanField): default_error_messages = { "invalid": _("“%(value)s” value must be either None, True or False."), "invalid_nullable": _("“%(value)s” value must be either None, True or False."), } description = _("Boolean (Either True, False or None)") system_check_removed_details = { "msg": ( "NullBooleanField is removed except for support in historical " "migrations." 
), "hint": "Use BooleanField(null=True) instead.", "id": "fields.E903", } def __init__(self, *args, **kwargs): kwargs["null"] = True kwargs["blank"] = True super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["null"] del kwargs["blank"] return name, path, args, kwargs class PositiveIntegerRelDbTypeMixin: def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if not hasattr(cls, "integer_field_class"): cls.integer_field_class = next( ( parent for parent in cls.__mro__[1:] if issubclass(parent, IntegerField) ), None, ) def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. In most cases, a foreign key pointing to a positive integer primary key will have an integer column data type but some databases (e.g. MySQL) have an unsigned integer type. In that case (related_fields_match_type=True), the primary key should return its db_type. """ if connection.features.related_fields_match_type: return self.db_type(connection) else: return self.integer_field_class().db_type(connection=connection) class PositiveBigIntegerField(PositiveIntegerRelDbTypeMixin, BigIntegerField): description = _("Positive big integer") def get_internal_type(self): return "PositiveBigIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": 0, **kwargs, } ) class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive integer") def get_internal_type(self): return "PositiveIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": 0, **kwargs, } ) class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, SmallIntegerField): description = _("Positive small integer") def get_internal_type(self): return "PositiveSmallIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": 0, **kwargs, } ) class SlugField(CharField): default_validators = [validators.validate_slug] description = _("Slug (up to %(max_length)s)") def __init__( self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs ): self.allow_unicode = allow_unicode if self.allow_unicode: self.default_validators = [validators.validate_unicode_slug] super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 50: del kwargs["max_length"] if self.db_index is False: kwargs["db_index"] = False else: del kwargs["db_index"] if self.allow_unicode is not False: kwargs["allow_unicode"] = self.allow_unicode return name, path, args, kwargs def get_internal_type(self): return "SlugField" def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.SlugField, "allow_unicode": self.allow_unicode, **kwargs, } ) class TextField(Field): description = _("Text") def __init__(self, *args, db_collation=None, **kwargs): super().__init__(*args, **kwargs) self.db_collation = db_collation def check(self, **kwargs): databases = kwargs.get("databases") or [] return [ *super().check(**kwargs), *self._check_db_collation(databases), ] def _check_db_collation(self, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if not ( self.db_collation is None or "supports_collation_on_textfield" in self.model._meta.required_db_features or connection.features.supports_collation_on_textfield ): errors.append( 
checks.Error( "%s does not support a database collation on " "TextFields." % connection.display_name, obj=self, id="fields.E190", ), ) return errors def db_parameters(self, connection): db_params = super().db_parameters(connection) db_params["collation"] = self.db_collation return db_params def get_internal_type(self): return "TextField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). return super().formfield( **{ "max_length": self.max_length, **({} if self.choices is not None else {"widget": forms.Textarea}), **kwargs, } ) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.db_collation: kwargs["db_collation"] = self.db_collation return name, path, args, kwargs class TimeField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid format. It must be in " "HH:MM[:ss[.uuuuuu]] format." ), "invalid_time": _( "“%(value)s” value has the correct format " "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time." ), } description = _("Time") def __init__( self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs ): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs["editable"] = False kwargs["blank"] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, datetime.datetime): now = None elif isinstance(value, datetime.time): now = _get_naive_now() # This will not use the right date in the race condition where now # is just before the date change and value is just past 0:00. value = datetime.datetime.combine(now.date(), value) else: # No explicit time / datetime value -- no checks necessary return [] # At this point, value is a datetime object. return self._check_if_value_fixed(value, now=now) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now is not False: kwargs["auto_now"] = self.auto_now if self.auto_now_add is not False: kwargs["auto_now_add"] = self.auto_now_add if self.auto_now or self.auto_now_add: del kwargs["blank"] del kwargs["editable"] return name, path, args, kwargs def get_internal_type(self): return "TimeField" def to_python(self, value): if value is None: return None if isinstance(value, datetime.time): return value if isinstance(value, datetime.datetime): # Not usually a good idea to pass in a datetime here (it loses # information), but this can be a side-effect of interacting with a # database backend (e.g. Oracle), so we'll be accommodating. 
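            # Editorial examples (hedged addition, not part of Django's
            # source), illustrating the two accommodating branches below:
            #     TimeField().to_python(datetime.datetime(2024, 1, 1, 9, 30))
            #     --> datetime.time(9, 30)              # datetime branch
            #     TimeField().to_python("09:30:15.5")
            #     --> datetime.time(9, 30, 15, 500000)  # via parse_time()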
return value.time() try: parsed = parse_time(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_time"], code="invalid_time", params={"value": value}, ) raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.datetime.now().time() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts times into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_timefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.TimeField, **kwargs, } ) class URLField(CharField): default_validators = [validators.URLValidator()] description = _("URL") def __init__(self, verbose_name=None, name=None, **kwargs): kwargs.setdefault("max_length", 200) super().__init__(verbose_name, name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 200: del kwargs["max_length"] return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause URL validation to be performed # twice. return super().formfield( **{ "form_class": forms.URLField, **kwargs, } ) class BinaryField(Field): description = _("Raw binary data") empty_values = [None, b""] def __init__(self, *args, **kwargs): kwargs.setdefault("editable", False) super().__init__(*args, **kwargs) if self.max_length is not None: self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): return [*super().check(**kwargs), *self._check_str_default_value()] def _check_str_default_value(self): if self.has_default() and isinstance(self.default, str): return [ checks.Error( "BinaryField's default cannot be a string. 
Use bytes " "content instead.", obj=self, id="fields.E170", ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.editable: kwargs["editable"] = True else: del kwargs["editable"] return name, path, args, kwargs def get_internal_type(self): return "BinaryField" def get_placeholder(self, value, compiler, connection): return connection.ops.binary_placeholder_sql(value) def get_default(self): if self.has_default() and not callable(self.default): return self.default default = super().get_default() if default == "": return b"" return default def get_db_prep_value(self, value, connection, prepared=False): value = super().get_db_prep_value(value, connection, prepared) if value is not None: return connection.Database.Binary(value) return value def value_to_string(self, obj): """Binary data is serialized as base64""" return b64encode(self.value_from_object(obj)).decode("ascii") def to_python(self, value): # If it's a string, it should be base64-encoded data if isinstance(value, str): return memoryview(b64decode(value.encode("ascii"))) return value class UUIDField(Field): default_error_messages = { "invalid": _("“%(value)s” is not a valid UUID."), } description = _("Universally unique identifier") empty_strings_allowed = False def __init__(self, verbose_name=None, **kwargs): kwargs["max_length"] = 32 super().__init__(verbose_name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["max_length"] return name, path, args, kwargs def get_internal_type(self): return "UUIDField" def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): if value is None: return None if not isinstance(value, uuid.UUID): value = self.to_python(value) if connection.features.has_native_uuid_field: return value return value.hex def to_python(self, value): if value is not None and not isinstance(value, uuid.UUID): input_form = "int" if isinstance(value, int) else "hex" try: return uuid.UUID(**{input_form: value}) except (AttributeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) return value def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.UUIDField, **kwargs, } ) class AutoFieldMixin: db_returning = True def __init__(self, *args, **kwargs): kwargs["blank"] = True super().__init__(*args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), ] def _check_primary_key(self): if not self.primary_key: return [ checks.Error( "AutoFields must set primary_key=True.", obj=self, id="fields.E100", ), ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["blank"] kwargs["primary_key"] = True return name, path, args, kwargs def validate(self, value, model_instance): pass def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) value = connection.ops.validate_autopk_value(value) return value def contribute_to_class(self, cls, name, **kwargs): if cls._meta.auto_field: raise ValueError( "Model %s can't have more than one auto-generated field." % cls._meta.label ) super().contribute_to_class(cls, name, **kwargs) cls._meta.auto_field = self def formfield(self, **kwargs): return None class AutoFieldMeta(type): """ Metaclass to maintain backward inheritance compatibility for AutoField. 
It is intended that AutoFieldMixin become public API when it is possible to create a non-integer automatically-generated field using column defaults stored in the database. In many areas Django also relies on using isinstance() to check for an automatically-generated field as a subclass of AutoField. A new flag needs to be implemented on Field to be used instead. When these issues have been addressed, this metaclass could be used to deprecate inheritance from AutoField and use of isinstance() with AutoField for detecting automatically-generated fields. """ @property def _subclasses(self): return (BigAutoField, SmallAutoField) def __instancecheck__(self, instance): return isinstance(instance, self._subclasses) or super().__instancecheck__( instance ) def __subclasscheck__(self, subclass): return issubclass(subclass, self._subclasses) or super().__subclasscheck__( subclass ) class AutoField(AutoFieldMixin, IntegerField, metaclass=AutoFieldMeta): def get_internal_type(self): return "AutoField" def rel_db_type(self, connection): return IntegerField().db_type(connection=connection) class BigAutoField(AutoFieldMixin, BigIntegerField): def get_internal_type(self): return "BigAutoField" def rel_db_type(self, connection): return BigIntegerField().db_type(connection=connection) class SmallAutoField(AutoFieldMixin, SmallIntegerField): def get_internal_type(self): return "SmallAutoField" def rel_db_type(self, connection): return SmallIntegerField().db_type(connection=connection)
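# A minimal sketch of the metaclass behavior documented on AutoFieldMeta
# (editorial example, not part of Django's source; assumes only the classes
# defined above):
#
#     >>> issubclass(BigAutoField, AutoField)
#     True
#     >>> isinstance(SmallAutoField(primary_key=True), AutoField)
#     True
#
# Neither BigAutoField nor SmallAutoField actually inherits from AutoField;
# the __subclasscheck__()/__instancecheck__() hooks above make these checks
# pass for backward compatibility.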
d32807420bee07516bfb8900da80049ecc6a2cdf6da857c779d32d8a090ff84a
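# Editorial usage sketch (not part of Django's source; "Book" is a
# hypothetical model): a compiler is normally obtained from a Query and
# asked to render SQL.
#
#     query = Book.objects.filter(pages__gt=100).query
#     compiler = query.get_compiler(using="default")
#     sql, params = compiler.as_sql()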
import collections
import json
import re
from functools import partial
from itertools import chain

from django.core.exceptions import EmptyResultSet, FieldError, FullResultSet
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.lookups import Lookup
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
    CURSOR,
    GET_ITERATOR_CHUNK_SIZE,
    MULTI,
    NO_RESULTS,
    ORDER_DIR,
    SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.models.sql.where import AND
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile


class SQLCompiler:
    # A multiline ordering SQL clause may appear from RawSQL.
    ordering_parts = _lazy_re_compile(
        r"^(.*)\s(?:ASC|DESC).*",
        re.MULTILINE | re.DOTALL,
    )

    def __init__(self, query, connection, using, elide_empty=True):
        self.query = query
        self.connection = connection
        self.using = using
        # Some queries, e.g. coalesced aggregation, need to be executed even
        # if they would return an empty result set.
        self.elide_empty = elide_empty
        self.quote_cache = {"*": "*"}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); they are set as a side-effect of executing the
        # query. Note that we calculate separately a list of extra select
        # columns needed for grammatical correctness of the query, but these
        # columns are not included in self.select.
        self.select = None
        self.annotation_col_map = None
        self.klass_info = None
        self._meta_ordering = None

    def __repr__(self):
        return (
            f"<{self.__class__.__qualname__} "
            f"model={self.query.model.__qualname__} "
            f"connection={self.connection!r} using={self.using!r}>"
        )

    def setup_query(self, with_col_aliases=False):
        if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
            self.query.get_initial_alias()
        self.select, self.klass_info, self.annotation_col_map = self.get_select(
            with_col_aliases=with_col_aliases,
        )
        self.col_count = len(self.select)

    def pre_sql_setup(self, with_col_aliases=False):
        """
        Do any necessary class setup immediately prior to producing SQL. This
        is for things that can't necessarily be done in __init__ because we
        might not have all the pieces in place at that time.
        """
        self.setup_query(with_col_aliases=with_col_aliases)
        order_by = self.get_order_by()
        self.where, self.having, self.qualify = self.query.where.split_having_qualify(
            must_group_by=self.query.group_by is not None
        )
        extra_select = self.get_extra_select(order_by, self.select)
        self.has_extra_select = bool(extra_select)
        group_by = self.get_group_by(self.select + extra_select, order_by)
        return extra_select, order_by, group_by

    def get_group_by(self, select, order_by):
        """
        Return a list of 2-tuples of form (sql, params).

        The logic of what exactly the GROUP BY clause contains is hard
        to describe in other words than "if it passes the test suite,
        then it is correct".
""" # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk') # .annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. if self.query.group_by is None: return [] expressions = [] allows_group_by_refs = self.connection.features.allows_group_by_refs if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, "as_sql"): expr = self.query.resolve_ref(expr) if not allows_group_by_refs and isinstance(expr, Ref): expr = expr.source expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)} aliased_exprs = {} for expr, _, alias in select: # Skip members of the select clause that are already included # by reference. if expr in ref_sources: continue if alias: aliased_exprs[expr] = alias cols = expr.get_group_by_cols() for col in cols: expressions.append(col) if not self._meta_ordering: for expr, (sql, params, is_ref) in order_by: # Skip references to the SELECT clause, as all expressions in # the SELECT clause are already part of the GROUP BY. if not is_ref: expressions.extend(expr.get_group_by_cols()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) for expr in expressions: if allows_group_by_refs and (alias := aliased_exprs.get(expr)): expr = Ref(alias, expr) try: sql, params = self.compile(expr) except (EmptyResultSet, FullResultSet): continue sql, params = expr.select_format(self, sql, params) params_hash = make_hashable(params) if (sql, params_hash) not in seen: result.append((sql, params)) seen.add((sql, params_hash)) return result def collapse_group_by(self, expressions, having): # If the database supports group by functional dependence reduction, # then the expressions can be reduced to the set of selected table # primary keys as all other columns are functionally dependent on them. 
        if self.connection.features.allows_group_by_selected_pks:
            # Filter out all expressions associated with a table's primary
            # key present in the grouped columns. This is done by identifying
            # all tables that have their primary key included in the grouped
            # columns and removing non-primary key columns referring to them.
            # Unmanaged models are excluded because they could be
            # representing database views on which the optimization might not
            # be allowed.
            pks = {
                expr
                for expr in expressions
                if (
                    hasattr(expr, "target")
                    and expr.target.primary_key
                    and self.connection.features.allows_group_by_selected_pks_on_model(
                        expr.target.model
                    )
                )
            }
            aliases = {expr.alias for expr in pks}
            expressions = [
                expr
                for expr in expressions
                if expr in pks
                or expr in having
                or getattr(expr, "alias", None) not in aliases
            ]
        return expressions

    def get_select(self, with_col_aliases=False):
        """
        Return three values:
        - a list of 3-tuples of (expression, (sql, params), alias)
        - a klass_info structure,
        - a dictionary of annotations

        The (sql, params) is what the expression will produce, and alias is
        the "AS alias" for the column (possibly None).

        The klass_info structure contains the following information:
        - The base model of the query.
        - Which columns for that model are present in the query (by position
          of the select clause).
        - related_klass_infos: [f, klass_info] to descend into

        The annotations structure is a dictionary of {'attname': column
        position} values.
        """
        select = []
        klass_info = None
        annotations = {}
        select_idx = 0
        for alias, (sql, params) in self.query.extra_select.items():
            annotations[alias] = select_idx
            select.append((RawSQL(sql, params), alias))
            select_idx += 1
        assert not (self.query.select and self.query.default_cols)
        select_mask = self.query.get_select_mask()
        if self.query.default_cols:
            cols = self.get_default_columns(select_mask)
        else:
            # self.query.select is a special case. These columns never go to
            # any model.
            cols = self.query.select
        if cols:
            select_list = []
            for col in cols:
                select_list.append(select_idx)
                select.append((col, None))
                select_idx += 1
            klass_info = {
                "model": self.query.model,
                "select_fields": select_list,
            }
        for alias, annotation in self.query.annotation_select.items():
            annotations[alias] = select_idx
            select.append((annotation, alias))
            select_idx += 1

        if self.query.select_related:
            related_klass_infos = self.get_related_selections(select, select_mask)
            klass_info["related_klass_infos"] = related_klass_infos

            def get_select_from_parent(klass_info):
                for ki in klass_info["related_klass_infos"]:
                    if ki["from_parent"]:
                        ki["select_fields"] = (
                            klass_info["select_fields"] + ki["select_fields"]
                        )
                    get_select_from_parent(ki)

            get_select_from_parent(klass_info)

        ret = []
        col_idx = 1
        for col, alias in select:
            try:
                sql, params = self.compile(col)
            except EmptyResultSet:
                empty_result_set_value = getattr(
                    col, "empty_result_set_value", NotImplemented
                )
                if empty_result_set_value is NotImplemented:
                    # Select a predicate that's always False.
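                    # (Editorial aside, not original: expressions that define
                    # empty_result_set_value -- Count(), for example, is
                    # believed to fall back to 0 -- take the else branch
                    # below instead of this always-false predicate.)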
sql, params = "0", () else: sql, params = self.compile(Value(empty_result_set_value)) except FullResultSet: sql, params = self.compile(Value(True)) else: sql, params = col.select_format(self, sql, params) if alias is None and with_col_aliases: alias = f"col{col_idx}" col_idx += 1 ret.append((col, (sql, params), alias)) return ret, klass_info, annotations def _order_by_pairs(self): if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by elif self.query.order_by: ordering = self.query.order_by elif (meta := self.query.get_meta()) and meta.ordering: ordering = meta.ordering self._meta_ordering = ordering else: ordering = [] if self.query.standard_ordering: default_order, _ = ORDER_DIR["ASC"] else: default_order, _ = ORDER_DIR["DESC"] for field in ordering: if hasattr(field, "resolve_expression"): if isinstance(field, Value): # output_field must be resolved for constants. field = Cast(field, field.output_field) if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field = field.copy() field.reverse_ordering() if isinstance(field.expression, F) and ( annotation := self.query.annotation_select.get( field.expression.name ) ): field.expression = Ref(field.expression.name, annotation) yield field, isinstance(field.expression, Ref) continue if field == "?": # random yield OrderBy(Random()), False continue col, order = get_order_dir(field, default_order) descending = order == "DESC" if col in self.query.annotation_select: # Reference to expression in SELECT clause yield ( OrderBy( Ref(col, self.query.annotation_select[col]), descending=descending, ), True, ) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT # clause. if self.query.combinator and self.select: # Don't use the resolved annotation because other # combinated queries might define it differently. expr = F(col) else: expr = self.query.annotations[col] if isinstance(expr, Value): # output_field must be resolved for constants. expr = Cast(expr, expr.output_field) yield OrderBy(expr, descending=descending), False continue if "." in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split(".", 1) yield ( OrderBy( RawSQL( "%s.%s" % (self.quote_name_unless_alias(table), col), [] ), descending=descending, ), False, ) continue if self.query.extra and col in self.query.extra: if col in self.query.extra_select: yield ( OrderBy( Ref(col, RawSQL(*self.query.extra[col])), descending=descending, ), True, ) else: yield ( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False, ) else: if self.query.combinator and self.select: # Don't use the first model's field because other # combinated queries might define it differently. yield OrderBy(F(col), descending=descending), False else: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. yield from self.find_ordering_name( field, self.query.get_meta(), default_order=default_order, ) def get_order_by(self): """ Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). 
""" result = [] seen = set() for expr, is_ref in self._order_by_pairs(): resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None) if not is_ref and self.query.combinator and self.select: src = resolved.expression expr_src = expr.expression for sel_expr, _, col_alias in self.select: if col_alias and not ( isinstance(expr_src, F) and col_alias == expr_src.name ): continue if src == sel_expr: resolved.set_source_expressions( [Ref(col_alias if col_alias else src.target.column, src)] ) break else: if col_alias: raise DatabaseError( "ORDER BY term does not match any column in the result set." ) # Add column used in ORDER BY clause to the selected # columns and to each combined query. order_by_idx = len(self.query.select) + 1 col_name = f"__orderbycol{order_by_idx}" for q in self.query.combined_queries: q.add_annotation(expr_src, col_name) self.query.add_select_col(resolved, col_name) resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())]) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. without_ordering = self.ordering_parts.search(sql)[1] params_hash = make_hashable(params) if (without_ordering, params_hash) in seen: continue seen.add((without_ordering, params_hash)) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql)[1] if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). """ if name in self.quote_cache: return self.quote_cache[name] if ( (name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( self.query.external_aliases.get(name) and name not in self.query.table_map ) ): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node): vendor_impl = getattr(node, "as_" + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection, self.elide_empty) for query in self.query.combined_queries ] if not features.supports_slicing_ordering_in_compound: for compiler in compilers: if compiler.query.is_sliced: raise DatabaseError( "LIMIT/OFFSET not allowed in subqueries of compound statements." ) if compiler.get_order_by(): raise DatabaseError( "ORDER BY not allowed in subqueries of compound statements." 
) elif self.query.is_sliced and combinator == "union": limit = (self.query.low_mark, self.query.high_mark) for compiler in compilers: # A sliced union cannot have its parts elided as some of them # might be sliced as well and in the event where only a single # part produces a non-empty resultset it might be impossible to # generate valid SQL. compiler.elide_empty = False if not compiler.query.is_sliced: compiler.query.set_limits(*limit) parts = () for compiler in compilers: try: # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. if not compiler.query.values_select and self.query.values_select: compiler.query = compiler.query.clone() compiler.query.set_values( ( *self.query.extra_select, *self.query.values_select, *self.query.annotation_select, ) ) part_sql, part_args = compiler.as_sql() if compiler.query.combinator: # Wrap in a subquery if wrapping in parentheses isn't # supported. if not features.supports_parentheses_in_compound: part_sql = "SELECT * FROM ({})".format(part_sql) # Add parentheses when combining with compound query if not # already added for all compound queries. elif ( self.query.subquery or not features.supports_slicing_ordering_in_compound ): part_sql = "({})".format(part_sql) elif ( self.query.subquery and features.supports_slicing_ordering_in_compound ): part_sql = "({})".format(part_sql) parts += ((part_sql, part_args),) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. if combinator == "union" or (combinator == "difference" and parts): continue raise if not parts: raise EmptyResultSet combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == "union": combinator_sql += " ALL" braces = "{}" if not self.query.subquery and features.supports_slicing_ordering_in_compound: braces = "({})" sql_parts, args_parts = zip( *((braces.format(sql), args) for sql, args in parts) ) result = [" {} ".format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def get_qualify_sql(self): where_parts = [] if self.where: where_parts.append(self.where) if self.having: where_parts.append(self.having) inner_query = self.query.clone() inner_query.subquery = True inner_query.where = inner_query.where.__class__(where_parts) # Augment the inner query with any window function references that # might have been masked via values() and alias(). If any masked # aliases are added they'll be masked again to avoid fetching # the data in the `if qual_aliases` branch below. 
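        # Editorial example (hypothetical queryset, not original source):
        #     qs.annotate(
        #         rn=Window(RowNumber(), order_by=F("pk").asc())
        #     ).filter(rn__lte=3)
        # compiles roughly to
        #     SELECT * FROM (<inner query selecting rn>) qualify WHERE rn <= 3
        # since most backends reject window functions in WHERE and HAVING.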
select = { expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0] } select_aliases = set(select.values()) qual_aliases = set() replacements = {} def collect_replacements(expressions): while expressions: expr = expressions.pop() if expr in replacements: continue elif select_alias := select.get(expr): replacements[expr] = select_alias elif isinstance(expr, Lookup): expressions.extend(expr.get_source_expressions()) elif isinstance(expr, Ref): if expr.refs not in select_aliases: expressions.extend(expr.get_source_expressions()) else: num_qual_alias = len(qual_aliases) select_alias = f"qual{num_qual_alias}" qual_aliases.add(select_alias) inner_query.add_annotation(expr, select_alias) replacements[expr] = select_alias collect_replacements(list(self.qualify.leaves())) self.qualify = self.qualify.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) order_by = [] for order_by_expr, *_ in self.get_order_by(): collect_replacements(order_by_expr.get_source_expressions()) order_by.append( order_by_expr.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) ) inner_query_compiler = inner_query.get_compiler( self.using, elide_empty=self.elide_empty ) inner_sql, inner_params = inner_query_compiler.as_sql( # The limits must be applied to the outer query to avoid pruning # results too eagerly. with_limits=False, # Force unique aliasing of selected columns to avoid collisions # and make rhs predicates referencing easier. with_col_aliases=True, ) qualify_sql, qualify_params = self.compile(self.qualify) result = [ "SELECT * FROM (", inner_sql, ")", self.connection.ops.quote_name("qualify"), "WHERE", qualify_sql, ] if qual_aliases: # If some select aliases were unmasked for filtering purposes they # must be masked back. cols = [self.connection.ops.quote_name(alias) for alias in select.values()] result = [ "SELECT", ", ".join(cols), "FROM (", *result, ")", self.connection.ops.quote_name("qualify_mask"), ] params = list(inner_params) + qualify_params # As the SQL spec is unclear on whether or not derived tables # ordering must propagate it has to be explicitly repeated on the # outer-most query to ensure it's preserved. if order_by: ordering_sqls = [] for ordering in order_by: ordering_sql, ordering_params = self.compile(ordering) ordering_sqls.append(ordering_sql) params.extend(ordering_params) result.extend(["ORDER BY", ", ".join(ordering_sqls)]) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup( with_col_aliases=with_col_aliases, ) for_update_part = None # Is a LIMIT/OFFSET clause needed? with_limit_offset = with_limits and self.query.is_sliced combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, "supports_select_{}".format(combinator)): raise NotSupportedError( "{} is not supported on this database backend.".format( combinator ) ) result, params = self.get_combinator_sql( combinator, self.query.combinator_all ) elif self.qualify: result, params = self.get_qualify_sql() order_by = None else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). 
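                # (Editorial note, not original) e.g. order_by("author__name")
                # may add a JOIN while ordering is resolved in pre_sql_setup(),
                # so building the FROM clause any earlier could miss tables
                # referenced only by the ordering.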
from_, f_params = self.get_from_clause() try: where, w_params = ( self.compile(self.where) if self.where is not None else ("", []) ) except EmptyResultSet: if self.elide_empty: raise # Use a predicate that's always False. where, w_params = "0 = 1", [] except FullResultSet: where, w_params = "", [] try: having, h_params = ( self.compile(self.having) if self.having is not None else ("", []) ) except FullResultSet: having, h_params = "", [] result = ["SELECT"] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = "%s AS %s" % ( s_sql, self.connection.ops.quote_name(alias), ) params.extend(s_params) out_cols.append(s_sql) result += [", ".join(out_cols)] if from_: result += ["FROM", *from_] elif self.connection.features.bare_select_suffix: result += [self.connection.features.bare_select_suffix] params.extend(f_params) if self.query.select_for_update and features.has_select_for_update: if ( self.connection.get_autocommit() # Don't raise an exception when database doesn't # support transactions, as it's a noop. and features.supports_transactions ): raise TransactionManagementError( "select_for_update cannot be used outside of a transaction." ) if ( with_limit_offset and not features.supports_select_for_update_with_limit ): raise NotSupportedError( "LIMIT/OFFSET is not supported with " "select_for_update on this database backend." ) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of no_key = self.query.select_for_no_key_update # If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the # backend doesn't support it, raise NotSupportedError to # prevent a possible deadlock. if nowait and not features.has_select_for_update_nowait: raise NotSupportedError( "NOWAIT is not supported on this database backend." ) elif skip_locked and not features.has_select_for_update_skip_locked: raise NotSupportedError( "SKIP LOCKED is not supported on this database backend." ) elif of and not features.has_select_for_update_of: raise NotSupportedError( "FOR UPDATE OF is not supported on this database backend." ) elif no_key and not features.has_select_for_no_key_update: raise NotSupportedError( "FOR NO KEY UPDATE is not supported on this " "database backend." ) for_update_part = self.connection.ops.for_update_sql( nowait=nowait, skip_locked=skip_locked, of=self.get_select_for_update_of_arguments(), no_key=no_key, ) if for_update_part and features.for_update_after_from: result.append(for_update_part) if where: result.append("WHERE %s" % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError( "annotate() + distinct(fields) is not implemented." 
                        )
                    order_by = order_by or self.connection.ops.force_no_ordering()
                    result.append("GROUP BY %s" % ", ".join(grouping))
                    if self._meta_ordering:
                        order_by = None
                if having:
                    result.append("HAVING %s" % having)
                    params.extend(h_params)

            if self.query.explain_info:
                result.insert(
                    0,
                    self.connection.ops.explain_query_prefix(
                        self.query.explain_info.format,
                        **self.query.explain_info.options,
                    ),
                )

            if order_by:
                ordering = []
                for _, (o_sql, o_params, _) in order_by:
                    ordering.append(o_sql)
                    params.extend(o_params)
                order_by_sql = "ORDER BY %s" % ", ".join(ordering)
                if combinator and features.requires_compound_order_by_subquery:
                    result = ["SELECT * FROM (", *result, ")", order_by_sql]
                else:
                    result.append(order_by_sql)

            if with_limit_offset:
                result.append(
                    self.connection.ops.limit_offset_sql(
                        self.query.low_mark, self.query.high_mark
                    )
                )

            if for_update_part and not features.for_update_after_from:
                result.append(for_update_part)

            if self.query.subquery and extra_select:
                # If the query is used as a subquery, the extra selects would
                # result in more columns than the left-hand side expression is
                # expecting. This can happen when a subquery uses a
                # combination of order_by() and distinct(), forcing the
                # ordering expressions to be selected as well. Wrap the query
                # in another subquery to exclude extraneous selects.
                sub_selects = []
                sub_params = []
                for index, (select, _, alias) in enumerate(self.select, start=1):
                    if alias:
                        sub_selects.append(
                            "%s.%s"
                            % (
                                self.connection.ops.quote_name("subquery"),
                                self.connection.ops.quote_name(alias),
                            )
                        )
                    else:
                        select_clone = select.relabeled_clone(
                            {select.alias: "subquery"}
                        )
                        subselect, subparams = select_clone.as_sql(
                            self, self.connection
                        )
                        sub_selects.append(subselect)
                        sub_params.extend(subparams)
                return "SELECT %s FROM (%s) subquery" % (
                    ", ".join(sub_selects),
                    " ".join(result),
                ), tuple(sub_params + params)

            return " ".join(result), tuple(params)
        finally:
            # Finally do cleanup - get rid of the joins we created above.
            self.query.reset_refcounts(refcounts_before)

    def get_default_columns(
        self, select_mask, start_alias=None, opts=None, from_parent=None
    ):
        """
        Compute the default columns for selecting every field in the base
        model. Will sometimes be called to pull in related models (e.g. via
        select_related), in which case "opts" and "start_alias" will be given
        to provide a starting point for the traversal.

        Return a list of column expressions suitable for inclusion in the
        SELECT clause of this query.
        """
        result = []
        if opts is None:
            if (opts := self.query.get_meta()) is None:
                return result
        start_alias = start_alias or self.query.get_initial_alias()
        # The 'seen_models' is used to optimize checking the needed parent
        # alias for a given field. This also includes None -> start_alias to
        # be used by local fields.
        seen_models = {None: start_alias}

        for field in opts.concrete_fields:
            model = field.model._meta.concrete_model
            # A proxy model will have a different model and concrete_model.
            # We will assign None if the field belongs to this model.
            if model == opts.model:
                model = None
            if (
                from_parent
                and model is not None
                and issubclass(
                    from_parent._meta.concrete_model, model._meta.concrete_model
                )
            ):
                # Avoid loading data for already loaded parents.
                # We end up here in the case select_related() resolution
                # proceeds from parent model to child model.
In that case the # parent model data is already present in the SELECT clause, # and we want to avoid reloading the same data again. continue if select_mask and field not in select_mask: continue alias = self.query.join_parent_model(opts, model, start_alias, seen_models) column = field.get_col(alias) result.append(column) return result def get_distinct(self): """ Return a quoted list of fields to use in DISTINCT ON part of the query. This method can alter the tables in the query, and thus it must be called before get_from_clause(). """ result = [] params = [] opts = self.query.get_meta() for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) _, targets, alias, joins, path, _, transform_function = self._setup_joins( parts, opts, None ) targets, alias, _ = self.query.trim_joins(targets, joins, path) for target in targets: if name in self.query.annotation_select: result.append(self.connection.ops.quote_name(name)) else: r, p = self.compile(transform_function(target, alias)) result.append(r) params.append(p) return result, params def find_ordering_name( self, name, opts, alias=None, default_order="ASC", already_seen=None ): """ Return the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) descending = order == "DESC" pieces = name.split(LOOKUP_SEP) ( field, targets, alias, joins, path, opts, transform_function, ) = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model unless it is the pk # shortcut or the attribute name of the field that is specified or # there are transforms to process. if ( field.is_relation and opts.ordering and getattr(field, "attname", None) != pieces[-1] and name != "pk" and not getattr(transform_function, "has_transforms", False) ): # Firstly, avoid infinite loops. already_seen = already_seen or set() join_tuple = tuple( getattr(self.query.alias_map[j], "join_cols", None) for j in joins ) if join_tuple in already_seen: raise FieldError("Infinite loop caused by ordering.") already_seen.add(join_tuple) results = [] for item in opts.ordering: if hasattr(item, "resolve_expression") and not isinstance( item, OrderBy ): item = item.desc() if descending else item.asc() if isinstance(item, OrderBy): results.append( (item.prefix_references(f"{name}{LOOKUP_SEP}"), False) ) continue results.extend( (expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref) for expr, is_ref in self.find_ordering_name( item, opts, alias, order, already_seen ) ) return results targets, alias, _ = self.query.trim_joins(targets, joins, path) return [ (OrderBy(transform_function(t, alias), descending=descending), False) for t in targets ] def _setup_joins(self, pieces, opts, alias): """ Helper method for get_order_by() and get_distinct(). get_ordering() and get_distinct() must produce same target columns on same input, as the prefixes of get_ordering() and get_distinct() must match. Executing SQL where this is not true is an error. 
""" alias = alias or self.query.get_initial_alias() field, targets, opts, joins, path, transform_function = self.query.setup_joins( pieces, opts, alias ) alias = joins[-1] return field, targets, alias, joins, path, opts, transform_function def get_from_clause(self): """ Return a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Subclasses, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables that are needed. This means the select columns, ordering, and distinct must be done first. """ result = [] params = [] for alias in tuple(self.query.alias_map): if not self.query.alias_refcount[alias]: continue try: from_clause = self.query.alias_map[alias] except KeyError: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue clause_sql, clause_params = self.compile(from_clause) result.append(clause_sql) params.extend(clause_params) for t in self.query.extra_tables: alias, _ = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # call increments the refcount, so an alias refcount of one means # this is the only reference). if ( alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1 ): result.append(", %s" % self.quote_name_unless_alias(alias)) return result, params def get_related_selections( self, select, select_mask, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None, ): """ Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ def _get_field_choices(): direct_choices = (f.name for f in opts.fields if f.is_relation) reverse_choices = ( f.field.related_query_name() for f in opts.related_objects if f.field.unique ) return chain( direct_choices, reverse_choices, self.query._filtered_relations ) related_klass_infos = [] if not restricted and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() # Setup for the case when only particular related fields should be # included in the related selection. fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info["related_klass_infos"] = related_klass_infos for f in opts.fields: fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. 
" "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or "(none)", ) ) else: next = False if not select_related_descend(f, restricted, requested, select_mask): continue related_select_mask = select_mask.get(f) or {} klass_info = { "model": f.remote_field.model, "field": f, "reverse": False, "local_setter": f.set_cached_value, "remote_setter": f.remote_field.set_cached_value if f.unique else lambda x, y: None, "from_parent": False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=f.remote_field.model._meta ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_klass_infos = self.get_related_selections( select, related_select_mask, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: related_select_mask = select_mask.get(f) or {} if not select_related_descend( f, restricted, requested, related_select_mask, reverse=True ): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins( [related_field_name], opts, root_alias ) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { "model": model, "field": f, "reverse": True, "local_setter": f.remote_field.set_cached_value, "remote_setter": f.set_cached_value, "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, related_select_mask, model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) def local_setter(obj, from_obj): # Set a reverse fk object when relation is non-empty. if from_obj: f.remote_field.set_cached_value(from_obj, obj) def remote_setter(name, obj, from_obj): setattr(from_obj, name, obj) for name in list(requested): # Filtered relations work only on the topmost level. 
if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) f, _, join_opts, joins, _, _ = self.query.setup_joins( [name], opts, root_alias ) model = join_opts.model alias = joins[-1] from_parent = ( issubclass(model, opts.model) and model is not opts.model ) klass_info = { "model": model, "field": f, "reverse": True, "local_setter": local_setter, "remote_setter": partial(remote_setter, name), "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] field_select_mask = select_mask.get((name, f)) or {} columns = self.get_default_columns( field_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, field_select_mask, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( "Invalid field name(s) given in select_related: %s. " "Choices are: %s" % ( ", ".join(invalid_fields), ", ".join(_get_field_choices()) or "(none)", ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_parent_klass_info(klass_info): concrete_model = klass_info["model"]._meta.concrete_model for parent_model, parent_link in concrete_model._meta.parents.items(): parent_list = parent_model._meta.get_parent_list() yield { "model": parent_model, "field": parent_link, "reverse": False, "select_fields": [ select_index for select_index in klass_info["select_fields"] # Selected columns from a model or its parents. if ( self.select[select_index][0].target.model == parent_model or self.select[select_index][0].target.model in parent_list ) ], } def _get_first_selected_col_from_model(klass_info): """ Find the first selected column from a model. If it doesn't exist, don't lock a model. select_fields is filled recursively, so it also contains fields from the parent models. 
""" concrete_model = klass_info["model"]._meta.concrete_model for select_index in klass_info["select_fields"]: if self.select[select_index][0].target.model == concrete_model: return self.select[select_index][0] def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield "self" else: field = klass_info["field"] if klass_info["reverse"]: field = field.remote_field path = parent_path + [field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in _get_parent_klass_info(klass_info) ) queue.extend( (path, klass_info) for klass_info in klass_info.get("related_klass_infos", []) ) if not self.klass_info: return [] result = [] invalid_names = [] for name in self.query.select_for_update_of: klass_info = self.klass_info if name == "self": col = _get_first_selected_col_from_model(klass_info) else: for part in name.split(LOOKUP_SEP): klass_infos = ( *klass_info.get("related_klass_infos", []), *_get_parent_klass_info(klass_info), ) for related_klass_info in klass_infos: field = related_klass_info["field"] if related_klass_info["reverse"]: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue col = _get_first_selected_col_from_model(klass_info) if col is not None: if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( "Invalid field name(s) given in select_for_update(of=(...)): %s. " "Only relational fields followed in the query are allowed. " "Choices are: %s." % ( ", ".join(invalid_names), ", ".join(_get_field_choices()), ) ) return result def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def results_iter( self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE, ): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql( MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size ) fields = [s[0] for s in self.select[0 : self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ return bool(self.execute_sql(SINGLE)) def execute_sql( self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE ): """ Run the query against the database and return the result(s). 
        The return value is a single data item if result_type is SINGLE, or an
        iterator over the results if the result_type is MULTI.

        result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
        is needed, as the filters describe an empty set. In that case, None is
        returned, to avoid any unnecessary database interaction.
        """
        result_type = result_type or NO_RESULTS
        try:
            sql, params = self.as_sql()
            if not sql:
                raise EmptyResultSet
        except EmptyResultSet:
            if result_type == MULTI:
                return iter([])
            else:
                return
        if chunked_fetch:
            cursor = self.connection.chunked_cursor()
        else:
            cursor = self.connection.cursor()
        try:
            cursor.execute(sql, params)
        except Exception:
            # Might fail for server-side cursors (e.g. connection closed)
            cursor.close()
            raise

        if result_type == CURSOR:
            # Give the caller the cursor to process and close.
            return cursor
        if result_type == SINGLE:
            try:
                val = cursor.fetchone()
                if val:
                    return val[0 : self.col_count]
                return val
            finally:
                # done with the cursor
                cursor.close()
        if result_type == NO_RESULTS:
            cursor.close()
            return

        result = cursor_iter(
            cursor,
            self.connection.features.empty_fetchmany_value,
            self.col_count if self.has_extra_select else None,
            chunk_size,
        )
        if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
            # If we are using non-chunked reads, we return the same data
            # structure as normal, but ensure it is all read into memory
            # before going any further. Use chunked_fetch if requested,
            # unless the database doesn't support it.
            return list(result)
        return result

    def as_subquery_condition(self, alias, columns, compiler):
        qn = compiler.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name

        for index, select_col in enumerate(self.query.select):
            lhs_sql, lhs_params = self.compile(select_col)
            rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
            self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND)

        sql, params = self.as_sql()
        return "EXISTS (%s)" % sql, params

    def explain_query(self):
        result = list(self.execute_sql())
        # Some backends return 1 item tuples with strings, and others return
        # tuples with integers and strings. Flatten them out into strings.
        format_ = self.query.explain_info.format
        output_formatter = json.dumps if format_ and format_.lower() == "json" else str
        for row in result[0]:
            if not isinstance(row, str):
                yield " ".join(output_formatter(c) for c in row)
            else:
                yield row


class SQLInsertCompiler(SQLCompiler):
    returning_fields = None
    returning_params = ()

    def field_as_sql(self, field, val):
        """
        Take a field and a value intended to be saved on that field, and
        return placeholder SQL and accompanying params. Check for raw values,
        expressions, and fields with get_placeholder() defined in that order.

        When field is None, consider the value raw and use it as the
        placeholder, with no corresponding parameters returned.
        """
        if field is None:
            # A field value of None means the value is raw.
            sql, params = val, []
        elif hasattr(val, "as_sql"):
            # This is an expression, let's compile it.
            sql, params = self.compile(val)
        elif hasattr(field, "get_placeholder"):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = "%s", [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, "resolve_expression"): value = value.resolve_expression( self.query, allow_joins=False, for_save=True ) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' "can only be used to update, not to insert." % (value, field) ) if value.contains_aggregate: raise FieldError( "Aggregate functions are not allowed in this query " "(%s=%r)." % (field.name, value) ) if value.contains_over_clause: raise FieldError( "Window expressions are not allowed in this query (%s=%r)." % (field.name, value) ) else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. """ if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). 
qn = self.connection.ops.quote_name opts = self.query.get_meta() insert_statement = self.connection.ops.insert_statement( on_conflict=self.query.on_conflict, ) result = ["%s %s" % (insert_statement, qn(opts.db_table))] fields = self.query.fields or [opts.pk] result.append("(%s)" % ", ".join(qn(f.column) for f in fields)) if self.query.fields: value_rows = [ [ self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields ] for obj in self.query.objs ] else: # An empty object. value_rows = [ [self.connection.ops.pk_default_value()] for _ in self.query.objs ] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = ( not self.returning_fields and self.connection.features.has_bulk_insert ) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql( fields, self.query.on_conflict, self.query.update_fields, self.query.unique_fields, ) if ( self.returning_fields and self.connection.features.can_return_columns_from_insert ): if self.connection.features.can_return_rows_from_bulk_insert: result.append( self.connection.ops.bulk_insert_sql(fields, placeholder_rows) ) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) # Skip empty r_sql to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. r_sql, self.returning_params = self.connection.ops.return_insert_columns( self.returning_fields ) if r_sql: result.append(r_sql) params += [self.returning_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, returning_fields=None): assert not ( returning_fields and len(self.query.objs) != 1 and not self.connection.features.can_return_rows_from_bulk_insert ) opts = self.query.get_meta() self.returning_fields = returning_fields with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not self.returning_fields: return [] if ( self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1 ): rows = self.connection.ops.fetch_returned_insert_rows(cursor) elif self.connection.features.can_return_columns_from_insert: assert len(self.query.objs) == 1 rows = [ self.connection.ops.fetch_returned_insert_columns( cursor, self.returning_params, ) ] else: rows = [ ( self.connection.ops.last_insert_id( cursor, opts.db_table, opts.pk.column, ), ) ] cols = [field.get_col(opts.db_table) for field in self.returning_fields] converters = self.get_converters(cols) if converters: rows = list(self.apply_converters(rows, converters)) return rows class SQLDeleteCompiler(SQLCompiler): @cached_property def single_alias(self): # Ensure base table is in aliases. 
self.query.get_initial_alias() return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1 @classmethod def _expr_refs_base_model(cls, expr, base_model): if isinstance(expr, Query): return expr.model == base_model if not hasattr(expr, "get_source_expressions"): return False return any( cls._expr_refs_base_model(source_expr, base_model) for source_expr in expr.get_source_expressions() ) @cached_property def contains_self_reference_subquery(self): return any( self._expr_refs_base_model(expr, self.query.model) for expr in chain( self.query.annotations.values(), self.query.where.children ) ) def _as_sql(self, query): delete = "DELETE FROM %s" % self.quote_name_unless_alias(query.base_table) try: where, params = self.compile(query.where) except FullResultSet: return delete, () return f"{delete} WHERE {where}", tuple(params) def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ if self.single_alias and not self.contains_self_reference_subquery: return self._as_sql(self.query) innerq = self.query.clone() innerq.__class__ = Query innerq.clear_select_clause() pk = self.query.model._meta.pk innerq.select = [pk.get_col(self.query.get_initial_alias())] outerq = Query(self.query.model) if not self.connection.features.update_can_self_select: # Force the materialization of the inner query to allow reference # to the target table on MySQL. sql, params = innerq.get_compiler(connection=self.connection).as_sql() innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params) outerq.add_filter("pk__in", innerq) return self._as_sql(outerq) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return "", () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, "resolve_expression"): val = val.resolve_expression( self.query, allow_joins=False, for_save=True ) if val.contains_aggregate: raise FieldError( "Aggregate functions are not allowed in this query " "(%s=%r)." % (field.name, val) ) if val.contains_over_clause: raise FieldError( "Window expressions are not allowed in this query " "(%s=%r)." % (field.name, val) ) elif hasattr(val, "prepare_database_save"): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. if hasattr(field, "get_placeholder"): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = "%s" name = field.column if hasattr(val, "as_sql"): sql, params = self.compile(val) values.append("%s = %s" % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append("%s = %s" % (qn(name), placeholder)) update_params.append(val) else: values.append("%s = NULL" % qn(name)) table = self.query.base_table result = [ "UPDATE %s SET" % qn(table), ", ".join(values), ] try: where, params = self.compile(self.query.where) except FullResultSet: params = [] else: result.append("WHERE %s" % where) return " ".join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. 
Return the number of rows affected by the primary update query. The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(force=True) query.extra = {} query.select = [] meta = query.get_meta() fields = [meta.pk.name] related_ids_index = [] for related in self.query.related_updates: if all( path.join_field.primary_key for path in meta.get_path_to_parent(related) ): # If a primary key chain exists to the targeted related update, # then the meta.pk value can be used for it. related_ids_index.append((related, 0)) else: # This branch will only be reached when updating a field of an # ancestor that is not part of the primary key chain of a MTI # tree. related_ids_index.append((related, len(fields))) fields.append(related._meta.pk.name) query.add_fields(fields) super().pre_sql_setup() must_pre_select = ( count > 1 and not self.connection.features.update_can_self_select ) # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.clear_where() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] related_ids = collections.defaultdict(list) for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) for parent, index in related_ids_index: related_ids[parent].extend(r[index] for r in rows) self.query.add_filter("pk__in", idents) self.query.related_ids = related_ids else: # The fast path. Filters and updates in one query. self.query.add_filter("pk__in", query) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. 
""" sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation) ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ", ".join(sql) params = tuple(params) inner_query_sql, inner_query_params = self.query.inner_query.get_compiler( self.using, elide_empty=self.elide_empty, ).as_sql(with_col_aliases=True) sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql) params += inner_query_params return sql, params def cursor_iter(cursor, sentinel, col_count, itersize): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. """ try: for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): yield rows if col_count is None else [r[:col_count] for r in rows] finally: cursor.close()
""" Code to manage the creation and SQL rendering of 'where' constraints. """ import operator from functools import reduce from django.core.exceptions import EmptyResultSet, FullResultSet from django.db.models.expressions import Case, When from django.db.models.lookups import Exact from django.utils import tree from django.utils.functional import cached_property # Connection types AND = "AND" OR = "OR" XOR = "XOR" class WhereNode(tree.Node): """ An SQL WHERE clause. The class is tied to the Query class that created it (in order to create the correct SQL). A child is usually an expression producing boolean values. Most likely the expression is a Lookup instance. However, a child could also be any class with as_sql() and either relabeled_clone() method or relabel_aliases() and clone() methods and contains_aggregate attribute. """ default = AND resolved = False conditional = True def split_having_qualify(self, negated=False, must_group_by=False): """ Return three possibly None nodes: one for those parts of self that should be included in the WHERE clause, one for those parts of self that must be included in the HAVING clause, and one for those parts that refer to window functions. """ if not self.contains_aggregate and not self.contains_over_clause: return self, None, None in_negated = negated ^ self.negated # Whether or not children must be connected in the same filtering # clause (WHERE > HAVING > QUALIFY) to maintain logical semantic. must_remain_connected = ( (in_negated and self.connector == AND) or (not in_negated and self.connector == OR) or self.connector == XOR ) if ( must_remain_connected and self.contains_aggregate and not self.contains_over_clause ): # It's must cheaper to short-circuit and stash everything in the # HAVING clause than split children if possible. return None, self, None where_parts = [] having_parts = [] qualify_parts = [] for c in self.children: if hasattr(c, "split_having_qualify"): where_part, having_part, qualify_part = c.split_having_qualify( in_negated, must_group_by ) if where_part is not None: where_parts.append(where_part) if having_part is not None: having_parts.append(having_part) if qualify_part is not None: qualify_parts.append(qualify_part) elif c.contains_over_clause: qualify_parts.append(c) elif c.contains_aggregate: having_parts.append(c) else: where_parts.append(c) if must_remain_connected and qualify_parts: # Disjunctive heterogeneous predicates can be pushed down to # qualify as long as no conditional aggregation is involved. if not where_parts or (where_parts and not must_group_by): return None, None, self elif where_parts: # In theory this should only be enforced when dealing with # where_parts containing predicates against multi-valued # relationships that could affect aggregation results but this # is complex to infer properly. raise NotImplementedError( "Heterogeneous disjunctive predicates against window functions are " "not implemented when performing conditional aggregation." ) where_node = ( self.create(where_parts, self.connector, self.negated) if where_parts else None ) having_node = ( self.create(having_parts, self.connector, self.negated) if having_parts else None ) qualify_node = ( self.create(qualify_parts, self.connector, self.negated) if qualify_parts else None ) return where_node, having_node, qualify_node def as_sql(self, compiler, connection): """ Return the SQL version of the where clause and the value to be substituted in. 
        Return '', [] if this node matches everything, None, [] if this node
        is empty, and raise EmptyResultSet if this node can't match anything.
        """
        result = []
        result_params = []
        if self.connector == AND:
            full_needed, empty_needed = len(self.children), 1
        else:
            full_needed, empty_needed = 1, len(self.children)

        if self.connector == XOR and not connection.features.supports_logical_xor:
            # Convert if the database doesn't support XOR:
            #   a XOR b XOR c XOR ...
            # to:
            #   (a OR b OR c OR ...) AND (a + b + c + ...) == 1
            lhs = self.__class__(self.children, OR)
            rhs_sum = reduce(
                operator.add,
                (Case(When(c, then=1), default=0) for c in self.children),
            )
            rhs = Exact(1, rhs_sum)
            return self.__class__([lhs, rhs], AND, self.negated).as_sql(
                compiler, connection
            )

        for child in self.children:
            try:
                sql, params = compiler.compile(child)
            except EmptyResultSet:
                empty_needed -= 1
            except FullResultSet:
                full_needed -= 1
            else:
                if sql:
                    result.append(sql)
                    result_params.extend(params)
                else:
                    full_needed -= 1
            # Check whether this node matches nothing or everything: once
            # enough children have proven empty or full, the counts above
            # determine the outcome, so short-circuit accordingly.
            if empty_needed == 0:
                if self.negated:
                    raise FullResultSet
                else:
                    raise EmptyResultSet
            if full_needed == 0:
                if self.negated:
                    raise EmptyResultSet
                else:
                    raise FullResultSet
        conn = " %s " % self.connector
        sql_string = conn.join(result)
        if not sql_string:
            raise FullResultSet
        if self.negated:
            # Some backends (Oracle at least) need parentheses around the
            # inner SQL in the negated case, even if the inner SQL contains
            # just a single expression.
            sql_string = "NOT (%s)" % sql_string
        elif len(result) > 1 or self.resolved:
            sql_string = "(%s)" % sql_string
        return sql_string, result_params

    def get_group_by_cols(self):
        cols = []
        for child in self.children:
            cols.extend(child.get_group_by_cols())
        return cols

    def get_source_expressions(self):
        return self.children[:]

    def set_source_expressions(self, children):
        assert len(children) == len(self.children)
        self.children = children

    def relabel_aliases(self, change_map):
        """
        Relabel the alias values of any children. 'change_map' is a dictionary
        mapping old (current) alias values to the new values.
""" for pos, child in enumerate(self.children): if hasattr(child, "relabel_aliases"): # For example another WhereNode child.relabel_aliases(change_map) elif hasattr(child, "relabeled_clone"): self.children[pos] = child.relabeled_clone(change_map) def clone(self): clone = self.create(connector=self.connector, negated=self.negated) for child in self.children: if hasattr(child, "clone"): child = child.clone() clone.children.append(child) return clone def relabeled_clone(self, change_map): clone = self.clone() clone.relabel_aliases(change_map) return clone def replace_expressions(self, replacements): if replacement := replacements.get(self): return replacement clone = self.create(connector=self.connector, negated=self.negated) for child in self.children: clone.children.append(child.replace_expressions(replacements)) return clone @classmethod def _contains_aggregate(cls, obj): if isinstance(obj, tree.Node): return any(cls._contains_aggregate(c) for c in obj.children) return obj.contains_aggregate @cached_property def contains_aggregate(self): return self._contains_aggregate(self) @classmethod def _contains_over_clause(cls, obj): if isinstance(obj, tree.Node): return any(cls._contains_over_clause(c) for c in obj.children) return obj.contains_over_clause @cached_property def contains_over_clause(self): return self._contains_over_clause(self) @property def is_summary(self): return any(child.is_summary for child in self.children) @staticmethod def _resolve_leaf(expr, query, *args, **kwargs): if hasattr(expr, "resolve_expression"): expr = expr.resolve_expression(query, *args, **kwargs) return expr @classmethod def _resolve_node(cls, node, query, *args, **kwargs): if hasattr(node, "children"): for child in node.children: cls._resolve_node(child, query, *args, **kwargs) if hasattr(node, "lhs"): node.lhs = cls._resolve_leaf(node.lhs, query, *args, **kwargs) if hasattr(node, "rhs"): node.rhs = cls._resolve_leaf(node.rhs, query, *args, **kwargs) def resolve_expression(self, *args, **kwargs): clone = self.clone() clone._resolve_node(clone, *args, **kwargs) clone.resolved = True return clone @cached_property def output_field(self): from django.db.models import BooleanField return BooleanField() @property def _output_field_or_none(self): return self.output_field def select_format(self, compiler, sql, params): # Wrap filters with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END" return sql, params def get_db_converters(self, connection): return self.output_field.get_db_converters(connection) def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def leaves(self): for child in self.children: if isinstance(child, WhereNode): yield from child.leaves() else: yield child class NothingNode: """A node that matches nothing.""" contains_aggregate = False contains_over_clause = False def as_sql(self, compiler=None, connection=None): raise EmptyResultSet class ExtraWhere: # The contents are a black box - assume no aggregates or windows are used. 
    contains_aggregate = False
    contains_over_clause = False

    def __init__(self, sqls, params):
        self.sqls = sqls
        self.params = params

    def as_sql(self, compiler=None, connection=None):
        sqls = ["(%s)" % sql for sql in self.sqls]
        return " AND ".join(sqls), list(self.params or ())


class SubqueryConstraint:
    # Even if aggregates or windows would be used in a subquery,
    # the outer query isn't interested in those.
    contains_aggregate = False
    contains_over_clause = False

    def __init__(self, alias, columns, targets, query_object):
        self.alias = alias
        self.columns = columns
        self.targets = targets
        query_object.clear_ordering(clear_default=True)
        self.query_object = query_object

    def as_sql(self, compiler, connection):
        query = self.query_object
        query.set_values(self.targets)
        query_compiler = query.get_compiler(connection=connection)
        return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
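

# Editor's example (hedged; not Django code): a self-contained sketch of the
# empty/full counting logic WhereNode.as_sql() uses above. Children are
# modeled as plain SQL strings, or the markers "EMPTY"/"FULL" standing in for
# children that raise EmptyResultSet/FullResultSet when compiled.
def _demo_combine(children, connector="AND", negated=False):
    if connector == "AND":
        full_needed, empty_needed = len(children), 1
    else:
        full_needed, empty_needed = 1, len(children)
    result = []
    for child in children:
        if child == "EMPTY":
            empty_needed -= 1
        elif child == "FULL":
            full_needed -= 1
        else:
            result.append(child)
        if empty_needed == 0:
            return "FULL" if negated else "EMPTY"
        if full_needed == 0:
            return "EMPTY" if negated else "FULL"
    sql = (" %s " % connector).join(result)
    return "NOT (%s)" % sql if negated else sql


# _demo_combine(["a = 1", "EMPTY"], "AND") -> "EMPTY": one impossible child
#     makes the whole conjunction impossible.
# _demo_combine(["a = 1", "FULL"], "OR") -> "FULL": one always-true child
#     satisfies the whole disjunction.
# _demo_combine(["a = 1", "FULL"], "AND") -> "a = 1": always-true children
#     simply drop out of a conjunction.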
""" Useful auxiliary data structures for query construction. Not useful outside the SQL domain. """ from django.core.exceptions import FullResultSet from django.db.models.sql.constants import INNER, LOUTER class MultiJoin(Exception): """ Used by join construction code to indicate the point at which a multi-valued join was attempted (if the caller wants to treat that exceptionally). """ def __init__(self, names_pos, path_with_names): self.level = names_pos # The path travelled, this includes the path to the multijoin. self.names_with_path = path_with_names class Empty: pass class Join: """ Used by sql.Query and sql.SQLCompiler to generate JOIN clauses into the FROM entry. For example, the SQL generated could be LEFT OUTER JOIN "sometable" T1 ON ("othertable"."sometable_id" = "sometable"."id") This class is primarily used in Query.alias_map. All entries in alias_map must be Join compatible by providing the following attributes and methods: - table_name (string) - table_alias (possible alias for the table, can be None) - join_type (can be None for those entries that aren't joined from anything) - parent_alias (which table is this join's parent, can be None similarly to join_type) - as_sql() - relabeled_clone() """ def __init__( self, table_name, parent_alias, table_alias, join_type, join_field, nullable, filtered_relation=None, ): # Join table self.table_name = table_name self.parent_alias = parent_alias # Note: table_alias is not necessarily known at instantiation time. self.table_alias = table_alias # LOUTER or INNER self.join_type = join_type # A list of 2-tuples to use in the ON clause of the JOIN. # Each 2-tuple will create one join condition in the ON clause. self.join_cols = join_field.get_joining_columns() # Along which field (or ForeignObjectRel in the reverse join case) self.join_field = join_field # Is this join nullabled? self.nullable = nullable self.filtered_relation = filtered_relation def as_sql(self, compiler, connection): """ Generate the full LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params clause for this join. """ join_conditions = [] params = [] qn = compiler.quote_name_unless_alias qn2 = connection.ops.quote_name # Add a join condition for each pair of joining columns. for lhs_col, rhs_col in self.join_cols: join_conditions.append( "%s.%s = %s.%s" % ( qn(self.parent_alias), qn2(lhs_col), qn(self.table_alias), qn2(rhs_col), ) ) # Add a single condition inside parentheses for whatever # get_extra_restriction() returns. extra_cond = self.join_field.get_extra_restriction( self.table_alias, self.parent_alias ) if extra_cond: extra_sql, extra_params = compiler.compile(extra_cond) join_conditions.append("(%s)" % extra_sql) params.extend(extra_params) if self.filtered_relation: try: extra_sql, extra_params = compiler.compile(self.filtered_relation) except FullResultSet: pass else: join_conditions.append("(%s)" % extra_sql) params.extend(extra_params) if not join_conditions: # This might be a rel on the other end of an actual declared field. declared_field = getattr(self.join_field, "field", self.join_field) raise ValueError( "Join generated an empty ON clause. %s did not yield either " "joining columns or extra restrictions." 
% declared_field.__class__ ) on_clause_sql = " AND ".join(join_conditions) alias_str = ( "" if self.table_alias == self.table_name else (" %s" % self.table_alias) ) sql = "%s %s%s ON (%s)" % ( self.join_type, qn(self.table_name), alias_str, on_clause_sql, ) return sql, params def relabeled_clone(self, change_map): new_parent_alias = change_map.get(self.parent_alias, self.parent_alias) new_table_alias = change_map.get(self.table_alias, self.table_alias) if self.filtered_relation is not None: filtered_relation = self.filtered_relation.clone() filtered_relation.path = [ change_map.get(p, p) for p in self.filtered_relation.path ] else: filtered_relation = None return self.__class__( self.table_name, new_parent_alias, new_table_alias, self.join_type, self.join_field, self.nullable, filtered_relation=filtered_relation, ) @property def identity(self): return ( self.__class__, self.table_name, self.parent_alias, self.join_field, self.filtered_relation, ) def __eq__(self, other): if not isinstance(other, Join): return NotImplemented return self.identity == other.identity def __hash__(self): return hash(self.identity) def equals(self, other): # Ignore filtered_relation in equality check. return self.identity[:-1] == other.identity[:-1] def demote(self): new = self.relabeled_clone({}) new.join_type = INNER return new def promote(self): new = self.relabeled_clone({}) new.join_type = LOUTER return new class BaseTable: """ The BaseTable class is used for base table references in FROM clause. For example, the SQL "foo" in SELECT * FROM "foo" WHERE somecond could be generated by this class. """ join_type = None parent_alias = None filtered_relation = None def __init__(self, table_name, alias): self.table_name = table_name self.table_alias = alias def as_sql(self, compiler, connection): alias_str = ( "" if self.table_alias == self.table_name else (" %s" % self.table_alias) ) base_sql = compiler.quote_name_unless_alias(self.table_name) return base_sql + alias_str, [] def relabeled_clone(self, change_map): return self.__class__( self.table_name, change_map.get(self.table_alias, self.table_alias) ) @property def identity(self): return self.__class__, self.table_name, self.table_alias def __eq__(self, other): if not isinstance(other, BaseTable): return NotImplemented return self.identity == other.identity def __hash__(self): return hash(self.identity) def equals(self, other): return self.identity == other.identity
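

# Editor's example (hedged; not Django code): a minimal sketch of how
# Join.as_sql() assembles its ON clause from the join_cols pairs. The table
# and column names are invented; the real method quotes names through the
# compiler and may append extra or filtered-relation conditions.
def _demo_on_clause(parent_alias, table_alias, join_cols):
    conditions = [
        '"%s"."%s" = "%s"."%s"' % (parent_alias, lhs_col, table_alias, rhs_col)
        for lhs_col, rhs_col in join_cols
    ]
    return " AND ".join(conditions)


# _demo_on_clause("othertable", "sometable", [("sometable_id", "id")])
# -> '"othertable"."sometable_id" = "sometable"."id"', matching the example
# in the Join docstring above.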
from django.db import ProgrammingError
from django.utils.functional import cached_property


class BaseDatabaseFeatures:
    # An optional tuple indicating the minimum supported database version.
    minimum_database_version = None
    gis_enabled = False
    # Oracle can't group by LOB (large object) data types.
    allows_group_by_lob = True
    allows_group_by_selected_pks = False
    allows_group_by_refs = True
    empty_fetchmany_value = []
    update_can_self_select = True

    # Does the backend distinguish between '' and None?
    interprets_empty_strings_as_nulls = False

    # Does the backend allow inserting duplicate NULL rows in a nullable
    # unique field? All core backends implement this correctly, but other
    # databases such as SQL Server do not.
    supports_nullable_unique_constraints = True

    # Does the backend allow inserting duplicate rows when a unique_together
    # constraint exists and some fields are nullable but not all of them?
    supports_partially_nullable_unique_constraints = True
    # Does the backend support initially deferrable unique constraints?
    supports_deferrable_unique_constraints = False

    can_use_chunked_reads = True
    can_return_columns_from_insert = False
    can_return_rows_from_bulk_insert = False
    has_bulk_insert = True
    uses_savepoints = True
    can_release_savepoints = False

    # If True, don't use integer foreign keys referring to, e.g., positive
    # integer primary keys.
    related_fields_match_type = False
    allow_sliced_subqueries_with_in = True
    has_select_for_update = False
    has_select_for_update_nowait = False
    has_select_for_update_skip_locked = False
    has_select_for_update_of = False
    has_select_for_no_key_update = False
    # Does the database's SELECT FOR UPDATE OF syntax require a column rather
    # than a table?
    select_for_update_of_column = False

    # Does the default test database allow multiple connections?
    # Usually an indication that the test database is in-memory.
    test_db_allows_multiple_connections = True

    # Can an object be saved without an explicit primary key?
    supports_unspecified_pk = False

    # Can a fixture contain forward references? i.e., are
    # FK constraints checked at the end of a transaction, or
    # at the end of each save operation?
    supports_forward_references = True

    # Does the backend truncate names properly when they are too long?
    truncates_names = False

    # Is there a REAL datatype in addition to floats/doubles?
    has_real_datatype = False
    supports_subqueries_in_group_by = True

    # Does the backend ignore unnecessary ORDER BY clauses in subqueries?
    ignores_unnecessary_order_by_in_subqueries = True

    # Is there a true datatype for uuid?
    has_native_uuid_field = False

    # Is there a true datatype for timedeltas?
    has_native_duration_field = False

    # Does the database driver support same-type temporal data subtraction
    # by returning the type used to store DurationField?
    supports_temporal_subtraction = False

    # Does the __regex lookup support backreferencing and grouping?
    supports_regex_backreferencing = True

    # Can date/datetime lookups be performed using a string?
    supports_date_lookup_using_string = True

    # Can datetimes with timezones be used?
    supports_timezones = True

    # Does the database have a copy of the zoneinfo database?
    has_zoneinfo_database = True

    # When performing a GROUP BY, is an ORDER BY NULL required
    # to remove any ordering?
    requires_explicit_null_ordering_when_grouping = False

    # Does the backend order NULL values as largest or smallest?
    nulls_order_largest = False

    # Does the backend support NULLS FIRST and NULLS LAST in ORDER BY?
    supports_order_by_nulls_modifier = True

    # Does the backend order NULLS FIRST by default?
order_by_nulls_first = False # The database's limit on the number of query parameters. max_query_params = None # Can an object have an autoincrement primary key of 0? allows_auto_pk_0 = True # Do we need to NULL a ForeignKey out, or can the constraint check be # deferred can_defer_constraint_checks = False # Does the backend support tablespaces? Default to False because it isn't # in the SQL standard. supports_tablespaces = False # Does the backend reset sequences between tests? supports_sequence_reset = True # Can the backend introspect the default value of a column? can_introspect_default = True # Confirm support for introspected foreign keys # Every database can do this reliably, except MySQL, # which can't do it for MyISAM tables can_introspect_foreign_keys = True # Map fields which some backends may not be able to differentiate to the # field it's introspected as. introspected_field_types = { "AutoField": "AutoField", "BigAutoField": "BigAutoField", "BigIntegerField": "BigIntegerField", "BinaryField": "BinaryField", "BooleanField": "BooleanField", "CharField": "CharField", "DurationField": "DurationField", "GenericIPAddressField": "GenericIPAddressField", "IntegerField": "IntegerField", "PositiveBigIntegerField": "PositiveBigIntegerField", "PositiveIntegerField": "PositiveIntegerField", "PositiveSmallIntegerField": "PositiveSmallIntegerField", "SmallAutoField": "SmallAutoField", "SmallIntegerField": "SmallIntegerField", "TimeField": "TimeField", } # Can the backend introspect the column order (ASC/DESC) for indexes? supports_index_column_ordering = True # Does the backend support introspection of materialized views? can_introspect_materialized_views = False # Support for the DISTINCT ON clause can_distinct_on_fields = False # Does the backend prevent running SQL queries in broken transactions? atomic_transactions = True # Can we roll back DDL in a transaction? can_rollback_ddl = False # Does it support operations requiring references rename in a transaction? supports_atomic_references_rename = True # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE? supports_combined_alters = False # Does it support foreign keys? supports_foreign_keys = True # Can it create foreign key constraints inline when adding columns? can_create_inline_fk = True # Can an index be renamed? can_rename_index = False # Does it automatically index foreign keys? indexes_foreign_keys = True # Does it support CHECK constraints? supports_column_check_constraints = True supports_table_check_constraints = True # Does the backend support introspection of CHECK constraints? can_introspect_check_constraints = True # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value}) # parameter passing? Note this can be provided by the backend even if not # supported by the Python driver supports_paramstyle_pyformat = True # Does the backend require literal defaults, rather than parameterized ones? requires_literal_defaults = False # Does the backend require a connection reset after each material schema change? connection_persists_old_columns = False # What kind of error does the backend throw when accessing closed cursor? closed_cursor_error_class = ProgrammingError # Does 'a' LIKE 'A' match? has_case_insensitive_like = False # Suffix for backends that don't support "SELECT xxx;" queries. bare_select_suffix = "" # If NULL is implied on columns without needing to be explicitly specified implied_column_null = False # Does the backend support "select for update" queries with limit (and offset)? 
supports_select_for_update_with_limit = True # Does the backend ignore null expressions in GREATEST and LEAST queries unless # every expression is null? greatest_least_ignores_nulls = False # Can the backend clone databases for parallel test execution? # Defaults to False to allow third-party backends to opt-in. can_clone_databases = False # Does the backend consider table names with different casing to # be equal? ignores_table_name_case = False # Place FOR UPDATE right after FROM clause. Used on MSSQL. for_update_after_from = False # Combinatorial flags supports_select_union = True supports_select_intersection = True supports_select_difference = True supports_slicing_ordering_in_compound = False supports_parentheses_in_compound = True requires_compound_order_by_subquery = False # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate # expressions? supports_aggregate_filter_clause = False # Does the backend support indexing a TextField? supports_index_on_text_field = True # Does the backend support window expressions (expression OVER (...))? supports_over_clause = False supports_frame_range_fixed_distance = False only_supports_unbounded_with_preceding_and_following = False # Does the backend support CAST with precision? supports_cast_with_precision = True # How many second decimals does the database return when casting a value to # a type with time? time_cast_precision = 6 # SQL to create a procedure for use by the Django test suite. The # functionality of the procedure isn't important. create_test_procedure_without_params_sql = None create_test_procedure_with_int_param_sql = None # SQL to create a table with a composite primary key for use by the Django # test suite. create_test_table_with_composite_primary_key = None # Does the backend support keyword parameters for cursor.callproc()? supports_callproc_kwargs = False # What formats does the backend EXPLAIN syntax support? supported_explain_formats = set() # Does the backend support the default parameter in lead() and lag()? supports_default_in_lead_lag = True # Does the backend support ignoring constraint or uniqueness errors during # INSERT? supports_ignore_conflicts = True # Does the backend support updating rows on constraint or uniqueness errors # during INSERT? supports_update_conflicts = False supports_update_conflicts_with_target = False # Does this backend require casting the results of CASE expressions used # in UPDATE statements to ensure the expression has the correct type? requires_casted_case_in_updates = False # Does the backend support partial indexes (CREATE INDEX ... WHERE ...)? supports_partial_indexes = True supports_functions_in_partial_indexes = True # Does the backend support covering indexes (CREATE INDEX ... INCLUDE ...)? supports_covering_indexes = False # Does the backend support indexes on expressions? supports_expression_indexes = True # Does the backend treat COLLATE as an indexed expression? collate_as_index_expression = False # Does the database allow more than one constraint or index on the same # field(s)? allows_multiple_constraints_on_same_fields = True # Does the backend support boolean expressions in SELECT and GROUP BY # clauses? supports_boolean_expr_in_select_clause = True # Does the backend support comparing boolean expressions in WHERE clauses? # Eg: WHERE (price > 0) IS NOT NULL supports_comparing_boolean_expr = True # Does the backend support JSONField? supports_json_field = True # Can the backend introspect a JSONField? 
can_introspect_json_field = True # Does the backend support primitives in JSONField? supports_primitives_in_json_field = True # Is there a true datatype for JSON? has_native_json_field = False # Does the backend use PostgreSQL-style JSON operators like '->'? has_json_operators = False # Does the backend support __contains and __contained_by lookups for # a JSONField? supports_json_field_contains = True # Does value__d__contains={'f': 'g'} (without a list around the dict) match # {'d': [{'f': 'g'}]}? json_key_contains_list_matching_requires_list = False # Does the backend support JSONObject() database function? has_json_object_function = True # Does the backend support column collations? supports_collation_on_charfield = True supports_collation_on_textfield = True # Does the backend support non-deterministic collations? supports_non_deterministic_collations = True # Does the backend support the logical XOR operator? supports_logical_xor = False # Collation names for use by the Django test suite. test_collations = { "ci": None, # Case-insensitive. "cs": None, # Case-sensitive. "non_default": None, # Non-default. "swedish_ci": None, # Swedish case-insensitive. } # SQL template override for tests.aggregation.tests.NowUTC test_now_utc_template = None # A set of dotted paths to tests in Django's test suite that are expected # to fail on this database. django_test_expected_failures = set() # A map of reasons to sets of dotted paths to tests in Django's test suite # that should be skipped for this database. django_test_skips = {} def __init__(self, connection): self.connection = connection @cached_property def supports_explaining_query_execution(self): """Does this backend support explaining query execution?""" return self.connection.ops.explain_prefix is not None @cached_property def supports_transactions(self): """Confirm support for transactions.""" with self.connection.cursor() as cursor: cursor.execute("CREATE TABLE ROLLBACK_TEST (X INT)") self.connection.set_autocommit(False) cursor.execute("INSERT INTO ROLLBACK_TEST (X) VALUES (8)") self.connection.rollback() self.connection.set_autocommit(True) cursor.execute("SELECT COUNT(X) FROM ROLLBACK_TEST") (count,) = cursor.fetchone() cursor.execute("DROP TABLE ROLLBACK_TEST") return count == 0 def allows_group_by_selected_pks_on_model(self, model): if not self.allows_group_by_selected_pks: return False return model._meta.managed
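

# Editor's example (hedged; not Django code): the typical way a third-party
# backend customizes these flags is to subclass BaseDatabaseFeatures,
# overriding class attributes for static capabilities and using
# cached_property when the answer depends on the connected server. The class
# name and the `server_version` attribute below are hypothetical, not a real
# Django API.
class _DemoBackendFeatures(BaseDatabaseFeatures):
    # Static capability, known without talking to the server.
    supports_json_field = False

    @cached_property
    def supports_over_clause(self):
        # Dynamic capability: probed once per connection, then cached.
        # `server_version` is an assumed attribute of the hypothetical
        # connection wrapper.
        return self.connection.server_version >= (11, 0)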
import operator

from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property


class DatabaseFeatures(BaseDatabaseFeatures):
    empty_fetchmany_value = ()
    allows_group_by_selected_pks = True
    related_fields_match_type = True
    # MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME.
    allow_sliced_subqueries_with_in = False
    has_select_for_update = True
    supports_forward_references = False
    supports_regex_backreferencing = False
    supports_date_lookup_using_string = False
    supports_timezones = False
    requires_explicit_null_ordering_when_grouping = True
    can_release_savepoints = True
    atomic_transactions = False
    can_clone_databases = True
    supports_temporal_subtraction = True
    supports_slicing_ordering_in_compound = True
    supports_index_on_text_field = False
    supports_update_conflicts = True
    create_test_procedure_without_params_sql = """
        CREATE PROCEDURE test_procedure ()
        BEGIN
            DECLARE V_I INTEGER;
            SET V_I = 1;
        END;
    """
    create_test_procedure_with_int_param_sql = """
        CREATE PROCEDURE test_procedure (P_I INTEGER)
        BEGIN
            DECLARE V_I INTEGER;
            SET V_I = P_I;
        END;
    """
    create_test_table_with_composite_primary_key = """
        CREATE TABLE test_table_composite_pk (
            column_1 INTEGER NOT NULL,
            column_2 INTEGER NOT NULL,
            PRIMARY KEY(column_1, column_2)
        )
    """
    # Neither MySQL nor MariaDB support partial indexes.
    supports_partial_indexes = False
    # COLLATE must be wrapped in parentheses because MySQL treats COLLATE as an
    # indexed expression.
    collate_as_index_expression = True

    supports_order_by_nulls_modifier = False
    order_by_nulls_first = True
    supports_logical_xor = True

    @cached_property
    def minimum_database_version(self):
        if self.connection.mysql_is_mariadb:
            return (10, 4)
        else:
            return (8,)

    @cached_property
    def test_collations(self):
        charset = "utf8"
        if (
            self.connection.mysql_is_mariadb
            and self.connection.mysql_version >= (10, 6)
        ) or (
            not self.connection.mysql_is_mariadb
            and self.connection.mysql_version >= (8, 0, 30)
        ):
            # utf8 is an alias for utf8mb3 in MariaDB 10.6+ and MySQL 8.0.30+.
            charset = "utf8mb3"
        return {
            "ci": f"{charset}_general_ci",
            "non_default": f"{charset}_esperanto_ci",
            "swedish_ci": f"{charset}_swedish_ci",
        }

    test_now_utc_template = "UTC_TIMESTAMP(6)"

    @cached_property
    def django_test_skips(self):
        skips = {
            "This doesn't work on MySQL.": {
                "db_functions.comparison.test_greatest.GreatestTests."
                "test_coalesce_workaround",
                "db_functions.comparison.test_least.LeastTests."
                "test_coalesce_workaround",
            },
            "Running on MySQL requires utf8mb4 encoding (#18392).": {
                "model_fields.test_textfield.TextFieldTests.test_emoji",
                "model_fields.test_charfield.TestCharField.test_emoji",
            },
            "MySQL doesn't support functional indexes on a function that "
            "returns JSON": {
                "schema.tests.SchemaTests.test_func_index_json_key_transform",
            },
            "MySQL supports multiplying and dividing DurationFields by a "
            "scalar value but it's not implemented (#25287).": {
                "expressions.tests.FTimeDeltaTests.test_durationfield_multiply_divide",
            },
            "UPDATE ... ORDER BY syntax on MySQL/MariaDB does not support "
            "ordering by related fields.": {
                "update.tests.AdvancedTests."
                "test_update_ordered_by_inline_m2m_annotation",
                "update.tests.AdvancedTests.test_update_ordered_by_m2m_annotation",
            },
        }
        if self.connection.mysql_is_mariadb and (
            10,
            4,
            3,
        ) < self.connection.mysql_version < (10, 5, 2):
            skips.update(
                {
                    "https://jira.mariadb.org/browse/MDEV-19598": {
                        "schema.tests.SchemaTests."
"test_alter_not_unique_field_to_primary_key", }, } ) if self.connection.mysql_is_mariadb and ( 10, 4, 12, ) < self.connection.mysql_version < (10, 5): skips.update( { "https://jira.mariadb.org/browse/MDEV-22775": { "schema.tests.SchemaTests." "test_alter_pk_with_self_referential_field", }, } ) if not self.supports_explain_analyze: skips.update( { "MariaDB and MySQL >= 8.0.18 specific.": { "queries.test_explain.ExplainTests.test_mysql_analyze", }, } ) return skips @cached_property def _mysql_storage_engine(self): "Internal method used in Django tests. Don't rely on this from your code" return self.connection.mysql_server_data["default_storage_engine"] @cached_property def allows_auto_pk_0(self): """ Autoincrement primary key can be set to 0 if it doesn't generate new autoincrement values. """ return "NO_AUTO_VALUE_ON_ZERO" in self.connection.sql_mode @cached_property def update_can_self_select(self): return self.connection.mysql_is_mariadb and self.connection.mysql_version >= ( 10, 3, 2, ) @cached_property def can_introspect_foreign_keys(self): "Confirm support for introspected foreign keys" return self._mysql_storage_engine != "MyISAM" @cached_property def introspected_field_types(self): return { **super().introspected_field_types, "BinaryField": "TextField", "BooleanField": "IntegerField", "DurationField": "BigIntegerField", "GenericIPAddressField": "CharField", } @cached_property def can_return_columns_from_insert(self): return self.connection.mysql_is_mariadb and self.connection.mysql_version >= ( 10, 5, 0, ) can_return_rows_from_bulk_insert = property( operator.attrgetter("can_return_columns_from_insert") ) @cached_property def has_zoneinfo_database(self): return self.connection.mysql_server_data["has_zoneinfo_database"] @cached_property def is_sql_auto_is_null_enabled(self): return self.connection.mysql_server_data["sql_auto_is_null"] @cached_property def supports_over_clause(self): if self.connection.mysql_is_mariadb: return True return self.connection.mysql_version >= (8, 0, 2) supports_frame_range_fixed_distance = property( operator.attrgetter("supports_over_clause") ) @cached_property def supports_column_check_constraints(self): if self.connection.mysql_is_mariadb: return True return self.connection.mysql_version >= (8, 0, 16) supports_table_check_constraints = property( operator.attrgetter("supports_column_check_constraints") ) @cached_property def can_introspect_check_constraints(self): if self.connection.mysql_is_mariadb: return True return self.connection.mysql_version >= (8, 0, 16) @cached_property def has_select_for_update_skip_locked(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 6) return self.connection.mysql_version >= (8, 0, 1) @cached_property def has_select_for_update_nowait(self): if self.connection.mysql_is_mariadb: return True return self.connection.mysql_version >= (8, 0, 1) @cached_property def has_select_for_update_of(self): return ( not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 1) ) @cached_property def supports_explain_analyze(self): return self.connection.mysql_is_mariadb or self.connection.mysql_version >= ( 8, 0, 18, ) @cached_property def supported_explain_formats(self): # Alias MySQL's TRADITIONAL to TEXT for consistency with other # backends. 
formats = {"JSON", "TEXT", "TRADITIONAL"} if not self.connection.mysql_is_mariadb and self.connection.mysql_version >= ( 8, 0, 16, ): formats.add("TREE") return formats @cached_property def supports_transactions(self): """ All storage engines except MyISAM support transactions. """ return self._mysql_storage_engine != "MyISAM" uses_savepoints = property(operator.attrgetter("supports_transactions")) can_release_savepoints = property(operator.attrgetter("supports_transactions")) @cached_property def ignores_table_name_case(self): return self.connection.mysql_server_data["lower_case_table_names"] @cached_property def supports_default_in_lead_lag(self): # To be added in https://jira.mariadb.org/browse/MDEV-12981. return not self.connection.mysql_is_mariadb @cached_property def can_introspect_json_field(self): if self.connection.mysql_is_mariadb: return self.can_introspect_check_constraints return True @cached_property def supports_index_column_ordering(self): if self._mysql_storage_engine != "InnoDB": return False if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 8) return self.connection.mysql_version >= (8, 0, 1) @cached_property def supports_expression_indexes(self): return ( not self.connection.mysql_is_mariadb and self._mysql_storage_engine != "MyISAM" and self.connection.mysql_version >= (8, 0, 13) ) @cached_property def supports_select_intersection(self): is_mariadb = self.connection.mysql_is_mariadb return is_mariadb or self.connection.mysql_version >= (8, 0, 31) supports_select_difference = property( operator.attrgetter("supports_select_intersection") ) @cached_property def can_rename_index(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 5, 2) return True
from django.core.exceptions import FieldError, FullResultSet from django.db.models.expressions import Col from django.db.models.sql import compiler class SQLCompiler(compiler.SQLCompiler): def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name sql, params = self.as_sql() return ( "(%s) IN (%s)" % ( ", ".join("%s.%s" % (qn(alias), qn2(column)) for column in columns), sql, ), params, ) class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler): pass class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler): def as_sql(self): # Prefer the non-standard DELETE FROM syntax over the SQL generated by # the SQLDeleteCompiler's default implementation when multiple tables # are involved since MySQL/MariaDB will generate a more efficient query # plan than when using a subquery. where, having, qualify = self.query.where.split_having_qualify( must_group_by=self.query.group_by is not None ) if self.single_alias or having or qualify: # DELETE FROM cannot be used when filtering against aggregates or # window functions as it doesn't allow for GROUP BY/HAVING clauses # and the subquery wrapping (necessary to emulate QUALIFY). return super().as_sql() result = [ "DELETE %s FROM" % self.quote_name_unless_alias(self.query.get_initial_alias()) ] from_sql, params = self.get_from_clause() result.extend(from_sql) try: where_sql, where_params = self.compile(where) except FullResultSet: pass else: result.append("WHERE %s" % where_sql) params.extend(where_params) return " ".join(result), tuple(params) class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler): def as_sql(self): update_query, update_params = super().as_sql() # MySQL and MariaDB support UPDATE ... ORDER BY syntax. if self.query.order_by: order_by_sql = [] order_by_params = [] db_table = self.query.get_meta().db_table try: for resolved, (sql, params, _) in self.get_order_by(): if ( isinstance(resolved.expression, Col) and resolved.expression.alias != db_table ): # Ignore ordering if it contains joined fields, because # they cannot be used in the ORDER BY clause. raise FieldError order_by_sql.append(sql) order_by_params.extend(params) update_query += " ORDER BY " + ", ".join(order_by_sql) update_params += tuple(order_by_params) except FieldError: # Ignore ordering if it contains annotations, because they're # removed in .update() and cannot be resolved. pass return update_query, update_params class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler): pass
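# Illustrative sketch (not part of Django): one way to inspect which DELETE
# form SQLDeleteCompiler.as_sql() above chooses for a queryset, without
# executing it. ``qs`` stands for any hypothetical queryset; DeleteQuery and
# Query.chain() are existing Django internals, used here the same way
# QuerySet._raw_delete() uses them.
from django.db.models.sql.subqueries import DeleteQuery


def _example_delete_sql(qs):
    # Recompile the queryset's filters as a DELETE and return (sql, params);
    # multi-table filters take the more efficient
    # "DELETE <alias> FROM ... JOIN ..." path implemented above, while
    # single-alias queries fall back to the base implementation.
    delete_query = qs.query.chain(DeleteQuery)
    return delete_query.get_compiler(qs.db).as_sql()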
import warnings from urllib.parse import urlencode from urllib.request import urlopen from django.apps import apps as django_apps from django.conf import settings from django.core import paginator from django.core.exceptions import ImproperlyConfigured from django.urls import NoReverseMatch, reverse from django.utils import translation from django.utils.deprecation import RemovedInDjango50Warning PING_URL = "https://www.google.com/webmasters/tools/ping" class SitemapNotFound(Exception): pass def ping_google(sitemap_url=None, ping_url=PING_URL, sitemap_uses_https=True): """ Alert Google that the sitemap for the current site has been updated. If sitemap_url is provided, it should be an absolute path to the sitemap for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this function will attempt to deduce it by using urls.reverse(). """ sitemap_full_url = _get_sitemap_full_url(sitemap_url, sitemap_uses_https) params = urlencode({"sitemap": sitemap_full_url}) urlopen("%s?%s" % (ping_url, params)) def _get_sitemap_full_url(sitemap_url, sitemap_uses_https=True): if not django_apps.is_installed("django.contrib.sites"): raise ImproperlyConfigured( "ping_google requires django.contrib.sites, which isn't installed." ) if sitemap_url is None: try: # First, try to get the "index" sitemap URL. sitemap_url = reverse("django.contrib.sitemaps.views.index") except NoReverseMatch: try: # Next, try for the "global" sitemap URL. sitemap_url = reverse("django.contrib.sitemaps.views.sitemap") except NoReverseMatch: pass if sitemap_url is None: raise SitemapNotFound( "You didn't provide a sitemap_url, and the sitemap URL couldn't be " "auto-detected." ) Site = django_apps.get_model("sites.Site") current_site = Site.objects.get_current() scheme = "https" if sitemap_uses_https else "http" return "%s://%s%s" % (scheme, current_site.domain, sitemap_url) class Sitemap: # This limit is defined by Google. See the index documentation at # https://www.sitemaps.org/protocol.html#index. limit = 50000 # If protocol is None, the URLs in the sitemap will use the protocol # with which the sitemap was requested. protocol = None # Enables generating URLs for all languages. i18n = False # Override list of languages to use. languages = None # Enables generating alternate/hreflang links. alternates = False # Add an alternate/hreflang link with value 'x-default'. x_default = False def _get(self, name, item, default=None): try: attr = getattr(self, name) except AttributeError: return default if callable(attr): if self.i18n: # Split the (item, lang_code) tuples again for the location, # priority, lastmod and changefreq method calls. item, lang_code = item return attr(item) return attr def _languages(self): if self.languages is not None: return self.languages return [lang_code for lang_code, _ in settings.LANGUAGES] def _items(self): if self.i18n: # Create (item, lang_code) tuples for all items and languages. # This is necessary to paginate with all languages already considered. items = [ (item, lang_code) for lang_code in self._languages() for item in self.items() ] return items return self.items() def _location(self, item, force_lang_code=None): if self.i18n: obj, lang_code = item # Activate language from item-tuple or forced one before calling location. 
with translation.override(force_lang_code or lang_code): return self._get("location", item) return self._get("location", item) @property def paginator(self): return paginator.Paginator(self._items(), self.limit) def items(self): return [] def location(self, item): return item.get_absolute_url() def get_protocol(self, protocol=None): # Determine protocol if self.protocol is None and protocol is None: warnings.warn( "The default sitemap protocol will be changed from 'http' to " "'https' in Django 5.0. Set Sitemap.protocol to silence this " "warning.", category=RemovedInDjango50Warning, stacklevel=2, ) # RemovedInDjango50Warning: when the deprecation ends, replace 'http' # with 'https'. return self.protocol or protocol or "http" def get_domain(self, site=None): # Determine domain if site is None: if django_apps.is_installed("django.contrib.sites"): Site = django_apps.get_model("sites.Site") try: site = Site.objects.get_current() except Site.DoesNotExist: pass if site is None: raise ImproperlyConfigured( "To use sitemaps, either enable the sites framework or pass " "a Site/RequestSite object in your view." ) return site.domain def get_urls(self, page=1, site=None, protocol=None): protocol = self.get_protocol(protocol) domain = self.get_domain(site) return self._urls(page, protocol, domain) def get_latest_lastmod(self): if not hasattr(self, "lastmod"): return None if callable(self.lastmod): try: return max([self.lastmod(item) for item in self.items()], default=None) except TypeError: return None else: return self.lastmod def _urls(self, page, protocol, domain): urls = [] latest_lastmod = None all_items_lastmod = True # track if all items have a lastmod paginator_page = self.paginator.page(page) for item in paginator_page.object_list: loc = f"{protocol}://{domain}{self._location(item)}" priority = self._get("priority", item) lastmod = self._get("lastmod", item) if all_items_lastmod: all_items_lastmod = lastmod is not None if all_items_lastmod and ( latest_lastmod is None or lastmod > latest_lastmod ): latest_lastmod = lastmod url_info = { "item": item, "location": loc, "lastmod": lastmod, "changefreq": self._get("changefreq", item), "priority": str(priority if priority is not None else ""), "alternates": [], } if self.i18n and self.alternates: for lang_code in self._languages(): loc = f"{protocol}://{domain}{self._location(item, lang_code)}" url_info["alternates"].append( { "location": loc, "lang_code": lang_code, } ) if self.x_default: lang_code = settings.LANGUAGE_CODE loc = f"{protocol}://{domain}{self._location(item, lang_code)}" loc = loc.replace(f"/{lang_code}/", "/", 1) url_info["alternates"].append( { "location": loc, "lang_code": "x-default", } ) urls.append(url_info) if all_items_lastmod and latest_lastmod: self.latest_lastmod = latest_lastmod return urls class GenericSitemap(Sitemap): priority = None changefreq = None def __init__(self, info_dict, priority=None, changefreq=None, protocol=None): self.queryset = info_dict["queryset"] self.date_field = info_dict.get("date_field") self.priority = self.priority or priority self.changefreq = self.changefreq or changefreq self.protocol = self.protocol or protocol def items(self): # Make sure to return a clone; we don't want premature evaluation. 
return self.queryset.filter() def lastmod(self, item): if self.date_field is not None: return getattr(item, self.date_field) return None def get_latest_lastmod(self): if self.date_field is not None: return ( self.queryset.order_by("-" + self.date_field) .values_list(self.date_field, flat=True) .first() ) return None
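# Illustrative usage sketch (not part of this module): a minimal Sitemap
# subclass as it would be written in a project, exercising the hooks defined
# above. ``BlogEntry`` and its fields are hypothetical stand-ins.
class _ExampleBlogSitemap(Sitemap):
    changefreq = "weekly"
    priority = 0.6
    # Setting protocol explicitly avoids the RemovedInDjango50Warning raised
    # by get_protocol() above.
    protocol = "https"

    def items(self):
        # Returned items are passed back to location(), lastmod(), etc.
        return BlogEntry.objects.filter(is_published=True)  # hypothetical model

    def lastmod(self, item):
        return item.updated_at  # hypothetical field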
import datetime from decimal import Decimal from django.db.models import ( Avg, Case, Count, Exists, F, Max, OuterRef, Q, StdDev, Subquery, Sum, Variance, When, ) from django.test import TestCase from django.test.utils import Approximate from .models import Author, Book, Publisher class FilteredAggregateTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name="test", age=40) cls.a2 = Author.objects.create(name="test2", age=60) cls.a3 = Author.objects.create(name="test3", age=100) cls.p1 = Publisher.objects.create( name="Apress", num_awards=3, duration=datetime.timedelta(days=1) ) cls.b1 = Book.objects.create( isbn="159059725", name="The Definitive Guide to Django: Web Development Done Right", pages=447, rating=4.5, price=Decimal("30.00"), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6), ) cls.b2 = Book.objects.create( isbn="067232959", name="Sams Teach Yourself Django in 24 Hours", pages=528, rating=3.0, price=Decimal("23.09"), contact=cls.a2, publisher=cls.p1, pubdate=datetime.date(2008, 3, 3), ) cls.b3 = Book.objects.create( isbn="159059996", name="Practical Django Projects", pages=600, rating=4.5, price=Decimal("29.69"), contact=cls.a3, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23), ) cls.a1.friends.add(cls.a2) cls.a1.friends.add(cls.a3) cls.b1.authors.add(cls.a1) cls.b1.authors.add(cls.a3) cls.b2.authors.add(cls.a2) cls.b3.authors.add(cls.a3) def test_filtered_aggregates(self): agg = Sum("age", filter=Q(name__startswith="test")) self.assertEqual(Author.objects.aggregate(age=agg)["age"], 200) def test_filtered_numerical_aggregates(self): for aggregate, expected_result in ( (Avg, Approximate(66.7, 1)), (StdDev, Approximate(24.9, 1)), (Variance, Approximate(622.2, 1)), ): with self.subTest(aggregate=aggregate.__name__): agg = aggregate("age", filter=Q(name__startswith="test")) self.assertEqual( Author.objects.aggregate(age=agg)["age"], expected_result ) def test_double_filtered_aggregates(self): agg = Sum("age", filter=Q(Q(name="test2") & ~Q(name="test"))) self.assertEqual(Author.objects.aggregate(age=agg)["age"], 60) def test_excluded_aggregates(self): agg = Sum("age", filter=~Q(name="test2")) self.assertEqual(Author.objects.aggregate(age=agg)["age"], 140) def test_related_aggregates_m2m(self): agg = Sum("friends__age", filter=~Q(friends__name="test")) self.assertEqual( Author.objects.filter(name="test").aggregate(age=agg)["age"], 160 ) def test_related_aggregates_m2m_and_fk(self): q = Q(friends__book__publisher__name="Apress") & ~Q(friends__name="test3") agg = Sum("friends__book__pages", filter=q) self.assertEqual( Author.objects.filter(name="test").aggregate(pages=agg)["pages"], 528 ) def test_plain_annotate(self): agg = Sum("book__pages", filter=Q(book__rating__gt=3)) qs = Author.objects.annotate(pages=agg).order_by("pk") self.assertSequenceEqual([a.pages for a in qs], [447, None, 1047]) def test_filtered_aggregate_on_annotate(self): pages_annotate = Sum("book__pages", filter=Q(book__rating__gt=3)) age_agg = Sum("age", filter=Q(total_pages__gte=400)) aggregated = Author.objects.annotate(total_pages=pages_annotate).aggregate( summed_age=age_agg ) self.assertEqual(aggregated, {"summed_age": 140}) def test_case_aggregate(self): agg = Sum( Case(When(friends__age=40, then=F("friends__age"))), filter=Q(friends__name__startswith="test"), ) self.assertEqual(Author.objects.aggregate(age=agg)["age"], 80) def test_sum_star_exception(self): msg = "Star cannot be used with filter. Please specify a field." 
with self.assertRaisesMessage(ValueError, msg): Count("*", filter=Q(age=40)) def test_filtered_reused_subquery(self): qs = Author.objects.annotate( older_friends_count=Count("friends", filter=Q(friends__age__gt=F("age"))), ).filter( older_friends_count__gte=2, ) self.assertEqual(qs.get(pk__in=qs.values("pk")), self.a1) def test_filtered_aggregate_ref_annotation(self): aggs = Author.objects.annotate(double_age=F("age") * 2,).aggregate( cnt=Count("pk", filter=Q(double_age__gt=100)), ) self.assertEqual(aggs["cnt"], 2) def test_filtered_aggregate_ref_subquery_annotation(self): aggs = Author.objects.annotate( earliest_book_year=Subquery( Book.objects.filter( contact__pk=OuterRef("pk"), ) .order_by("pubdate") .values("pubdate__year")[:1] ), ).aggregate( cnt=Count("pk", filter=Q(earliest_book_year=2008)), ) self.assertEqual(aggs["cnt"], 2) def test_filtered_aggregate_ref_multiple_subquery_annotation(self): aggregate = ( Book.objects.values("publisher") .annotate( has_authors=Exists( Book.authors.through.objects.filter(book=OuterRef("pk")), ), authors_have_other_books=Exists( Book.objects.filter( authors__in=Author.objects.filter( book_contact_set=OuterRef(OuterRef("pk")), ) ).exclude(pk=OuterRef("pk")), ), ) .aggregate( max_rating=Max( "rating", filter=Q(has_authors=True, authors_have_other_books=False), ) ) ) self.assertEqual(aggregate, {"max_rating": 4.5}) def test_filtered_aggregate_on_exists(self): aggregate = Book.objects.values("publisher").aggregate( max_rating=Max( "rating", filter=Exists( Book.authors.through.objects.filter(book=OuterRef("pk")), ), ), ) self.assertEqual(aggregate, {"max_rating": 4.5}) def test_filtered_aggregate_empty_condition(self): book = Book.objects.annotate( authors_count=Count( "authors", filter=Q(authors__in=[]), ), ).get(pk=self.b1.pk) self.assertEqual(book.authors_count, 0) aggregate = Book.objects.aggregate( max_rating=Max("rating", filter=Q(rating__in=[])) ) self.assertEqual(aggregate, {"max_rating": None}) def test_filtered_aggregate_full_condition(self): book = Book.objects.annotate( authors_count=Count( "authors", filter=~Q(authors__in=[]), ), ).get(pk=self.b1.pk) self.assertEqual(book.authors_count, 2) aggregate = Book.objects.aggregate( max_rating=Max("rating", filter=~Q(rating__in=[])) ) self.assertEqual(aggregate, {"max_rating": 4.5})
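# Illustrative note (not part of the test suite): on backends with
# supports_aggregate_filter_clause (e.g. PostgreSQL), a filtered aggregate
# such as Sum("age", filter=Q(name__startswith="test")) compiles to the
# standard SUM("age") FILTER (WHERE ...) clause; other backends get the
# equivalent SUM(CASE WHEN ... THEN "age" END). The generated SQL can be
# inspected without running the query; the function name is hypothetical.
def _example_filtered_aggregate_sql():
    qs = Author.objects.annotate(
        filtered_age=Sum("age", filter=Q(name__startswith="test")),
    )
    return str(qs.query)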
import datetime import pickle from decimal import Decimal from operator import attrgetter from unittest import mock from django.contrib.contenttypes.models import ContentType from django.core.exceptions import FieldError from django.db import connection from django.db.models import ( Aggregate, Avg, Case, Count, DecimalField, F, IntegerField, Max, Q, StdDev, Sum, Value, Variance, When, ) from django.test import TestCase, skipUnlessDBFeature from django.test.utils import Approximate from .models import ( Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag, Publisher, SelfRefFK, Store, WithManualPK, ) class AggregationTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34) cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35) cls.a3 = Author.objects.create(name="Brad Dayley", age=45) cls.a4 = Author.objects.create(name="James Bennett", age=29) cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37) cls.a6 = Author.objects.create(name="Paul Bissex", age=29) cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25) cls.a8 = Author.objects.create(name="Peter Norvig", age=57) cls.a9 = Author.objects.create(name="Stuart Russell", age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name="Apress", num_awards=3) cls.p2 = Publisher.objects.create(name="Sams", num_awards=1) cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7) cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn="159059725", name="The Definitive Guide to Django: Web Development Done Right", pages=447, rating=4.5, price=Decimal("30.00"), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6), ) cls.b2 = Book.objects.create( isbn="067232959", name="Sams Teach Yourself Django in 24 Hours", pages=528, rating=3.0, price=Decimal("23.09"), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3), ) cls.b3 = Book.objects.create( isbn="159059996", name="Practical Django Projects", pages=300, rating=4.0, price=Decimal("29.69"), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23), ) cls.b4 = Book.objects.create( isbn="013235613", name="Python Web Development with Django", pages=350, rating=4.0, price=Decimal("29.69"), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3), ) cls.b5 = HardbackBook.objects.create( isbn="013790395", name="Artificial Intelligence: A Modern Approach", pages=1132, rating=4.0, price=Decimal("82.80"), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15), weight=4.5, ) cls.b6 = HardbackBook.objects.create( isbn="155860191", name=( "Paradigms of Artificial Intelligence Programming: Case Studies in " "Common Lisp" ), pages=946, rating=5.0, price=Decimal("75.00"), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15), weight=3.7, ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name="Amazon.com", original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), 
friday_night_closing=datetime.time(23, 59, 59), ) s2 = Store.objects.create( name="Books.com", original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59), ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30), ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def assertObjectAttrs(self, obj, **kwargs): for attr, value in kwargs.items(): self.assertEqual(getattr(obj, attr), value) def test_annotation_with_value(self): values = ( Book.objects.filter( name="Practical Django Projects", ) .annotate( discount_price=F("price") * 2, ) .values( "discount_price", ) .annotate(sum_discount=Sum("discount_price")) ) with self.assertNumQueries(1) as ctx: self.assertSequenceEqual( values, [ { "discount_price": Decimal("59.38"), "sum_discount": Decimal("59.38"), } ], ) if connection.features.allows_group_by_refs: alias = connection.ops.quote_name("discount_price") self.assertIn(f"GROUP BY {alias}", ctx[0]["sql"]) def test_aggregates_in_where_clause(self): """ Regression test for #12822: DatabaseError: aggregates not allowed in WHERE clause The subselect works and returns results equivalent to a query with the IDs listed. Before the corresponding fix for this bug, this test passed in 1.1 and failed in 1.2-beta (trunk). """ qs = Book.objects.values("contact").annotate(Max("id")) qs = qs.order_by("contact").values_list("id__max", flat=True) # don't do anything with the queryset (qs) before including it as a # subquery books = Book.objects.order_by("id") qs1 = books.filter(id__in=qs) qs2 = books.filter(id__in=list(qs)) self.assertEqual(list(qs1), list(qs2)) def test_aggregates_in_where_clause_pre_eval(self): """ Regression test for #12822: DatabaseError: aggregates not allowed in WHERE clause Same as the above test, but evaluates the queryset for the subquery before it's used as a subquery. Before the corresponding fix for this bug, this test failed in both 1.1 and 1.2-beta (trunk). """ qs = Book.objects.values("contact").annotate(Max("id")) qs = qs.order_by("contact").values_list("id__max", flat=True) # force the queryset (qs) for the subquery to be evaluated in its # current state list(qs) books = Book.objects.order_by("id") qs1 = books.filter(id__in=qs) qs2 = books.filter(id__in=list(qs)) self.assertEqual(list(qs1), list(qs2)) @skipUnlessDBFeature("supports_subqueries_in_group_by") def test_annotate_with_extra(self): """ Regression test for #11916: Extra params + aggregation creates incorrect SQL. 
""" # Oracle doesn't support subqueries in group by clause shortest_book_sql = """ SELECT name FROM aggregation_regress_book b WHERE b.publisher_id = aggregation_regress_publisher.id ORDER BY b.pages LIMIT 1 """ # tests that this query does not raise a DatabaseError due to the full # subselect being (erroneously) added to the GROUP BY parameters qs = Publisher.objects.extra( select={ "name_of_shortest_book": shortest_book_sql, } ).annotate(total_books=Count("book")) # force execution of the query list(qs) def test_aggregate(self): # Ordering requests are ignored self.assertEqual( Author.objects.order_by("name").aggregate(Avg("age")), {"age__avg": Approximate(37.444, places=1)}, ) # Implicit ordering is also ignored self.assertEqual( Book.objects.aggregate(Sum("pages")), {"pages__sum": 3703}, ) # Baseline results self.assertEqual( Book.objects.aggregate(Sum("pages"), Avg("pages")), {"pages__sum": 3703, "pages__avg": Approximate(617.166, places=2)}, ) # Empty values query doesn't affect grouping or results self.assertEqual( Book.objects.values().aggregate(Sum("pages"), Avg("pages")), {"pages__sum": 3703, "pages__avg": Approximate(617.166, places=2)}, ) # Aggregate overrides extra selected column self.assertEqual( Book.objects.extra(select={"price_per_page": "price / pages"}).aggregate( Sum("pages") ), {"pages__sum": 3703}, ) def test_annotation(self): # Annotations get combined with extra select clauses obj = ( Book.objects.annotate(mean_auth_age=Avg("authors__age")) .extra(select={"manufacture_cost": "price * .5"}) .get(pk=self.b2.pk) ) self.assertObjectAttrs( obj, contact_id=self.a3.id, isbn="067232959", mean_auth_age=45.0, name="Sams Teach Yourself Django in 24 Hours", pages=528, price=Decimal("23.09"), pubdate=datetime.date(2008, 3, 3), publisher_id=self.p2.id, rating=3.0, ) # Different DB backends return different types for the extra select computation self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545"))) # Order of the annotate/extra in the query doesn't matter obj = ( Book.objects.extra(select={"manufacture_cost": "price * .5"}) .annotate(mean_auth_age=Avg("authors__age")) .get(pk=self.b2.pk) ) self.assertObjectAttrs( obj, contact_id=self.a3.id, isbn="067232959", mean_auth_age=45.0, name="Sams Teach Yourself Django in 24 Hours", pages=528, price=Decimal("23.09"), pubdate=datetime.date(2008, 3, 3), publisher_id=self.p2.id, rating=3.0, ) # Different DB backends return different types for the extra select computation self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545"))) # Values queries can be combined with annotate and extra obj = ( Book.objects.annotate(mean_auth_age=Avg("authors__age")) .extra(select={"manufacture_cost": "price * .5"}) .values() .get(pk=self.b2.pk) ) manufacture_cost = obj["manufacture_cost"] self.assertIn(manufacture_cost, (11.545, Decimal("11.545"))) del obj["manufacture_cost"] self.assertEqual( obj, { "id": self.b2.id, "contact_id": self.a3.id, "isbn": "067232959", "mean_auth_age": 45.0, "name": "Sams Teach Yourself Django in 24 Hours", "pages": 528, "price": Decimal("23.09"), "pubdate": datetime.date(2008, 3, 3), "publisher_id": self.p2.id, "rating": 3.0, }, ) # The order of the (empty) values, annotate and extra clauses doesn't # matter obj = ( Book.objects.values() .annotate(mean_auth_age=Avg("authors__age")) .extra(select={"manufacture_cost": "price * .5"}) .get(pk=self.b2.pk) ) manufacture_cost = obj["manufacture_cost"] self.assertIn(manufacture_cost, (11.545, Decimal("11.545"))) del obj["manufacture_cost"] self.assertEqual( obj, { "id": 
self.b2.id, "contact_id": self.a3.id, "isbn": "067232959", "mean_auth_age": 45.0, "name": "Sams Teach Yourself Django in 24 Hours", "pages": 528, "price": Decimal("23.09"), "pubdate": datetime.date(2008, 3, 3), "publisher_id": self.p2.id, "rating": 3.0, }, ) # If the annotation precedes the values clause, it won't be included # unless it is explicitly named obj = ( Book.objects.annotate(mean_auth_age=Avg("authors__age")) .extra(select={"price_per_page": "price / pages"}) .values("name") .get(pk=self.b1.pk) ) self.assertEqual( obj, { "name": "The Definitive Guide to Django: Web Development Done Right", }, ) obj = ( Book.objects.annotate(mean_auth_age=Avg("authors__age")) .extra(select={"price_per_page": "price / pages"}) .values("name", "mean_auth_age") .get(pk=self.b1.pk) ) self.assertEqual( obj, { "mean_auth_age": 34.5, "name": "The Definitive Guide to Django: Web Development Done Right", }, ) # If an annotation isn't included in the values, it can still be used # in a filter qs = ( Book.objects.annotate(n_authors=Count("authors")) .values("name") .filter(n_authors__gt=2) ) self.assertSequenceEqual( qs, [{"name": "Python Web Development with Django"}], ) # The annotations are added to values output if values() precedes # annotate() obj = ( Book.objects.values("name") .annotate(mean_auth_age=Avg("authors__age")) .extra(select={"price_per_page": "price / pages"}) .get(pk=self.b1.pk) ) self.assertEqual( obj, { "mean_auth_age": 34.5, "name": "The Definitive Guide to Django: Web Development Done Right", }, ) # All of the objects are getting counted (allow_nulls) and that values # respects the amount of objects self.assertEqual(len(Author.objects.annotate(Avg("friends__age")).values()), 9) # Consecutive calls to annotate accumulate in the query qs = ( Book.objects.values("price") .annotate(oldest=Max("authors__age")) .order_by("oldest", "price") .annotate(Max("publisher__num_awards")) ) self.assertSequenceEqual( qs, [ {"price": Decimal("30"), "oldest": 35, "publisher__num_awards__max": 3}, { "price": Decimal("29.69"), "oldest": 37, "publisher__num_awards__max": 7, }, { "price": Decimal("23.09"), "oldest": 45, "publisher__num_awards__max": 1, }, {"price": Decimal("75"), "oldest": 57, "publisher__num_awards__max": 9}, { "price": Decimal("82.8"), "oldest": 57, "publisher__num_awards__max": 7, }, ], ) def test_aggregate_annotation(self): # Aggregates can be composed over annotations. # The return type is derived from the composed aggregate vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate( Max("pages"), Max("price"), Sum("num_authors"), Avg("num_authors") ) self.assertEqual( vals, { "num_authors__sum": 10, "num_authors__avg": Approximate(1.666, places=2), "pages__max": 1132, "price__max": Decimal("82.80"), }, ) # Regression for #15624 - Missing SELECT columns when using values, annotate # and aggregate in a single query self.assertEqual( Book.objects.annotate(c=Count("authors")).values("c").aggregate(Max("c")), {"c__max": 3}, ) def test_conditional_aggregate(self): # Conditional aggregation of a grouped queryset. 
self.assertEqual( Book.objects.annotate(c=Count("authors")) .values("pk") .aggregate(test=Sum(Case(When(c__gt=1, then=1))))["test"], 3, ) def test_sliced_conditional_aggregate(self): self.assertEqual( Author.objects.order_by("pk")[:5].aggregate( test=Sum(Case(When(age__lte=35, then=1))) )["test"], 3, ) def test_annotated_conditional_aggregate(self): annotated_qs = Book.objects.annotate( discount_price=F("price") * Decimal("0.75") ) self.assertAlmostEqual( annotated_qs.aggregate( test=Avg( Case( When(pages__lt=400, then="discount_price"), output_field=DecimalField(), ) ) )["test"], Decimal("22.27"), places=2, ) def test_distinct_conditional_aggregate(self): self.assertEqual( Book.objects.distinct().aggregate( test=Avg( Case( When(price=Decimal("29.69"), then="pages"), output_field=IntegerField(), ) ) )["test"], 325, ) def test_conditional_aggregate_on_complex_condition(self): self.assertEqual( Book.objects.distinct().aggregate( test=Avg( Case( When( Q(price__gte=Decimal("29")) & Q(price__lt=Decimal("30")), then="pages", ), output_field=IntegerField(), ) ) )["test"], 325, ) def test_q_annotation_aggregate(self): self.assertEqual(Book.objects.annotate(has_pk=Q(pk__isnull=False)).count(), 6) def test_decimal_aggregate_annotation_filter(self): """ Filtering on an aggregate annotation with Decimal values should work. Requires special handling on SQLite (#18247). """ self.assertEqual( len( Author.objects.annotate(sum=Sum("book_contact_set__price")).filter( sum__gt=Decimal(40) ) ), 1, ) self.assertEqual( len( Author.objects.annotate(sum=Sum("book_contact_set__price")).filter( sum__lte=Decimal(40) ) ), 4, ) def test_field_error(self): # Bad field requests in aggregates are caught and reported msg = ( "Cannot resolve keyword 'foo' into field. Choices are: authors, " "contact, contact_id, hardbackbook, id, isbn, name, pages, price, " "pubdate, publisher, publisher_id, rating, store, tags" ) with self.assertRaisesMessage(FieldError, msg): Book.objects.aggregate(num_authors=Count("foo")) with self.assertRaisesMessage(FieldError, msg): Book.objects.annotate(num_authors=Count("foo")) msg = ( "Cannot resolve keyword 'foo' into field. 
Choices are: authors, " "contact, contact_id, hardbackbook, id, isbn, name, num_authors, " "pages, price, pubdate, publisher, publisher_id, rating, store, tags" ) with self.assertRaisesMessage(FieldError, msg): Book.objects.annotate(num_authors=Count("authors__id")).aggregate( Max("foo") ) def test_more(self): # Old-style count aggregations can be mixed with new-style self.assertEqual(Book.objects.annotate(num_authors=Count("authors")).count(), 6) # Non-ordinal, non-computed Aggregates over annotations correctly # inherit the annotation's internal type if the annotation is ordinal # or computed vals = Book.objects.annotate(num_authors=Count("authors")).aggregate( Max("num_authors") ) self.assertEqual(vals, {"num_authors__max": 3}) vals = Publisher.objects.annotate(avg_price=Avg("book__price")).aggregate( Max("avg_price") ) self.assertEqual(vals, {"avg_price__max": 75.0}) # Aliases are quoted to protected aliases that might be reserved names vals = Book.objects.aggregate(number=Max("pages"), select=Max("pages")) self.assertEqual(vals, {"number": 1132, "select": 1132}) # Regression for #10064: select_related() plays nice with aggregates obj = ( Book.objects.select_related("publisher") .annotate(num_authors=Count("authors")) .values() .get(isbn="013790395") ) self.assertEqual( obj, { "contact_id": self.a8.id, "id": self.b5.id, "isbn": "013790395", "name": "Artificial Intelligence: A Modern Approach", "num_authors": 2, "pages": 1132, "price": Decimal("82.8"), "pubdate": datetime.date(1995, 1, 15), "publisher_id": self.p3.id, "rating": 4.0, }, ) # Regression for #10010: exclude on an aggregate field is correctly # negated self.assertEqual(len(Book.objects.annotate(num_authors=Count("authors"))), 6) self.assertEqual( len( Book.objects.annotate(num_authors=Count("authors")).filter( num_authors__gt=2 ) ), 1, ) self.assertEqual( len( Book.objects.annotate(num_authors=Count("authors")).exclude( num_authors__gt=2 ) ), 5, ) self.assertEqual( len( Book.objects.annotate(num_authors=Count("authors")) .filter(num_authors__lt=3) .exclude(num_authors__lt=2) ), 2, ) self.assertEqual( len( Book.objects.annotate(num_authors=Count("authors")) .exclude(num_authors__lt=2) .filter(num_authors__lt=3) ), 2, ) def test_aggregate_fexpr(self): # Aggregates can be used with F() expressions # ... where the F() is pushed into the HAVING clause qs = ( Publisher.objects.annotate(num_books=Count("book")) .filter(num_books__lt=F("num_awards") / 2) .order_by("name") .values("name", "num_books", "num_awards") ) self.assertSequenceEqual( qs, [ {"num_books": 1, "name": "Morgan Kaufmann", "num_awards": 9}, {"num_books": 2, "name": "Prentice Hall", "num_awards": 7}, ], ) qs = ( Publisher.objects.annotate(num_books=Count("book")) .exclude(num_books__lt=F("num_awards") / 2) .order_by("name") .values("name", "num_books", "num_awards") ) self.assertSequenceEqual( qs, [ {"num_books": 2, "name": "Apress", "num_awards": 3}, {"num_books": 0, "name": "Jonno's House of Books", "num_awards": 0}, {"num_books": 1, "name": "Sams", "num_awards": 1}, ], ) # ... 
and where the F() references an aggregate qs = ( Publisher.objects.annotate(num_books=Count("book")) .filter(num_awards__gt=2 * F("num_books")) .order_by("name") .values("name", "num_books", "num_awards") ) self.assertSequenceEqual( qs, [ {"num_books": 1, "name": "Morgan Kaufmann", "num_awards": 9}, {"num_books": 2, "name": "Prentice Hall", "num_awards": 7}, ], ) qs = ( Publisher.objects.annotate(num_books=Count("book")) .exclude(num_books__lt=F("num_awards") / 2) .order_by("name") .values("name", "num_books", "num_awards") ) self.assertSequenceEqual( qs, [ {"num_books": 2, "name": "Apress", "num_awards": 3}, {"num_books": 0, "name": "Jonno's House of Books", "num_awards": 0}, {"num_books": 1, "name": "Sams", "num_awards": 1}, ], ) def test_db_col_table(self): # Tests on fields with non-default table and column names. qs = Clues.objects.values("EntryID__Entry").annotate( Appearances=Count("EntryID"), Distinct_Clues=Count("Clue", distinct=True) ) self.assertSequenceEqual(qs, []) qs = Entries.objects.annotate(clue_count=Count("clues__ID")) self.assertSequenceEqual(qs, []) def test_boolean_conversion(self): # Aggregates mixed up ordering of columns for backend's convert_values # method. Refs #21126. e = Entries.objects.create(Entry="foo") c = Clues.objects.create(EntryID=e, Clue="bar") qs = Clues.objects.select_related("EntryID").annotate(Count("ID")) self.assertSequenceEqual(qs, [c]) self.assertEqual(qs[0].EntryID, e) self.assertIs(qs[0].EntryID.Exclude, False) def test_empty(self): # Regression for #10089: Check handling of empty result sets with # aggregates self.assertEqual(Book.objects.filter(id__in=[]).count(), 0) vals = Book.objects.filter(id__in=[]).aggregate( num_authors=Count("authors"), avg_authors=Avg("authors"), max_authors=Max("authors"), max_price=Max("price"), max_rating=Max("rating"), ) self.assertEqual( vals, { "max_authors": None, "max_rating": None, "num_authors": 0, "avg_authors": None, "max_price": None, }, ) qs = ( Publisher.objects.filter(name="Jonno's House of Books") .annotate( num_authors=Count("book__authors"), avg_authors=Avg("book__authors"), max_authors=Max("book__authors"), max_price=Max("book__price"), max_rating=Max("book__rating"), ) .values() ) self.assertSequenceEqual( qs, [ { "max_authors": None, "name": "Jonno's House of Books", "num_awards": 0, "max_price": None, "num_authors": 0, "max_rating": None, "id": self.p5.id, "avg_authors": None, } ], ) def test_more_more(self): # Regression for #10113 - Fields mentioned in order_by() must be # included in the GROUP BY. This only becomes a problem when the # order_by introduces a new join. 
self.assertQuerySetEqual( Book.objects.annotate(num_authors=Count("authors")).order_by( "publisher__name", "name" ), [ "Practical Django Projects", "The Definitive Guide to Django: Web Development Done Right", "Paradigms of Artificial Intelligence Programming: Case Studies in " "Common Lisp", "Artificial Intelligence: A Modern Approach", "Python Web Development with Django", "Sams Teach Yourself Django in 24 Hours", ], lambda b: b.name, ) # Regression for #10127 - Empty select_related() works with annotate qs = ( Book.objects.filter(rating__lt=4.5) .select_related() .annotate(Avg("authors__age")) .order_by("name") ) self.assertQuerySetEqual( qs, [ ( "Artificial Intelligence: A Modern Approach", 51.5, "Prentice Hall", "Peter Norvig", ), ("Practical Django Projects", 29.0, "Apress", "James Bennett"), ( "Python Web Development with Django", Approximate(30.333, places=2), "Prentice Hall", "Jeffrey Forcier", ), ("Sams Teach Yourself Django in 24 Hours", 45.0, "Sams", "Brad Dayley"), ], lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name), ) # Regression for #10132 - If the values() clause only mentioned extra # (select=) columns, those columns are used for grouping qs = ( Book.objects.extra(select={"pub": "publisher_id"}) .values("pub") .annotate(Count("id")) .order_by("pub") ) self.assertSequenceEqual( qs, [ {"pub": self.p1.id, "id__count": 2}, {"pub": self.p2.id, "id__count": 1}, {"pub": self.p3.id, "id__count": 2}, {"pub": self.p4.id, "id__count": 1}, ], ) qs = ( Book.objects.extra(select={"pub": "publisher_id", "foo": "pages"}) .values("pub") .annotate(Count("id")) .order_by("pub") ) self.assertSequenceEqual( qs, [ {"pub": self.p1.id, "id__count": 2}, {"pub": self.p2.id, "id__count": 1}, {"pub": self.p3.id, "id__count": 2}, {"pub": self.p4.id, "id__count": 1}, ], ) # Regression for #10182 - Queries with aggregate calls are correctly # realiased when used in a subquery ids = ( Book.objects.filter(pages__gt=100) .annotate(n_authors=Count("authors")) .filter(n_authors__gt=2) .order_by("n_authors") ) self.assertQuerySetEqual( Book.objects.filter(id__in=ids), [ "Python Web Development with Django", ], lambda b: b.name, ) # Regression for #15709 - Ensure each group_by field only exists once # per query qstr = str( Book.objects.values("publisher") .annotate(max_pages=Max("pages")) .order_by() .query ) # There is just one GROUP BY clause (zero commas means at most one clause). self.assertEqual(qstr[qstr.index("GROUP BY") :].count(", "), 0) def test_duplicate_alias(self): # Regression for #11256 - duplicating a default alias raises ValueError. msg = ( "The named annotation 'authors__age__avg' conflicts with " "the default name for another annotation." ) with self.assertRaisesMessage(ValueError, msg): Book.objects.annotate( Avg("authors__age"), authors__age__avg=Avg("authors__age") ) def test_field_name_conflict(self): # Regression for #11256 - providing an aggregate name # that conflicts with a field name on the model raises ValueError msg = "The annotation 'age' conflicts with a field on the model." with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate(age=Avg("friends__age")) def test_m2m_name_conflict(self): # Regression for #11256 - providing an aggregate name # that conflicts with an m2m name on the model raises ValueError msg = "The annotation 'friends' conflicts with a field on the model." 
with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate(friends=Count("friends")) def test_fk_attname_conflict(self): msg = "The annotation 'contact_id' conflicts with a field on the model." with self.assertRaisesMessage(ValueError, msg): Book.objects.annotate(contact_id=F("publisher_id")) def test_values_queryset_non_conflict(self): # If you're using a values query set, some potential conflicts are # avoided. # age is a field on Author, so it shouldn't be allowed as an aggregate. # But age isn't included in values(), so it is. results = ( Author.objects.values("name") .annotate(age=Count("book_contact_set")) .order_by("name") ) self.assertEqual(len(results), 9) self.assertEqual(results[0]["name"], "Adrian Holovaty") self.assertEqual(results[0]["age"], 1) # Same problem, but aggregating over m2m fields results = ( Author.objects.values("name") .annotate(age=Avg("friends__age")) .order_by("name") ) self.assertEqual(len(results), 9) self.assertEqual(results[0]["name"], "Adrian Holovaty") self.assertEqual(results[0]["age"], 32.0) # Same problem, but colliding with an m2m field results = ( Author.objects.values("name") .annotate(friends=Count("friends")) .order_by("name") ) self.assertEqual(len(results), 9) self.assertEqual(results[0]["name"], "Adrian Holovaty") self.assertEqual(results[0]["friends"], 2) def test_reverse_relation_name_conflict(self): # Regression for #11256 - providing an aggregate name # that conflicts with a reverse-related name on the model raises ValueError msg = "The annotation 'book_contact_set' conflicts with a field on the model." with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate(book_contact_set=Avg("friends__age")) def test_pickle(self): # Regression for #10197 -- Queries with aggregates can be pickled. # First check that pickling is possible at all. No crash = success qs = Book.objects.annotate(num_authors=Count("authors")) pickle.dumps(qs) # Then check that the round trip works. query = qs.query.get_compiler(qs.db).as_sql()[0] qs2 = pickle.loads(pickle.dumps(qs)) self.assertEqual( qs2.query.get_compiler(qs2.db).as_sql()[0], query, ) def test_more_more_more(self): # Regression for #10199 - Aggregate calls clone the original query so # the original query can still be used books = Book.objects.all() books.aggregate(Avg("authors__age")) self.assertQuerySetEqual( books.all(), [ "Artificial Intelligence: A Modern Approach", "Paradigms of Artificial Intelligence Programming: Case Studies in " "Common Lisp", "Practical Django Projects", "Python Web Development with Django", "Sams Teach Yourself Django in 24 Hours", "The Definitive Guide to Django: Web Development Done Right", ], lambda b: b.name, ) # Regression for #10248 - Annotations work with dates() qs = ( Book.objects.annotate(num_authors=Count("authors")) .filter(num_authors=2) .dates("pubdate", "day") ) self.assertSequenceEqual( qs, [ datetime.date(1995, 1, 15), datetime.date(2007, 12, 6), ], ) # Regression for #10290 - extra selects with parameters can be used for # grouping. 
qs = ( Book.objects.annotate(mean_auth_age=Avg("authors__age")) .extra(select={"sheets": "(pages + %s) / %s"}, select_params=[1, 2]) .order_by("sheets") .values("sheets") ) self.assertQuerySetEqual( qs, [150, 175, 224, 264, 473, 566], lambda b: int(b["sheets"]) ) # Regression for 10425 - annotations don't get in the way of a count() # clause self.assertEqual( Book.objects.values("publisher").annotate(Count("publisher")).count(), 4 ) self.assertEqual( Book.objects.annotate(Count("publisher")).values("publisher").count(), 6 ) # Note: intentionally no order_by(), that case needs tests, too. publishers = Publisher.objects.filter(id__in=[self.p1.id, self.p2.id]) self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"]) publishers = publishers.annotate(n_books=Count("book")) sorted_publishers = sorted(publishers, key=lambda x: x.name) self.assertEqual(sorted_publishers[0].n_books, 2) self.assertEqual(sorted_publishers[1].n_books, 1) self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"]) books = Book.objects.filter(publisher__in=publishers) self.assertQuerySetEqual( books, [ "Practical Django Projects", "Sams Teach Yourself Django in 24 Hours", "The Definitive Guide to Django: Web Development Done Right", ], lambda b: b.name, ) self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"]) # Regression for 10666 - inherited fields work with annotations and # aggregations self.assertEqual( HardbackBook.objects.aggregate(n_pages=Sum("book_ptr__pages")), {"n_pages": 2078}, ) self.assertEqual( HardbackBook.objects.aggregate(n_pages=Sum("pages")), {"n_pages": 2078}, ) qs = ( HardbackBook.objects.annotate( n_authors=Count("book_ptr__authors"), ) .values("name", "n_authors") .order_by("name") ) self.assertSequenceEqual( qs, [ {"n_authors": 2, "name": "Artificial Intelligence: A Modern Approach"}, { "n_authors": 1, "name": ( "Paradigms of Artificial Intelligence Programming: Case " "Studies in Common Lisp" ), }, ], ) qs = ( HardbackBook.objects.annotate(n_authors=Count("authors")) .values("name", "n_authors") .order_by("name") ) self.assertSequenceEqual( qs, [ {"n_authors": 2, "name": "Artificial Intelligence: A Modern Approach"}, { "n_authors": 1, "name": ( "Paradigms of Artificial Intelligence Programming: Case " "Studies in Common Lisp" ), }, ], ) # Regression for #10766 - Shouldn't be able to reference an aggregate # fields in an aggregate() call. msg = "Cannot compute Avg('mean_age'): 'mean_age' is an aggregate" with self.assertRaisesMessage(FieldError, msg): Book.objects.annotate(mean_age=Avg("authors__age")).annotate( Avg("mean_age") ) def test_empty_filter_count(self): self.assertEqual( Author.objects.filter(id__in=[]).annotate(Count("friends")).count(), 0 ) def test_empty_filter_aggregate(self): self.assertEqual( Author.objects.filter(id__in=[]) .annotate(Count("friends")) .aggregate(Count("pk")), {"pk__count": 0}, ) def test_none_call_before_aggregate(self): # Regression for #11789 self.assertEqual( Author.objects.none().aggregate(Avg("age")), {"age__avg": None} ) def test_annotate_and_join(self): self.assertEqual( Author.objects.annotate(c=Count("friends__name")) .exclude(friends__name="Joe") .count(), Author.objects.count(), ) def test_f_expression_annotation(self): # Books with less than 200 pages per author. 
qs = ( Book.objects.values("name") .annotate(n_authors=Count("authors")) .filter(pages__lt=F("n_authors") * 200) .values_list("pk") ) self.assertQuerySetEqual( Book.objects.filter(pk__in=qs), ["Python Web Development with Django"], attrgetter("name"), ) def test_values_annotate_values(self): qs = ( Book.objects.values("name") .annotate(n_authors=Count("authors")) .values_list("pk", flat=True) .order_by("name") ) self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True))) def test_having_group_by(self): # When a field occurs on the LHS of a HAVING clause that it # appears correctly in the GROUP BY clause qs = ( Book.objects.values_list("name") .annotate(n_authors=Count("authors")) .filter(pages__gt=F("n_authors")) .values_list("name", flat=True) .order_by("name") ) # Results should be the same, all Books have more pages than authors self.assertEqual(list(qs), list(Book.objects.values_list("name", flat=True))) def test_values_list_annotation_args_ordering(self): """ Annotate *args ordering should be preserved in values_list results. **kwargs comes after *args. Regression test for #23659. """ books = ( Book.objects.values_list("publisher__name") .annotate( Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages") ) .order_by("-publisher__name") ) self.assertEqual(books[0], ("Sams", 1, Decimal("23.09"), 45.0, 528.0)) def test_annotation_disjunction(self): qs = ( Book.objects.annotate(n_authors=Count("authors")) .filter(Q(n_authors=2) | Q(name="Python Web Development with Django")) .order_by("name") ) self.assertQuerySetEqual( qs, [ "Artificial Intelligence: A Modern Approach", "Python Web Development with Django", "The Definitive Guide to Django: Web Development Done Right", ], attrgetter("name"), ) qs = ( Book.objects.annotate(n_authors=Count("authors")).filter( Q(name="The Definitive Guide to Django: Web Development Done Right") | ( Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3) ) ) ).order_by("name") self.assertQuerySetEqual( qs, [ "The Definitive Guide to Django: Web Development Done Right", ], attrgetter("name"), ) qs = ( Publisher.objects.annotate( rating_sum=Sum("book__rating"), book_count=Count("book") ) .filter(Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)) .order_by("pk") ) self.assertQuerySetEqual( qs, [ "Apress", "Prentice Hall", "Jonno's House of Books", ], attrgetter("name"), ) qs = ( Publisher.objects.annotate( rating_sum=Sum("book__rating"), book_count=Count("book") ) .filter(Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None)) .order_by("num_awards") ) self.assertQuerySetEqual( qs, [ "Jonno's House of Books", "Sams", "Apress", "Prentice Hall", "Morgan Kaufmann", ], attrgetter("name"), ) def test_quoting_aggregate_order_by(self): qs = ( Book.objects.filter(name="Python Web Development with Django") .annotate(authorCount=Count("authors")) .order_by("authorCount") ) self.assertQuerySetEqual( qs, [ ("Python Web Development with Django", 3), ], lambda b: (b.name, b.authorCount), ) def test_stddev(self): self.assertEqual( Book.objects.aggregate(StdDev("pages")), {"pages__stddev": Approximate(311.46, 1)}, ) self.assertEqual( Book.objects.aggregate(StdDev("rating")), {"rating__stddev": Approximate(0.60, 1)}, ) self.assertEqual( Book.objects.aggregate(StdDev("price")), {"price__stddev": Approximate(Decimal("24.16"), 2)}, ) self.assertEqual( Book.objects.aggregate(StdDev("pages", sample=True)), {"pages__stddev": Approximate(341.19, 2)}, ) self.assertEqual( Book.objects.aggregate(StdDev("rating", sample=True)), {"rating__stddev": 
Approximate(0.66, 2)}, ) self.assertEqual( Book.objects.aggregate(StdDev("price", sample=True)), {"price__stddev": Approximate(Decimal("26.46"), 1)}, ) self.assertEqual( Book.objects.aggregate(Variance("pages")), {"pages__variance": Approximate(97010.80, 1)}, ) self.assertEqual( Book.objects.aggregate(Variance("rating")), {"rating__variance": Approximate(0.36, 1)}, ) self.assertEqual( Book.objects.aggregate(Variance("price")), {"price__variance": Approximate(Decimal("583.77"), 1)}, ) self.assertEqual( Book.objects.aggregate(Variance("pages", sample=True)), {"pages__variance": Approximate(116412.96, 1)}, ) self.assertEqual( Book.objects.aggregate(Variance("rating", sample=True)), {"rating__variance": Approximate(0.44, 2)}, ) self.assertEqual( Book.objects.aggregate(Variance("price", sample=True)), {"price__variance": Approximate(Decimal("700.53"), 2)}, ) def test_filtering_by_annotation_name(self): # Regression test for #14476 # The name of the explicitly provided annotation name in this case # poses no problem qs = ( Author.objects.annotate(book_cnt=Count("book")) .filter(book_cnt=2) .order_by("name") ) self.assertQuerySetEqual(qs, ["Peter Norvig"], lambda b: b.name) # Neither in this case qs = ( Author.objects.annotate(book_count=Count("book")) .filter(book_count=2) .order_by("name") ) self.assertQuerySetEqual(qs, ["Peter Norvig"], lambda b: b.name) # This case used to fail because the ORM couldn't resolve the # automatically generated annotation name `book__count` qs = ( Author.objects.annotate(Count("book")) .filter(book__count=2) .order_by("name") ) self.assertQuerySetEqual(qs, ["Peter Norvig"], lambda b: b.name) # Referencing the auto-generated name in an aggregate() also works. self.assertEqual( Author.objects.annotate(Count("book")).aggregate(Max("book__count")), {"book__count__max": 2}, ) def test_annotate_joins(self): """ The base table's join isn't promoted to LOUTER. This could cause the query generation to fail if there is an exclude() for fk-field in the query, too. Refs #19087. """ qs = Book.objects.annotate(n=Count("pk")) self.assertIs(qs.query.alias_map["aggregation_regress_book"].join_type, None) # The query executes without problems. self.assertEqual(len(qs.exclude(publisher=-1)), 6) @skipUnlessDBFeature("allows_group_by_selected_pks") def test_aggregate_duplicate_columns(self): # Regression test for #17144 results = Author.objects.annotate(num_contacts=Count("book_contact_set")) # There should only be one GROUP BY clause, for the `id` column. # `name` and `age` should not be grouped on. _, _, group_by = results.query.get_compiler(using="default").pre_sql_setup() self.assertEqual(len(group_by), 1) self.assertIn("id", group_by[0][0]) self.assertNotIn("name", group_by[0][0]) self.assertNotIn("age", group_by[0][0]) self.assertEqual( [(a.name, a.num_contacts) for a in results.order_by("name")], [ ("Adrian Holovaty", 1), ("Brad Dayley", 1), ("Jacob Kaplan-Moss", 0), ("James Bennett", 1), ("Jeffrey Forcier", 1), ("Paul Bissex", 0), ("Peter Norvig", 2), ("Stuart Russell", 0), ("Wesley J. Chun", 0), ], ) @skipUnlessDBFeature("allows_group_by_selected_pks") def test_aggregate_duplicate_columns_only(self): # Works with only() too. 
results = Author.objects.only("id", "name").annotate( num_contacts=Count("book_contact_set") ) _, _, grouping = results.query.get_compiler(using="default").pre_sql_setup() self.assertEqual(len(grouping), 1) self.assertIn("id", grouping[0][0]) self.assertNotIn("name", grouping[0][0]) self.assertNotIn("age", grouping[0][0]) self.assertEqual( [(a.name, a.num_contacts) for a in results.order_by("name")], [ ("Adrian Holovaty", 1), ("Brad Dayley", 1), ("Jacob Kaplan-Moss", 0), ("James Bennett", 1), ("Jeffrey Forcier", 1), ("Paul Bissex", 0), ("Peter Norvig", 2), ("Stuart Russell", 0), ("Wesley J. Chun", 0), ], ) @skipUnlessDBFeature("allows_group_by_selected_pks") def test_aggregate_duplicate_columns_select_related(self): # And select_related() results = Book.objects.select_related("contact").annotate( num_authors=Count("authors") ) _, _, grouping = results.query.get_compiler(using="default").pre_sql_setup() self.assertEqual(len(grouping), 2) self.assertIn("id", grouping[0][0]) self.assertNotIn("name", grouping[0][0]) self.assertNotIn("contact", grouping[0][0]) self.assertEqual( [(b.name, b.num_authors) for b in results.order_by("name")], [ ("Artificial Intelligence: A Modern Approach", 2), ( "Paradigms of Artificial Intelligence Programming: Case Studies in " "Common Lisp", 1, ), ("Practical Django Projects", 1), ("Python Web Development with Django", 3), ("Sams Teach Yourself Django in 24 Hours", 1), ("The Definitive Guide to Django: Web Development Done Right", 2), ], ) @skipUnlessDBFeature("allows_group_by_selected_pks") def test_aggregate_unmanaged_model_columns(self): """ Unmanaged models are sometimes used to represent database views which may not allow grouping by selected primary key. """ def assertQuerysetResults(queryset): self.assertEqual( [(b.name, b.num_authors) for b in queryset.order_by("name")], [ ("Artificial Intelligence: A Modern Approach", 2), ( "Paradigms of Artificial Intelligence Programming: Case " "Studies in Common Lisp", 1, ), ("Practical Django Projects", 1), ("Python Web Development with Django", 3), ("Sams Teach Yourself Django in 24 Hours", 1), ("The Definitive Guide to Django: Web Development Done Right", 2), ], ) queryset = Book.objects.select_related("contact").annotate( num_authors=Count("authors") ) # Unmanaged origin model. with mock.patch.object(Book._meta, "managed", False): _, _, grouping = queryset.query.get_compiler( using="default" ).pre_sql_setup() self.assertEqual(len(grouping), len(Book._meta.fields) + 1) for index, field in enumerate(Book._meta.fields): self.assertIn(field.name, grouping[index][0]) self.assertIn(Author._meta.pk.name, grouping[-1][0]) assertQuerysetResults(queryset) # Unmanaged related model. with mock.patch.object(Author._meta, "managed", False): _, _, grouping = queryset.query.get_compiler( using="default" ).pre_sql_setup() self.assertEqual(len(grouping), len(Author._meta.fields) + 1) self.assertIn(Book._meta.pk.name, grouping[0][0]) for index, field in enumerate(Author._meta.fields): self.assertIn(field.name, grouping[index + 1][0]) assertQuerysetResults(queryset) @skipUnlessDBFeature("allows_group_by_selected_pks") def test_aggregate_unmanaged_model_as_tables(self): qs = Book.objects.select_related("contact").annotate( num_authors=Count("authors") ) # Force treating unmanaged models as tables. 
with mock.patch( "django.db.connection.features.allows_group_by_selected_pks_on_model", return_value=True, ): with mock.patch.object(Book._meta, "managed", False), mock.patch.object( Author._meta, "managed", False ): _, _, grouping = qs.query.get_compiler(using="default").pre_sql_setup() self.assertEqual(len(grouping), 2) self.assertIn("id", grouping[0][0]) self.assertIn("id", grouping[1][0]) self.assertQuerySetEqual( qs.order_by("name"), [ ("Artificial Intelligence: A Modern Approach", 2), ( "Paradigms of Artificial Intelligence Programming: Case " "Studies in Common Lisp", 1, ), ("Practical Django Projects", 1), ("Python Web Development with Django", 3), ("Sams Teach Yourself Django in 24 Hours", 1), ( "The Definitive Guide to Django: Web Development Done " "Right", 2, ), ], attrgetter("name", "num_authors"), ) def test_reverse_join_trimming(self): qs = Author.objects.annotate(Count("book_contact_set__contact")) self.assertIn(" JOIN ", str(qs.query)) def test_aggregation_with_generic_reverse_relation(self): """ Regression test for #10870: Aggregates with joins ignore extra filters provided by setup_joins tests aggregations with generic reverse relations """ django_book = Book.objects.get(name="Practical Django Projects") ItemTag.objects.create( object_id=django_book.id, tag="intermediate", content_type=ContentType.objects.get_for_model(django_book), ) ItemTag.objects.create( object_id=django_book.id, tag="django", content_type=ContentType.objects.get_for_model(django_book), ) # Assign a tag to model with same PK as the book above. If the JOIN # used in aggregation doesn't have content type as part of the # condition the annotation will also count the 'hi mom' tag for b. wmpk = WithManualPK.objects.create(id=django_book.pk) ItemTag.objects.create( object_id=wmpk.id, tag="hi mom", content_type=ContentType.objects.get_for_model(wmpk), ) ai_book = Book.objects.get( name__startswith="Paradigms of Artificial Intelligence" ) ItemTag.objects.create( object_id=ai_book.id, tag="intermediate", content_type=ContentType.objects.get_for_model(ai_book), ) self.assertEqual(Book.objects.aggregate(Count("tags")), {"tags__count": 3}) results = Book.objects.annotate(Count("tags")).order_by("-tags__count", "name") self.assertEqual( [(b.name, b.tags__count) for b in results], [ ("Practical Django Projects", 2), ( "Paradigms of Artificial Intelligence Programming: Case Studies in " "Common Lisp", 1, ), ("Artificial Intelligence: A Modern Approach", 0), ("Python Web Development with Django", 0), ("Sams Teach Yourself Django in 24 Hours", 0), ("The Definitive Guide to Django: Web Development Done Right", 0), ], ) def test_negated_aggregation(self): expected_results = Author.objects.exclude( pk__in=Author.objects.annotate(book_cnt=Count("book")).filter(book_cnt=2) ).order_by("name") expected_results = [a.name for a in expected_results] qs = ( Author.objects.annotate(book_cnt=Count("book")) .exclude(Q(book_cnt=2), Q(book_cnt=2)) .order_by("name") ) self.assertQuerySetEqual(qs, expected_results, lambda b: b.name) expected_results = Author.objects.exclude( pk__in=Author.objects.annotate(book_cnt=Count("book")).filter(book_cnt=2) ).order_by("name") expected_results = [a.name for a in expected_results] qs = ( Author.objects.annotate(book_cnt=Count("book")) .exclude(Q(book_cnt=2) | Q(book_cnt=2)) .order_by("name") ) self.assertQuerySetEqual(qs, expected_results, lambda b: b.name) def test_name_filters(self): qs = ( Author.objects.annotate(Count("book")) .filter(Q(book__count__exact=2) | Q(name="Adrian Holovaty")) 
.order_by("name") ) self.assertQuerySetEqual( qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name ) def test_name_expressions(self): # Aggregates are spotted correctly from F objects. # Note that Adrian's age is 34 in the fixtures, and he has one book # so both conditions match one author. qs = ( Author.objects.annotate(Count("book")) .filter(Q(name="Peter Norvig") | Q(age=F("book__count") + 33)) .order_by("name") ) self.assertQuerySetEqual( qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name ) def test_filter_aggregates_or_connector(self): q1 = Q(price__gt=50) q2 = Q(authors__count__gt=1) query = Book.objects.annotate(Count("authors")).filter(q1 | q2).order_by("pk") self.assertQuerySetEqual( query, [self.b1.pk, self.b4.pk, self.b5.pk, self.b6.pk], attrgetter("pk"), ) def test_filter_aggregates_negated_and_connector(self): q1 = Q(price__gt=50) q2 = Q(authors__count__gt=1) query = ( Book.objects.annotate(Count("authors")).filter(~(q1 & q2)).order_by("pk") ) self.assertQuerySetEqual( query, [self.b1.pk, self.b2.pk, self.b3.pk, self.b4.pk, self.b6.pk], attrgetter("pk"), ) def test_filter_aggregates_xor_connector(self): q1 = Q(price__gt=50) q2 = Q(authors__count__gt=1) query = Book.objects.annotate(Count("authors")).filter(q1 ^ q2).order_by("pk") self.assertQuerySetEqual( query, [self.b1.pk, self.b4.pk, self.b6.pk], attrgetter("pk"), ) def test_filter_aggregates_negated_xor_connector(self): q1 = Q(price__gt=50) q2 = Q(authors__count__gt=1) query = ( Book.objects.annotate(Count("authors")).filter(~(q1 ^ q2)).order_by("pk") ) self.assertQuerySetEqual( query, [self.b2.pk, self.b3.pk, self.b5.pk], attrgetter("pk"), ) def test_ticket_11293_q_immutable(self): """ Splitting a q object to parts for where/having doesn't alter the original q-object. """ q1 = Q(isbn="") q2 = Q(authors__count__gt=1) query = Book.objects.annotate(Count("authors")) query.filter(q1 | q2) self.assertEqual(len(q2.children), 1) def test_fobj_group_by(self): """ An F() object referring to related column works correctly in group by. """ qs = Book.objects.annotate(account=Count("authors")).filter( account=F("publisher__num_awards") ) self.assertQuerySetEqual( qs, ["Sams Teach Yourself Django in 24 Hours"], lambda b: b.name ) def test_annotate_reserved_word(self): """ Regression #18333 - Ensure annotated column name is properly quoted. """ vals = Book.objects.annotate(select=Count("authors__id")).aggregate( Sum("select"), Avg("select") ) self.assertEqual( vals, { "select__sum": 10, "select__avg": Approximate(1.666, places=2), }, ) def test_annotate_on_relation(self): book = Book.objects.annotate( avg_price=Avg("price"), publisher_name=F("publisher__name") ).get(pk=self.b1.pk) self.assertEqual(book.avg_price, 30.00) self.assertEqual(book.publisher_name, "Apress") def test_aggregate_on_relation(self): # A query with an existing annotation aggregation on a relation should # succeed. qs = Book.objects.annotate(avg_price=Avg("price")).aggregate( publisher_awards=Sum("publisher__num_awards") ) self.assertEqual(qs["publisher_awards"], 30) def test_annotate_distinct_aggregate(self): # There are three books with rating of 4.0 and two of the books have # the same price. Hence, the distinct removes one rating of 4.0 # from the results. 
vals1 = ( Book.objects.values("rating", "price") .distinct() .aggregate(result=Sum("rating")) ) vals2 = Book.objects.aggregate(result=Sum("rating") - Value(4.0)) self.assertEqual(vals1, vals2) def test_annotate_values_list_flat(self): """Find ages that are shared by at least two authors.""" qs = ( Author.objects.values_list("age", flat=True) .annotate(age_count=Count("age")) .filter(age_count__gt=1) ) self.assertSequenceEqual(qs, [29]) def test_allow_distinct(self): class MyAggregate(Aggregate): pass with self.assertRaisesMessage(TypeError, "MyAggregate does not allow distinct"): MyAggregate("foo", distinct=True) class DistinctAggregate(Aggregate): allow_distinct = True DistinctAggregate("foo", distinct=True) @skipUnlessDBFeature("supports_subqueries_in_group_by") def test_having_subquery_select(self): authors = Author.objects.filter(pk=self.a1.pk) books = Book.objects.annotate(Count("authors")).filter( Q(authors__in=authors) | Q(authors__count__gt=2) ) self.assertEqual(set(books), {self.b1, self.b4}) class JoinPromotionTests(TestCase): def test_ticket_21150(self): b = Bravo.objects.create() c = Charlie.objects.create(bravo=b) qs = Charlie.objects.select_related("alfa").annotate(Count("bravo__charlie")) self.assertSequenceEqual(qs, [c]) self.assertIs(qs[0].alfa, None) a = Alfa.objects.create() c.alfa = a c.save() # Force re-evaluation qs = qs.all() self.assertSequenceEqual(qs, [c]) self.assertEqual(qs[0].alfa, a) def test_existing_join_not_promoted(self): # No promotion for existing joins qs = Charlie.objects.filter(alfa__name__isnull=False).annotate( Count("alfa__name") ) self.assertIn(" INNER JOIN ", str(qs.query)) # Also, the existing join is unpromoted when doing filtering for already # promoted join. qs = Charlie.objects.annotate(Count("alfa__name")).filter( alfa__name__isnull=False ) self.assertIn(" INNER JOIN ", str(qs.query)) # But, as the join is nullable first use by annotate will be LOUTER qs = Charlie.objects.annotate(Count("alfa__name")) self.assertIn(" LEFT OUTER JOIN ", str(qs.query)) def test_non_nullable_fk_not_promoted(self): qs = Book.objects.annotate(Count("contact__name")) self.assertIn(" INNER JOIN ", str(qs.query)) class SelfReferentialFKTests(TestCase): def test_ticket_24748(self): t1 = SelfRefFK.objects.create(name="t1") SelfRefFK.objects.create(name="t2", parent=t1) SelfRefFK.objects.create(name="t3", parent=t1) self.assertQuerySetEqual( SelfRefFK.objects.annotate(num_children=Count("children")).order_by("name"), [("t1", 2), ("t2", 0), ("t3", 0)], lambda x: (x.name, x.num_children), )
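

# Editor's sketch, not part of the upstream test suite: the GROUP BY
# assertions above all lean on the same compiler hook. pre_sql_setup()
# returns an (extra_select, order_by, group_by) triple, and each group_by
# entry is a (sql, params) pair whose SQL fragment names one grouped
# column. A hypothetical helper such as the one below makes that pattern
# explicit; the name inspect_group_by is this sketch's invention, not a
# Django API.
def inspect_group_by(queryset, using="default"):
    """Return the raw SQL fragments Django will place in GROUP BY."""
    _, _, group_by = queryset.query.get_compiler(using=using).pre_sql_setup()
    return [sql for sql, _params in group_by]


# For example, mirroring test_aggregate_duplicate_columns above,
# inspect_group_by(Author.objects.annotate(num=Count("book_contact_set")))
# yields a single fragment naming the "id" column on backends where
# allows_group_by_selected_pks is enabled.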
import datetime from decimal import Decimal from django.core.exceptions import FieldDoesNotExist, FieldError from django.db.models import ( BooleanField, Case, CharField, Count, DateTimeField, DecimalField, Exists, ExpressionWrapper, F, FloatField, Func, IntegerField, Max, OuterRef, Q, Subquery, Sum, Value, When, ) from django.db.models.expressions import RawSQL from django.db.models.functions import ( Cast, Coalesce, ExtractYear, Floor, Length, Lower, Trim, ) from django.test import TestCase, skipUnlessDBFeature from django.test.utils import register_lookup from .models import ( Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket, ) class NonAggregateAnnotationTestCase(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34) cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35) cls.a3 = Author.objects.create(name="Brad Dayley", age=45) cls.a4 = Author.objects.create(name="James Bennett", age=29) cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37) cls.a6 = Author.objects.create(name="Paul Bissex", age=29) cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25) cls.a8 = Author.objects.create(name="Peter Norvig", age=57) cls.a9 = Author.objects.create(name="Stuart Russell", age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name="Apress", num_awards=3) cls.p2 = Publisher.objects.create(name="Sams", num_awards=1) cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7) cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn="159059725", name="The Definitive Guide to Django: Web Development Done Right", pages=447, rating=4.5, price=Decimal("30.00"), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6), ) cls.b2 = Book.objects.create( isbn="067232959", name="Sams Teach Yourself Django in 24 Hours", pages=528, rating=3.0, price=Decimal("23.09"), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3), ) cls.b3 = Book.objects.create( isbn="159059996", name="Practical Django Projects", pages=300, rating=4.0, price=Decimal("29.69"), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23), ) cls.b4 = Book.objects.create( isbn="013235613", name="Python Web Development with Django", pages=350, rating=4.0, price=Decimal("29.69"), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3), ) cls.b5 = Book.objects.create( isbn="013790395", name="Artificial Intelligence: A Modern Approach", pages=1132, rating=4.0, price=Decimal("82.80"), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15), ) cls.b6 = Book.objects.create( isbn="155860191", name=( "Paradigms of Artificial Intelligence Programming: Case Studies in " "Common Lisp" ), pages=946, rating=5.0, price=Decimal("75.00"), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15), ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) cls.s1 = Store.objects.create( name="Amazon.com", original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), 
friday_night_closing=datetime.time(23, 59, 59), ) cls.s2 = Store.objects.create( name="Books.com", original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59), ) cls.s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30), ) cls.s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) cls.s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) cls.s3.books.add(cls.b3, cls.b4, cls.b6) def test_basic_annotation(self): books = Book.objects.annotate(is_book=Value(1)) for book in books: self.assertEqual(book.is_book, 1) def test_basic_f_annotation(self): books = Book.objects.annotate(another_rating=F("rating")) for book in books: self.assertEqual(book.another_rating, book.rating) def test_joined_annotation(self): books = Book.objects.select_related("publisher").annotate( num_awards=F("publisher__num_awards") ) for book in books: self.assertEqual(book.num_awards, book.publisher.num_awards) def test_joined_transformed_annotation(self): Employee.objects.bulk_create( [ Employee( first_name="John", last_name="Doe", age=18, store=self.s1, salary=15000, ), Employee( first_name="Jane", last_name="Jones", age=30, store=self.s2, salary=30000, ), Employee( first_name="Jo", last_name="Smith", age=55, store=self.s3, salary=50000, ), ] ) employees = Employee.objects.annotate( store_opened_year=F("store__original_opening__year"), ) for employee in employees: self.assertEqual( employee.store_opened_year, employee.store.original_opening.year, ) def test_custom_transform_annotation(self): with register_lookup(DecimalField, Floor): books = Book.objects.annotate(floor_price=F("price__floor")) self.assertCountEqual( books.values_list("pk", "floor_price"), [ (self.b1.pk, 30), (self.b2.pk, 23), (self.b3.pk, 29), (self.b4.pk, 29), (self.b5.pk, 82), (self.b6.pk, 75), ], ) def test_chaining_transforms(self): Company.objects.create(name=" Django Software Foundation ") Company.objects.create(name="Yahoo") with register_lookup(CharField, Trim), register_lookup(CharField, Length): for expr in [Length("name__trim"), F("name__trim__length")]: with self.subTest(expr=expr): self.assertCountEqual( Company.objects.annotate(length=expr).values("name", "length"), [ {"name": " Django Software Foundation ", "length": 26}, {"name": "Yahoo", "length": 5}, ], ) def test_mixed_type_annotation_date_interval(self): active = datetime.datetime(2015, 3, 20, 14, 0, 0) duration = datetime.timedelta(hours=1) expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration Ticket.objects.create(active_at=active, duration=duration) t = Ticket.objects.annotate( expires=ExpressionWrapper( F("active_at") + F("duration"), output_field=DateTimeField() ) ).first() self.assertEqual(t.expires, expires) def test_mixed_type_annotation_numbers(self): test = self.b1 b = Book.objects.annotate( combined=ExpressionWrapper( F("pages") + F("rating"), output_field=IntegerField() ) ).get(isbn=test.isbn) combined = int(test.pages + test.rating) self.assertEqual(b.combined, combined) def test_empty_expression_annotation(self): books = Book.objects.annotate( selected=ExpressionWrapper(Q(pk__in=[]), output_field=BooleanField()) ) self.assertEqual(len(books), Book.objects.count()) self.assertTrue(all(not book.selected for book in books)) books = Book.objects.annotate( selected=ExpressionWrapper( Q(pk__in=Book.objects.none()), output_field=BooleanField() ) ) self.assertEqual(len(books), Book.objects.count()) 
self.assertTrue(all(not book.selected for book in books)) def test_full_expression_annotation(self): books = Book.objects.annotate( selected=ExpressionWrapper(~Q(pk__in=[]), output_field=BooleanField()), ) self.assertEqual(len(books), Book.objects.count()) self.assertTrue(all(book.selected for book in books)) def test_full_expression_wrapped_annotation(self): books = Book.objects.annotate( selected=Coalesce(~Q(pk__in=[]), True), ) self.assertEqual(len(books), Book.objects.count()) self.assertTrue(all(book.selected for book in books)) def test_full_expression_annotation_with_aggregation(self): qs = Book.objects.filter(isbn="159059725").annotate( selected=ExpressionWrapper(~Q(pk__in=[]), output_field=BooleanField()), rating_count=Count("rating"), ) self.assertEqual([book.rating_count for book in qs], [1]) def test_aggregate_over_full_expression_annotation(self): qs = Book.objects.annotate( selected=ExpressionWrapper(~Q(pk__in=[]), output_field=BooleanField()), ).aggregate(selected__sum=Sum(Cast("selected", IntegerField()))) self.assertEqual(qs["selected__sum"], Book.objects.count()) def test_empty_queryset_annotation(self): qs = Author.objects.annotate(empty=Subquery(Author.objects.values("id").none())) self.assertIsNone(qs.first().empty) def test_annotate_with_aggregation(self): books = Book.objects.annotate(is_book=Value(1), rating_count=Count("rating")) for book in books: self.assertEqual(book.is_book, 1) self.assertEqual(book.rating_count, 1) def test_combined_expression_annotation_with_aggregation(self): book = Book.objects.annotate( combined=ExpressionWrapper( Value(3) * Value(4), output_field=IntegerField() ), rating_count=Count("rating"), ).first() self.assertEqual(book.combined, 12) self.assertEqual(book.rating_count, 1) def test_combined_f_expression_annotation_with_aggregation(self): book = ( Book.objects.filter(isbn="159059725") .annotate( combined=ExpressionWrapper( F("price") * F("pages"), output_field=FloatField() ), rating_count=Count("rating"), ) .first() ) self.assertEqual(book.combined, 13410.0) self.assertEqual(book.rating_count, 1) @skipUnlessDBFeature("supports_boolean_expr_in_select_clause") def test_q_expression_annotation_with_aggregation(self): book = ( Book.objects.filter(isbn="159059725") .annotate( isnull_pubdate=ExpressionWrapper( Q(pubdate__isnull=True), output_field=BooleanField(), ), rating_count=Count("rating"), ) .first() ) self.assertIs(book.isnull_pubdate, False) self.assertEqual(book.rating_count, 1) @skipUnlessDBFeature("supports_boolean_expr_in_select_clause") def test_grouping_by_q_expression_annotation(self): authors = ( Author.objects.annotate( under_40=ExpressionWrapper(Q(age__lt=40), output_field=BooleanField()), ) .values("under_40") .annotate( count_id=Count("id"), ) .values("under_40", "count_id") ) self.assertCountEqual( authors, [ {"under_40": False, "count_id": 3}, {"under_40": True, "count_id": 6}, ], ) def test_aggregate_over_annotation(self): agg = Author.objects.annotate(other_age=F("age")).aggregate( otherage_sum=Sum("other_age") ) other_agg = Author.objects.aggregate(age_sum=Sum("age")) self.assertEqual(agg["otherage_sum"], other_agg["age_sum"]) @skipUnlessDBFeature("can_distinct_on_fields") def test_distinct_on_with_annotation(self): store = Store.objects.create( name="test store", original_opening=datetime.datetime.now(), friday_night_closing=datetime.time(21, 00, 00), ) names = [ "Theodore Roosevelt", "Eleanor Roosevelt", "Franklin Roosevelt", "Ned Stark", "Catelyn Stark", ] for name in names: Employee.objects.create( store=store, 
first_name=name.split()[0], last_name=name.split()[1], age=30, salary=2000, ) people = Employee.objects.annotate( name_lower=Lower("last_name"), ).distinct("name_lower") self.assertEqual({p.last_name for p in people}, {"Stark", "Roosevelt"}) self.assertEqual(len(people), 2) people2 = Employee.objects.annotate( test_alias=F("store__name"), ).distinct("test_alias") self.assertEqual(len(people2), 1) lengths = ( Employee.objects.annotate( name_len=Length("first_name"), ) .distinct("name_len") .values_list("name_len", flat=True) ) self.assertCountEqual(lengths, [3, 7, 8]) def test_filter_annotation(self): books = Book.objects.annotate(is_book=Value(1)).filter(is_book=1) for book in books: self.assertEqual(book.is_book, 1) def test_filter_annotation_with_f(self): books = Book.objects.annotate(other_rating=F("rating")).filter(other_rating=3.5) for book in books: self.assertEqual(book.other_rating, 3.5) def test_filter_annotation_with_double_f(self): books = Book.objects.annotate(other_rating=F("rating")).filter( other_rating=F("rating") ) for book in books: self.assertEqual(book.other_rating, book.rating) def test_filter_agg_with_double_f(self): books = Book.objects.annotate(sum_rating=Sum("rating")).filter( sum_rating=F("sum_rating") ) for book in books: self.assertEqual(book.sum_rating, book.rating) def test_filter_wrong_annotation(self): with self.assertRaisesMessage( FieldError, "Cannot resolve keyword 'nope' into field." ): list( Book.objects.annotate(sum_rating=Sum("rating")).filter( sum_rating=F("nope") ) ) def test_decimal_annotation(self): salary = Decimal(10) ** -Employee._meta.get_field("salary").decimal_places Employee.objects.create( first_name="Max", last_name="Paine", store=Store.objects.first(), age=23, salary=salary, ) self.assertEqual( Employee.objects.annotate(new_salary=F("salary") / 10).get().new_salary, salary / 10, ) def test_filter_decimal_annotation(self): qs = ( Book.objects.annotate(new_price=F("price") + 1) .filter(new_price=Decimal(31)) .values_list("new_price") ) self.assertEqual(qs.get(), (Decimal(31),)) def test_combined_annotation_commutative(self): book1 = Book.objects.annotate(adjusted_rating=F("rating") + 2).get( pk=self.b1.pk ) book2 = Book.objects.annotate(adjusted_rating=2 + F("rating")).get( pk=self.b1.pk ) self.assertEqual(book1.adjusted_rating, book2.adjusted_rating) book1 = Book.objects.annotate(adjusted_rating=F("rating") + None).get( pk=self.b1.pk ) book2 = Book.objects.annotate(adjusted_rating=None + F("rating")).get( pk=self.b1.pk ) self.assertIs(book1.adjusted_rating, None) self.assertEqual(book1.adjusted_rating, book2.adjusted_rating) def test_update_with_annotation(self): book_preupdate = Book.objects.get(pk=self.b2.pk) Book.objects.annotate(other_rating=F("rating") - 1).update( rating=F("other_rating") ) book_postupdate = Book.objects.get(pk=self.b2.pk) self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating) def test_annotation_with_m2m(self): books = ( Book.objects.annotate(author_age=F("authors__age")) .filter(pk=self.b1.pk) .order_by("author_age") ) self.assertEqual(books[0].author_age, 34) self.assertEqual(books[1].author_age, 35) def test_annotation_reverse_m2m(self): books = ( Book.objects.annotate( store_name=F("store__name"), ) .filter( name="Practical Django Projects", ) .order_by("store_name") ) self.assertQuerySetEqual( books, ["Amazon.com", "Books.com", "Mamma and Pappa's Books"], lambda b: b.store_name, ) def test_values_annotation(self): """ Annotations can reference fields in a values clause, and contribute to an 
existing values clause. """ # annotate references a field in values() qs = Book.objects.values("rating").annotate(other_rating=F("rating") - 1) book = qs.get(pk=self.b1.pk) self.assertEqual(book["rating"] - 1, book["other_rating"]) # filter refs the annotated value book = qs.get(other_rating=4) self.assertEqual(book["other_rating"], 4) # can annotate an existing values with a new field book = qs.annotate(other_isbn=F("isbn")).get(other_rating=4) self.assertEqual(book["other_rating"], 4) self.assertEqual(book["other_isbn"], "155860191") def test_values_with_pk_annotation(self): # annotate references a field in values() with pk publishers = Publisher.objects.values("id", "book__rating").annotate( total=Sum("book__rating") ) for publisher in publishers.filter(pk=self.p1.pk): self.assertEqual(publisher["book__rating"], publisher["total"]) def test_defer_annotation(self): """ Deferred attributes can be referenced by an annotation, but they are not themselves deferred, and cannot be deferred. """ qs = Book.objects.defer("rating").annotate(other_rating=F("rating") - 1) with self.assertNumQueries(2): book = qs.get(other_rating=4) self.assertEqual(book.rating, 5) self.assertEqual(book.other_rating, 4) with self.assertRaisesMessage( FieldDoesNotExist, "Book has no field named 'other_rating'" ): book = qs.defer("other_rating").get(other_rating=4) def test_mti_annotations(self): """ Fields on an inherited model can be referenced by an annotated field. """ d = DepartmentStore.objects.create( name="Angus & Robinson", original_opening=datetime.date(2014, 3, 8), friday_night_closing=datetime.time(21, 00, 00), chain="Westfield", ) books = Book.objects.filter(rating__gt=4) for b in books: d.books.add(b) qs = ( DepartmentStore.objects.annotate( other_name=F("name"), other_chain=F("chain"), is_open=Value(True, BooleanField()), book_isbn=F("books__isbn"), ) .order_by("book_isbn") .filter(chain="Westfield") ) self.assertQuerySetEqual( qs, [ ("Angus & Robinson", "Westfield", True, "155860191"), ("Angus & Robinson", "Westfield", True, "159059725"), ], lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn), ) def test_null_annotation(self): """ Annotating None onto a model round-trips """ book = Book.objects.annotate( no_value=Value(None, output_field=IntegerField()) ).first() self.assertIsNone(book.no_value) def test_order_by_annotation(self): authors = Author.objects.annotate(other_age=F("age")).order_by("other_age") self.assertQuerySetEqual( authors, [ 25, 29, 29, 34, 35, 37, 45, 46, 57, ], lambda a: a.other_age, ) def test_order_by_aggregate(self): authors = ( Author.objects.values("age") .annotate(age_count=Count("age")) .order_by("age_count", "age") ) self.assertQuerySetEqual( authors, [ (25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2), ], lambda a: (a["age"], a["age_count"]), ) def test_raw_sql_with_inherited_field(self): DepartmentStore.objects.create( name="Angus & Robinson", original_opening=datetime.date(2014, 3, 8), friday_night_closing=datetime.time(21), chain="Westfield", area=123, ) tests = ( ("name", "Angus & Robinson"), ("surface", 123), ("case when name='Angus & Robinson' then chain else name end", "Westfield"), ) for sql, expected_result in tests: with self.subTest(sql=sql): self.assertSequenceEqual( DepartmentStore.objects.annotate( annotation=RawSQL(sql, ()), ).values_list("annotation", flat=True), [expected_result], ) def test_annotate_exists(self): authors = Author.objects.annotate(c=Count("id")).filter(c__gt=1) self.assertFalse(authors.exists()) def 
test_column_field_ordering(self): """ Columns are aligned in the correct order for resolve_columns. This test will fail on MySQL if column ordering is out. Column fields should be aligned as: 1. extra_select 2. model_fields 3. annotation_fields 4. model_related_fields """ store = Store.objects.first() Employee.objects.create( id=1, first_name="Max", manager=True, last_name="Paine", store=store, age=23, salary=Decimal(50000.00), ) Employee.objects.create( id=2, first_name="Buffy", manager=False, last_name="Summers", store=store, age=18, salary=Decimal(40000.00), ) qs = ( Employee.objects.extra(select={"random_value": "42"}) .select_related("store") .annotate( annotated_value=Value(17), ) ) rows = [ (1, "Max", True, 42, "Paine", 23, Decimal(50000.00), store.name, 17), (2, "Buffy", False, 42, "Summers", 18, Decimal(40000.00), store.name, 17), ] self.assertQuerySetEqual( qs.order_by("id"), rows, lambda e: ( e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age, e.salary, e.store.name, e.annotated_value, ), ) def test_column_field_ordering_with_deferred(self): store = Store.objects.first() Employee.objects.create( id=1, first_name="Max", manager=True, last_name="Paine", store=store, age=23, salary=Decimal(50000.00), ) Employee.objects.create( id=2, first_name="Buffy", manager=False, last_name="Summers", store=store, age=18, salary=Decimal(40000.00), ) qs = ( Employee.objects.extra(select={"random_value": "42"}) .select_related("store") .annotate( annotated_value=Value(17), ) ) rows = [ (1, "Max", True, 42, "Paine", 23, Decimal(50000.00), store.name, 17), (2, "Buffy", False, 42, "Summers", 18, Decimal(40000.00), store.name, 17), ] # and we respect deferred columns! self.assertQuerySetEqual( qs.defer("age").order_by("id"), rows, lambda e: ( e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age, e.salary, e.store.name, e.annotated_value, ), ) def test_custom_functions(self): Company( name="Apple", motto=None, ticker_name="APPL", description="Beautiful Devices", ).save() Company( name="Django Software Foundation", motto=None, ticker_name=None, description=None, ).save() Company( name="Google", motto="Do No Evil", ticker_name="GOOG", description="Internet Company", ).save() Company( name="Yahoo", motto=None, ticker_name=None, description="Internet Company" ).save() qs = Company.objects.annotate( tagline=Func( F("motto"), F("ticker_name"), F("description"), Value("No Tag"), function="COALESCE", ) ).order_by("name") self.assertQuerySetEqual( qs, [ ("Apple", "APPL"), ("Django Software Foundation", "No Tag"), ("Google", "Do No Evil"), ("Yahoo", "Internet Company"), ], lambda c: (c.name, c.tagline), ) def test_custom_functions_can_ref_other_functions(self): Company( name="Apple", motto=None, ticker_name="APPL", description="Beautiful Devices", ).save() Company( name="Django Software Foundation", motto=None, ticker_name=None, description=None, ).save() Company( name="Google", motto="Do No Evil", ticker_name="GOOG", description="Internet Company", ).save() Company( name="Yahoo", motto=None, ticker_name=None, description="Internet Company" ).save() class Lower(Func): function = "LOWER" qs = ( Company.objects.annotate( tagline=Func( F("motto"), F("ticker_name"), F("description"), Value("No Tag"), function="COALESCE", ) ) .annotate( tagline_lower=Lower(F("tagline")), ) .order_by("name") ) # LOWER function supported by: # oracle, postgres, mysql, sqlite, sqlserver self.assertQuerySetEqual( qs, [ ("Apple", "APPL".lower()), ("Django Software Foundation", "No Tag".lower()), ("Google", "Do 
No Evil".lower()), ("Yahoo", "Internet Company".lower()), ], lambda c: (c.name, c.tagline_lower), ) def test_boolean_value_annotation(self): books = Book.objects.annotate( is_book=Value(True, output_field=BooleanField()), is_pony=Value(False, output_field=BooleanField()), is_none=Value(None, output_field=BooleanField(null=True)), ) self.assertGreater(len(books), 0) for book in books: self.assertIs(book.is_book, True) self.assertIs(book.is_pony, False) self.assertIsNone(book.is_none) def test_annotation_in_f_grouped_by_annotation(self): qs = ( Publisher.objects.annotate(multiplier=Value(3)) # group by option => sum of value * multiplier .values("name") .annotate(multiplied_value_sum=Sum(F("multiplier") * F("num_awards"))) .order_by() ) self.assertCountEqual( qs, [ {"multiplied_value_sum": 9, "name": "Apress"}, {"multiplied_value_sum": 0, "name": "Jonno's House of Books"}, {"multiplied_value_sum": 27, "name": "Morgan Kaufmann"}, {"multiplied_value_sum": 21, "name": "Prentice Hall"}, {"multiplied_value_sum": 3, "name": "Sams"}, ], ) def test_arguments_must_be_expressions(self): msg = "QuerySet.annotate() received non-expression(s): %s." with self.assertRaisesMessage(TypeError, msg % BooleanField()): Book.objects.annotate(BooleanField()) with self.assertRaisesMessage(TypeError, msg % True): Book.objects.annotate(is_book=True) with self.assertRaisesMessage( TypeError, msg % ", ".join([str(BooleanField()), "True"]) ): Book.objects.annotate(BooleanField(), Value(False), is_book=True) def test_chaining_annotation_filter_with_m2m(self): qs = ( Author.objects.filter( name="Adrian Holovaty", friends__age=35, ) .annotate( jacob_name=F("friends__name"), ) .filter( friends__age=29, ) .annotate( james_name=F("friends__name"), ) .values("jacob_name", "james_name") ) self.assertCountEqual( qs, [{"jacob_name": "Jacob Kaplan-Moss", "james_name": "James Bennett"}], ) def test_annotation_filter_with_subquery(self): long_books_qs = ( Book.objects.filter( publisher=OuterRef("pk"), pages__gt=400, ) .values("publisher") .annotate(count=Count("pk")) .values("count") ) publisher_books_qs = ( Publisher.objects.annotate( total_books=Count("book"), ) .filter( total_books=Subquery(long_books_qs, output_field=IntegerField()), ) .values("name") ) self.assertCountEqual( publisher_books_qs, [{"name": "Sams"}, {"name": "Morgan Kaufmann"}] ) def test_annotation_and_alias_filter_in_subquery(self): awarded_publishers_qs = ( Publisher.objects.filter(num_awards__gt=4) .annotate(publisher_annotate=Value(1)) .alias(publisher_alias=Value(1)) ) qs = Publisher.objects.filter(pk__in=awarded_publishers_qs) self.assertCountEqual(qs, [self.p3, self.p4]) def test_annotation_and_alias_filter_related_in_subquery(self): long_books_qs = ( Book.objects.filter(pages__gt=400) .annotate(book_annotate=Value(1)) .alias(book_alias=Value(1)) ) publisher_books_qs = Publisher.objects.filter( book__in=long_books_qs, ).values("name") self.assertCountEqual( publisher_books_qs, [ {"name": "Apress"}, {"name": "Sams"}, {"name": "Prentice Hall"}, {"name": "Morgan Kaufmann"}, ], ) def test_annotation_exists_aggregate_values_chaining(self): qs = ( Book.objects.values("publisher") .annotate( has_authors=Exists( Book.authors.through.objects.filter(book=OuterRef("pk")) ), max_pubdate=Max("pubdate"), ) .values_list("max_pubdate", flat=True) .order_by("max_pubdate") ) self.assertCountEqual( qs, [ datetime.date(1991, 10, 15), datetime.date(2008, 3, 3), datetime.date(2008, 6, 23), datetime.date(2008, 11, 3), ], ) @skipUnlessDBFeature("supports_subqueries_in_group_by") 
def test_annotation_subquery_and_aggregate_values_chaining(self): qs = ( Book.objects.annotate(pub_year=ExtractYear("pubdate")) .values("pub_year") .annotate( top_rating=Subquery( Book.objects.filter(pubdate__year=OuterRef("pub_year")) .order_by("-rating") .values("rating")[:1] ), total_pages=Sum("pages"), ) .values("pub_year", "total_pages", "top_rating") ) self.assertCountEqual( qs, [ {"pub_year": 1991, "top_rating": 5.0, "total_pages": 946}, {"pub_year": 1995, "top_rating": 4.0, "total_pages": 1132}, {"pub_year": 2007, "top_rating": 4.5, "total_pages": 447}, {"pub_year": 2008, "top_rating": 4.0, "total_pages": 1178}, ], ) def test_annotation_subquery_outerref_transform(self): qs = Book.objects.annotate( top_rating_year=Subquery( Book.objects.filter(pubdate__year=OuterRef("pubdate__year")) .order_by("-rating") .values("rating")[:1] ), ).values("pubdate__year", "top_rating_year") self.assertCountEqual( qs, [ {"pubdate__year": 1991, "top_rating_year": 5.0}, {"pubdate__year": 1995, "top_rating_year": 4.0}, {"pubdate__year": 2007, "top_rating_year": 4.5}, {"pubdate__year": 2008, "top_rating_year": 4.0}, {"pubdate__year": 2008, "top_rating_year": 4.0}, {"pubdate__year": 2008, "top_rating_year": 4.0}, ], ) def test_annotation_aggregate_with_m2o(self): qs = ( Author.objects.filter(age__lt=30) .annotate( max_pages=Case( When(book_contact_set__isnull=True, then=Value(0)), default=Max(F("book__pages")), ), ) .values("name", "max_pages") ) self.assertCountEqual( qs, [ {"name": "James Bennett", "max_pages": 300}, {"name": "Paul Bissex", "max_pages": 0}, {"name": "Wesley J. Chun", "max_pages": 0}, ], ) def test_alias_sql_injection(self): crafted_alias = """injected_name" from "annotations_book"; --""" msg = ( "Column aliases cannot contain whitespace characters, quotation marks, " "semicolons, or SQL comments." ) with self.assertRaisesMessage(ValueError, msg): Book.objects.annotate(**{crafted_alias: Value(1)}) def test_alias_forbidden_chars(self): tests = [ 'al"ias', "a'lias", "ali`as", "alia s", "alias\t", "ali\nas", "alias--", "ali/*as", "alias*/", "alias;", # [] are used by MSSQL. "alias[", "alias]", ] msg = ( "Column aliases cannot contain whitespace characters, quotation marks, " "semicolons, or SQL comments." 
) for crafted_alias in tests: with self.subTest(crafted_alias): with self.assertRaisesMessage(ValueError, msg): Book.objects.annotate(**{crafted_alias: Value(1)}) class AliasTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34) cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35) cls.a3 = Author.objects.create(name="James Bennett", age=34) cls.a4 = Author.objects.create(name="Peter Norvig", age=57) cls.a5 = Author.objects.create(name="Stuart Russell", age=46) p1 = Publisher.objects.create(name="Apress", num_awards=3) cls.b1 = Book.objects.create( isbn="159059725", pages=447, rating=4.5, price=Decimal("30.00"), contact=cls.a1, publisher=p1, pubdate=datetime.date(2007, 12, 6), name="The Definitive Guide to Django: Web Development Done Right", ) cls.b2 = Book.objects.create( isbn="159059996", pages=300, rating=4.0, price=Decimal("29.69"), contact=cls.a3, publisher=p1, pubdate=datetime.date(2008, 6, 23), name="Practical Django Projects", ) cls.b3 = Book.objects.create( isbn="013790395", pages=1132, rating=4.0, price=Decimal("82.80"), contact=cls.a4, publisher=p1, pubdate=datetime.date(1995, 1, 15), name="Artificial Intelligence: A Modern Approach", ) cls.b4 = Book.objects.create( isbn="155860191", pages=946, rating=5.0, price=Decimal("75.00"), contact=cls.a4, publisher=p1, pubdate=datetime.date(1991, 10, 15), name=( "Paradigms of Artificial Intelligence Programming: Case Studies in " "Common Lisp" ), ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4, cls.a5) cls.b4.authors.add(cls.a4) Store.objects.create( name="Amazon.com", original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), friday_night_closing=datetime.time(23, 59, 59), ) Store.objects.create( name="Books.com", original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59), ) def test_basic_alias(self): qs = Book.objects.alias(is_book=Value(1)) self.assertIs(hasattr(qs.first(), "is_book"), False) def test_basic_alias_annotation(self): qs = Book.objects.alias( is_book_alias=Value(1), ).annotate(is_book=F("is_book_alias")) self.assertIs(hasattr(qs.first(), "is_book_alias"), False) for book in qs: with self.subTest(book=book): self.assertEqual(book.is_book, 1) def test_basic_alias_f_annotation(self): qs = Book.objects.alias(another_rating_alias=F("rating")).annotate( another_rating=F("another_rating_alias") ) self.assertIs(hasattr(qs.first(), "another_rating_alias"), False) for book in qs: with self.subTest(book=book): self.assertEqual(book.another_rating, book.rating) def test_basic_alias_f_transform_annotation(self): qs = Book.objects.alias( pubdate_alias=F("pubdate"), ).annotate(pubdate_year=F("pubdate_alias__year")) self.assertIs(hasattr(qs.first(), "pubdate_alias"), False) for book in qs: with self.subTest(book=book): self.assertEqual(book.pubdate_year, book.pubdate.year) def test_alias_after_annotation(self): qs = Book.objects.annotate( is_book=Value(1), ).alias(is_book_alias=F("is_book")) book = qs.first() self.assertIs(hasattr(book, "is_book"), True) self.assertIs(hasattr(book, "is_book_alias"), False) def test_overwrite_annotation_with_alias(self): qs = Book.objects.annotate(is_book=Value(1)).alias(is_book=F("is_book")) self.assertIs(hasattr(qs.first(), "is_book"), False) def test_overwrite_alias_with_annotation(self): qs = Book.objects.alias(is_book=Value(1)).annotate(is_book=F("is_book")) for book in qs: with self.subTest(book=book): self.assertEqual(book.is_book, 1) 
def test_alias_annotation_expression(self): qs = Book.objects.alias( is_book_alias=Value(1), ).annotate(is_book=Coalesce("is_book_alias", 0)) self.assertIs(hasattr(qs.first(), "is_book_alias"), False) for book in qs: with self.subTest(book=book): self.assertEqual(book.is_book, 1) def test_alias_default_alias_expression(self): qs = Author.objects.alias( Sum("book__pages"), ).filter(book__pages__sum__gt=2000) self.assertIs(hasattr(qs.first(), "book__pages__sum"), False) self.assertSequenceEqual(qs, [self.a4]) def test_joined_alias_annotation(self): qs = ( Book.objects.select_related("publisher") .alias( num_awards_alias=F("publisher__num_awards"), ) .annotate(num_awards=F("num_awards_alias")) ) self.assertIs(hasattr(qs.first(), "num_awards_alias"), False) for book in qs: with self.subTest(book=book): self.assertEqual(book.num_awards, book.publisher.num_awards) def test_alias_annotate_with_aggregation(self): qs = Book.objects.alias( is_book_alias=Value(1), rating_count_alias=Count("rating"), ).annotate( is_book=F("is_book_alias"), rating_count=F("rating_count_alias"), ) book = qs.first() self.assertIs(hasattr(book, "is_book_alias"), False) self.assertIs(hasattr(book, "rating_count_alias"), False) for book in qs: with self.subTest(book=book): self.assertEqual(book.is_book, 1) self.assertEqual(book.rating_count, 1) def test_filter_alias_with_f(self): qs = Book.objects.alias( other_rating=F("rating"), ).filter(other_rating=4.5) self.assertIs(hasattr(qs.first(), "other_rating"), False) self.assertSequenceEqual(qs, [self.b1]) def test_filter_alias_with_double_f(self): qs = Book.objects.alias( other_rating=F("rating"), ).filter(other_rating=F("rating")) self.assertIs(hasattr(qs.first(), "other_rating"), False) self.assertEqual(qs.count(), Book.objects.count()) def test_filter_alias_agg_with_double_f(self): qs = Book.objects.alias( sum_rating=Sum("rating"), ).filter(sum_rating=F("sum_rating")) self.assertIs(hasattr(qs.first(), "sum_rating"), False) self.assertEqual(qs.count(), Book.objects.count()) def test_update_with_alias(self): Book.objects.alias( other_rating=F("rating") - 1, ).update(rating=F("other_rating")) self.b1.refresh_from_db() self.assertEqual(self.b1.rating, 3.5) def test_order_by_alias(self): qs = Author.objects.alias(other_age=F("age")).order_by("other_age") self.assertIs(hasattr(qs.first(), "other_age"), False) self.assertQuerySetEqual(qs, [34, 34, 35, 46, 57], lambda a: a.age) def test_order_by_alias_aggregate(self): qs = ( Author.objects.values("age") .alias(age_count=Count("age")) .order_by("age_count", "age") ) self.assertIs(hasattr(qs.first(), "age_count"), False) self.assertQuerySetEqual(qs, [35, 46, 57, 34], lambda a: a["age"]) def test_dates_alias(self): qs = Book.objects.alias( pubdate_alias=F("pubdate"), ).dates("pubdate_alias", "month") self.assertCountEqual( qs, [ datetime.date(1991, 10, 1), datetime.date(1995, 1, 1), datetime.date(2007, 12, 1), datetime.date(2008, 6, 1), ], ) def test_datetimes_alias(self): qs = Store.objects.alias( original_opening_alias=F("original_opening"), ).datetimes("original_opening_alias", "year") self.assertCountEqual( qs, [ datetime.datetime(1994, 1, 1), datetime.datetime(2001, 1, 1), ], ) def test_aggregate_alias(self): msg = ( "Cannot aggregate over the 'other_age' alias. Use annotate() to promote it." 
) with self.assertRaisesMessage(FieldError, msg): Author.objects.alias( other_age=F("age"), ).aggregate(otherage_sum=Sum("other_age")) def test_defer_only_alias(self): qs = Book.objects.alias(rating_alias=F("rating") - 1) msg = "Book has no field named 'rating_alias'" for operation in ["defer", "only"]: with self.subTest(operation=operation): with self.assertRaisesMessage(FieldDoesNotExist, msg): getattr(qs, operation)("rating_alias").first() @skipUnlessDBFeature("can_distinct_on_fields") def test_distinct_on_alias(self): qs = Book.objects.alias(rating_alias=F("rating") - 1) msg = "Cannot resolve keyword 'rating_alias' into field." with self.assertRaisesMessage(FieldError, msg): qs.distinct("rating_alias").first() def test_values_alias(self): qs = Book.objects.alias(rating_alias=F("rating") - 1) msg = "Cannot select the 'rating_alias' alias. Use annotate() to promote it." for operation in ["values", "values_list"]: with self.subTest(operation=operation): with self.assertRaisesMessage(FieldError, msg): getattr(qs, operation)("rating_alias") def test_alias_sql_injection(self): crafted_alias = """injected_name" from "annotations_book"; --""" msg = ( "Column aliases cannot contain whitespace characters, quotation marks, " "semicolons, or SQL comments." ) with self.assertRaisesMessage(ValueError, msg): Book.objects.alias(**{crafted_alias: Value(1)})
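

# Editor's sketch, not part of the upstream test suite, condensing the
# annotate()/alias() contrast that AliasTests exercises: alias() computes
# an expression that later filter(), order_by(), or annotate() calls may
# reference, but the value is never selected, so it does not appear as an
# attribute on returned instances; annotate() both computes and selects it.
# cheap_books is a hypothetical helper; it assumes the Book model and the
# F import at the top of this file.
def cheap_books(max_price):
    # The aliased expression participates only in the WHERE clause here...
    qs = Book.objects.alias(final_price=F("price") - 1).filter(
        final_price__lt=max_price
    )
    # ...and is promoted with annotate() so instances expose it, mirroring
    # test_overwrite_alias_with_annotation above.
    return qs.annotate(final_price=F("final_price"))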
import json from django.contrib.contenttypes.fields import GenericForeignKey from django.db import models from django.test import TestCase from django.test.utils import isolate_apps from .models import Answer, Post, Question @isolate_apps("contenttypes_tests") class GenericForeignKeyTests(TestCase): def test_str(self): class Model(models.Model): field = GenericForeignKey() self.assertEqual(str(Model.field), "contenttypes_tests.Model.field") def test_get_content_type_no_arguments(self): with self.assertRaisesMessage( Exception, "Impossible arguments to GFK.get_content_type!" ): Answer.question.get_content_type() def test_incorrect_get_prefetch_queryset_arguments(self): with self.assertRaisesMessage( ValueError, "Custom queryset can't be used for this lookup." ): Answer.question.get_prefetch_queryset( Answer.objects.all(), Answer.objects.all() ) def test_get_object_cache_respects_deleted_objects(self): question = Question.objects.create(text="Who?") post = Post.objects.create(title="Answer", parent=question) question_pk = question.pk Question.objects.all().delete() post = Post.objects.get(pk=post.pk) with self.assertNumQueries(1): self.assertEqual(post.object_id, question_pk) self.assertIsNone(post.parent) self.assertIsNone(post.parent) def test_clear_cached_generic_relation(self): question = Question.objects.create(text="What is your name?") answer = Answer.objects.create(text="Answer", question=question) old_entity = answer.question answer.refresh_from_db() new_entity = answer.question self.assertIsNot(old_entity, new_entity) class GenericRelationTests(TestCase): def test_value_to_string(self): question = Question.objects.create(text="test") answer1 = Answer.objects.create(question=question) answer2 = Answer.objects.create(question=question) result = json.loads(Question.answer_set.field.value_to_string(question)) self.assertCountEqual(result, [answer1.pk, answer2.pk])
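

# Editor's sketch, not part of the upstream test suite: the canonical
# three-part GenericForeignKey wiring that the tests above depend on is a
# ContentType foreign key, an object_id column, and the GFK tying them
# together. Bookmark is a hypothetical model name; isolate_apps keeps it
# out of the real app registry, matching the pattern of test_str above.
@isolate_apps("contenttypes_tests")
class GenericForeignKeySketchTests(TestCase):
    def test_gfk_parts(self):
        class Bookmark(models.Model):
            content_type = models.ForeignKey(
                "contenttypes.ContentType", models.CASCADE
            )
            object_id = models.PositiveIntegerField()
            target = GenericForeignKey("content_type", "object_id")

        # The GFK is a "private" field: reachable via _meta.get_field()
        # but backed by no column of its own.
        field = Bookmark._meta.get_field("target")
        self.assertEqual(field.ct_field, "content_type")
        self.assertEqual(field.fk_field, "object_id")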
import datetime import pickle import sys import unittest from operator import attrgetter from threading import Lock from django.core.exceptions import EmptyResultSet, FieldError, FullResultSet from django.db import DEFAULT_DB_ALIAS, connection from django.db.models import CharField, Count, Exists, F, Max, OuterRef, Q from django.db.models.expressions import RawSQL from django.db.models.functions import ExtractYear, Length, LTrim from django.db.models.sql.constants import LOUTER from django.db.models.sql.where import AND, OR, NothingNode, WhereNode from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import CaptureQueriesContext, ignore_warnings, register_lookup from django.utils.deprecation import RemovedInDjango50Warning from .models import ( FK1, Annotation, Article, Author, BaseA, BaseUser, Book, CategoryItem, CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA, Classroom, CommonMixedCaseForeignKeys, Company, Cover, CustomPk, CustomPkTag, DateTimePK, Detail, DumbCategory, Eaten, Employment, ExtraInfo, Fan, Food, Identifier, Individual, Item, Job, JobResponsibilities, Join, LeafA, LeafB, LoopX, LoopZ, ManagedModel, Member, MixedCaseDbColumnCategoryItem, MixedCaseFieldCategoryItem, ModelA, ModelB, ModelC, ModelD, MyObject, NamedCategory, Node, Note, NullableName, Number, ObjectA, ObjectB, ObjectC, OneToOneCategory, Order, OrderItem, Page, Paragraph, Person, Plaything, PointerA, Program, ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking, Related, RelatedIndividual, RelatedObject, Report, ReportComment, ReservedName, Responsibility, School, SharedConnection, SimpleCategory, SingleObject, SpecialCategory, Staff, StaffUser, Student, Tag, Task, Teacher, Ticket21203Child, Ticket21203Parent, Ticket23605A, Ticket23605B, Ticket23605C, TvChef, Valid, X, ) class Queries1Tests(TestCase): @classmethod def setUpTestData(cls): cls.nc1 = generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name="t1", category=generic) cls.t2 = Tag.objects.create(name="t2", parent=cls.t1, category=generic) cls.t3 = Tag.objects.create(name="t3", parent=cls.t1) cls.t4 = Tag.objects.create(name="t4", parent=cls.t3) cls.t5 = Tag.objects.create(name="t5", parent=cls.t3) cls.n1 = Note.objects.create(note="n1", misc="foo", id=1) cls.n2 = Note.objects.create(note="n2", misc="bar", id=2) cls.n3 = Note.objects.create(note="n3", misc="foo", id=3, negate=False) cls.ann1 = Annotation.objects.create(name="a1", tag=cls.t1) cls.ann1.notes.add(cls.n1) ann2 = Annotation.objects.create(name="a2", tag=cls.t4) ann2.notes.add(cls.n2, cls.n3) # Create these out of order so that sorting by 'id' will be different to sorting # by 'info'. Helps detect some problems later. 
cls.e2 = ExtraInfo.objects.create( info="e2", note=cls.n2, value=41, filterable=False ) e1 = ExtraInfo.objects.create(info="e1", note=cls.n1, value=42) cls.a1 = Author.objects.create(name="a1", num=1001, extra=e1) cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1) cls.a3 = Author.objects.create(name="a3", num=3003, extra=cls.e2) cls.a4 = Author.objects.create(name="a4", num=4004, extra=cls.e2) cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0) cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0) time3 = datetime.datetime(2007, 12, 20, 22, 25, 0) time4 = datetime.datetime(2007, 12, 20, 21, 0, 0) cls.i1 = Item.objects.create( name="one", created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3, ) cls.i1.tags.set([cls.t1, cls.t2]) cls.i2 = Item.objects.create( name="two", created=cls.time2, creator=cls.a2, note=cls.n2 ) cls.i2.tags.set([cls.t1, cls.t3]) cls.i3 = Item.objects.create( name="three", created=time3, creator=cls.a2, note=cls.n3 ) cls.i4 = Item.objects.create( name="four", created=time4, creator=cls.a4, note=cls.n3 ) cls.i4.tags.set([cls.t4]) cls.r1 = Report.objects.create(name="r1", creator=cls.a1) cls.r2 = Report.objects.create(name="r2", creator=cls.a3) cls.r3 = Report.objects.create(name="r3") # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering # will be rank3, rank2, rank1. cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2) cls.c1 = Cover.objects.create(title="first", item=cls.i4) cls.c2 = Cover.objects.create(title="second", item=cls.i2) def test_subquery_condition(self): qs1 = Tag.objects.filter(pk__lte=0) qs2 = Tag.objects.filter(parent__in=qs1) qs3 = Tag.objects.filter(parent__in=qs2) self.assertEqual(qs3.query.subq_aliases, {"T", "U", "V"}) self.assertIn("v0", str(qs3.query).lower()) qs4 = qs3.filter(parent__in=qs1) self.assertEqual(qs4.query.subq_aliases, {"T", "U", "V"}) # It is possible to reuse U for the second subquery, no need to use W. self.assertNotIn("w0", str(qs4.query).lower()) # So, 'U0."id"' is referenced in SELECT and WHERE twice. self.assertEqual(str(qs4.query).lower().count("u0."), 4) def test_ticket1050(self): self.assertSequenceEqual( Item.objects.filter(tags__isnull=True), [self.i3], ) self.assertSequenceEqual( Item.objects.filter(tags__id__isnull=True), [self.i3], ) def test_ticket1801(self): self.assertSequenceEqual( Author.objects.filter(item=self.i2), [self.a2], ) self.assertSequenceEqual( Author.objects.filter(item=self.i3), [self.a2], ) self.assertSequenceEqual( Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3), [self.a2], ) def test_ticket2306(self): # Checking that no join types are "left outer" joins. query = Item.objects.filter(tags=self.t2).query self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()]) self.assertSequenceEqual( Item.objects.filter(Q(tags=self.t1)).order_by("name"), [self.i1, self.i2], ) self.assertSequenceEqual( Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)), [self.i1], ) self.assertSequenceEqual( Item.objects.filter(Q(tags=self.t1)).filter( Q(creator__name="fred") | Q(tags=self.t2) ), [self.i1], ) # Each filter call is processed "at once" against a single table, so this is # different from the previous example as it tries to find tags that are two # things at once (rather than two tags). 
        self.assertSequenceEqual(
            Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)), []
        )
        self.assertSequenceEqual(
            Item.objects.filter(
                Q(tags=self.t1), Q(creator__name="fred") | Q(tags=self.t2)
            ),
            [],
        )
        qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
        self.assertSequenceEqual(list(qs), [self.a2])
        self.assertEqual(qs.query.count_active_tables(), 2)
        qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
        self.assertEqual(qs.query.count_active_tables(), 3)

    def test_ticket4464(self):
        self.assertSequenceEqual(
            Item.objects.filter(tags=self.t1).filter(tags=self.t2),
            [self.i1],
        )
        self.assertSequenceEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2])
            .distinct()
            .order_by("name"),
            [self.i1, self.i2],
        )
        self.assertSequenceEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
            [self.i2],
        )

        # Make sure .distinct() works with slicing (this was broken in Oracle).
        self.assertSequenceEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).order_by("name")[:3],
            [self.i1, self.i1, self.i2],
        )
        self.assertSequenceEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2])
            .distinct()
            .order_by("name")[:3],
            [self.i1, self.i2],
        )

    def test_tickets_2080_3592(self):
        self.assertSequenceEqual(
            Author.objects.filter(item__name="one") | Author.objects.filter(name="a3"),
            [self.a1, self.a3],
        )
        self.assertSequenceEqual(
            Author.objects.filter(Q(item__name="one") | Q(name="a3")),
            [self.a1, self.a3],
        )
        self.assertSequenceEqual(
            Author.objects.filter(Q(name="a3") | Q(item__name="one")),
            [self.a1, self.a3],
        )
        self.assertSequenceEqual(
            Author.objects.filter(Q(item__name="three") | Q(report__name="r3")),
            [self.a2],
        )

    def test_ticket6074(self):
        # Merging two empty result sets shouldn't leave a queryset with no
        # constraints (which would match everything).
        self.assertSequenceEqual(Author.objects.filter(Q(id__in=[])), [])
        self.assertSequenceEqual(
            Author.objects.filter(Q(id__in=[]) | Q(id__in=[])), []
        )

    def test_tickets_1878_2939(self):
        self.assertEqual(Item.objects.values("creator").distinct().count(), 3)

        # Create something with a duplicate 'name' so that we can test
        # multi-column cases (which require some tricky SQL transformations
        # under the covers).
xx = Item(name="four", created=self.time1, creator=self.a2, note=self.n1) xx.save() self.assertEqual( Item.objects.exclude(name="two") .values("creator", "name") .distinct() .count(), 4, ) self.assertEqual( ( Item.objects.exclude(name="two") .extra(select={"foo": "%s"}, select_params=(1,)) .values("creator", "name", "foo") .distinct() .count() ), 4, ) self.assertEqual( ( Item.objects.exclude(name="two") .extra(select={"foo": "%s"}, select_params=(1,)) .values("creator", "name") .distinct() .count() ), 4, ) xx.delete() def test_ticket7323(self): self.assertEqual(Item.objects.values("creator", "name").count(), 4) def test_ticket2253(self): q1 = Item.objects.order_by("name") q2 = Item.objects.filter(id=self.i1.id) self.assertSequenceEqual(q1, [self.i4, self.i1, self.i3, self.i2]) self.assertSequenceEqual(q2, [self.i1]) self.assertSequenceEqual( (q1 | q2).order_by("name"), [self.i4, self.i1, self.i3, self.i2], ) self.assertSequenceEqual((q1 & q2).order_by("name"), [self.i1]) q1 = Item.objects.filter(tags=self.t1) q2 = Item.objects.filter(note=self.n3, tags=self.t2) q3 = Item.objects.filter(creator=self.a4) self.assertSequenceEqual( ((q1 & q2) | q3).order_by("name"), [self.i4, self.i1], ) def test_order_by_tables(self): q1 = Item.objects.order_by("name") q2 = Item.objects.filter(id=self.i1.id) list(q2) combined_query = (q1 & q2).order_by("name").query self.assertEqual( len( [ t for t in combined_query.alias_map if combined_query.alias_refcount[t] ] ), 1, ) def test_order_by_join_unref(self): """ This test is related to the above one, testing that there aren't old JOINs in the query. """ qs = Celebrity.objects.order_by("greatest_fan__fan_of") self.assertIn("OUTER JOIN", str(qs.query)) qs = qs.order_by("id") self.assertNotIn("OUTER JOIN", str(qs.query)) def test_order_by_related_field_transform(self): extra_12 = ExtraInfo.objects.create( info="extra 12", date=DateTimePK.objects.create(date=datetime.datetime(2021, 12, 10)), ) extra_11 = ExtraInfo.objects.create( info="extra 11", date=DateTimePK.objects.create(date=datetime.datetime(2022, 11, 10)), ) self.assertSequenceEqual( ExtraInfo.objects.filter(date__isnull=False).order_by("date__month"), [extra_11, extra_12], ) def test_filter_by_related_field_transform(self): extra_old = ExtraInfo.objects.create( info="extra 12", date=DateTimePK.objects.create(date=datetime.datetime(2020, 12, 10)), ) ExtraInfo.objects.create(info="extra 11", date=DateTimePK.objects.create()) a5 = Author.objects.create(name="a5", num=5005, extra=extra_old) fk_field = ExtraInfo._meta.get_field("date") with register_lookup(fk_field, ExtractYear): self.assertSequenceEqual( ExtraInfo.objects.filter(date__year=2020), [extra_old], ) self.assertSequenceEqual( Author.objects.filter(extra__date__year=2020), [a5] ) def test_filter_by_related_field_nested_transforms(self): extra = ExtraInfo.objects.create(info=" extra") a5 = Author.objects.create(name="a5", num=5005, extra=extra) info_field = ExtraInfo._meta.get_field("info") with register_lookup(info_field, Length), register_lookup(CharField, LTrim): self.assertSequenceEqual( Author.objects.filter(extra__info__ltrim__length=5), [a5] ) def test_get_clears_ordering(self): """ get() should clear ordering for optimization purposes. 
""" with CaptureQueriesContext(connection) as captured_queries: Author.objects.order_by("name").get(pk=self.a1.pk) self.assertNotIn("order by", captured_queries[0]["sql"].lower()) def test_tickets_4088_4306(self): self.assertSequenceEqual(Report.objects.filter(creator=1001), [self.r1]) self.assertSequenceEqual(Report.objects.filter(creator__num=1001), [self.r1]) self.assertSequenceEqual(Report.objects.filter(creator__id=1001), []) self.assertSequenceEqual( Report.objects.filter(creator__id=self.a1.id), [self.r1] ) self.assertSequenceEqual(Report.objects.filter(creator__name="a1"), [self.r1]) def test_ticket4510(self): self.assertSequenceEqual( Author.objects.filter(report__name="r1"), [self.a1], ) def test_ticket7378(self): self.assertSequenceEqual(self.a1.report_set.all(), [self.r1]) def test_tickets_5324_6704(self): self.assertSequenceEqual( Item.objects.filter(tags__name="t4"), [self.i4], ) self.assertSequenceEqual( Item.objects.exclude(tags__name="t4").order_by("name").distinct(), [self.i1, self.i3, self.i2], ) self.assertSequenceEqual( Item.objects.exclude(tags__name="t4").order_by("name").distinct().reverse(), [self.i2, self.i3, self.i1], ) self.assertSequenceEqual( Author.objects.exclude(item__name="one").distinct().order_by("name"), [self.a2, self.a3, self.a4], ) # Excluding across a m2m relation when there is more than one related # object associated was problematic. self.assertSequenceEqual( Item.objects.exclude(tags__name="t1").order_by("name"), [self.i4, self.i3], ) self.assertSequenceEqual( Item.objects.exclude(tags__name="t1").exclude(tags__name="t4"), [self.i3], ) # Excluding from a relation that cannot be NULL should not use outer joins. query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()]) # Similarly, when one of the joins cannot possibly, ever, involve NULL # values (Author -> ExtraInfo, in the following), it should never be # promoted to a left outer join. So the following query should only # involve one "left outer" join (Author -> Item is 0-to-many). qs = Author.objects.filter(id=self.a1.id).filter( Q(extra__note=self.n1) | Q(item__note=self.n3) ) self.assertEqual( len( [ x for x in qs.query.alias_map.values() if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias] ] ), 1, ) # The previous changes shouldn't affect nullable foreign key joins. self.assertSequenceEqual( Tag.objects.filter(parent__isnull=True).order_by("name"), [self.t1] ) self.assertSequenceEqual( Tag.objects.exclude(parent__isnull=True).order_by("name"), [self.t2, self.t3, self.t4, self.t5], ) self.assertSequenceEqual( Tag.objects.exclude(Q(parent__name="t1") | Q(parent__isnull=True)).order_by( "name" ), [self.t4, self.t5], ) self.assertSequenceEqual( Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name="t1")).order_by( "name" ), [self.t4, self.t5], ) self.assertSequenceEqual( Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by("name"), [self.t4, self.t5], ) self.assertSequenceEqual( Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by("name"), [self.t4, self.t5], ) def test_ticket2091(self): t = Tag.objects.get(name="t4") self.assertSequenceEqual(Item.objects.filter(tags__in=[t]), [self.i4]) def test_avoid_infinite_loop_on_too_many_subqueries(self): x = Tag.objects.filter(pk=1) local_recursion_limit = sys.getrecursionlimit() // 16 msg = "Maximum recursion depth exceeded: too many subqueries." 
with self.assertRaisesMessage(RecursionError, msg): for i in range(local_recursion_limit + 2): x = Tag.objects.filter(pk__in=x) def test_reasonable_number_of_subq_aliases(self): x = Tag.objects.filter(pk=1) for _ in range(20): x = Tag.objects.filter(pk__in=x) self.assertEqual( x.query.subq_aliases, { "T", "U", "V", "W", "X", "Y", "Z", "AA", "AB", "AC", "AD", "AE", "AF", "AG", "AH", "AI", "AJ", "AK", "AL", "AM", "AN", }, ) def test_heterogeneous_qs_combination(self): # Combining querysets built on different models should behave in a well-defined # fashion. We raise an error. msg = "Cannot combine queries on two different base models." with self.assertRaisesMessage(TypeError, msg): Author.objects.all() & Tag.objects.all() with self.assertRaisesMessage(TypeError, msg): Author.objects.all() | Tag.objects.all() def test_ticket3141(self): self.assertEqual(Author.objects.extra(select={"foo": "1"}).count(), 4) self.assertEqual( Author.objects.extra(select={"foo": "%s"}, select_params=(1,)).count(), 4 ) def test_ticket2400(self): self.assertSequenceEqual( Author.objects.filter(item__isnull=True), [self.a3], ) self.assertSequenceEqual( Tag.objects.filter(item__isnull=True), [self.t5], ) def test_ticket2496(self): self.assertSequenceEqual( Item.objects.extra(tables=["queries_author"]) .select_related() .order_by("name")[:1], [self.i4], ) def test_error_raised_on_filter_with_dictionary(self): with self.assertRaisesMessage(FieldError, "Cannot parse keyword query as dict"): Note.objects.filter({"note": "n1", "misc": "foo"}) def test_tickets_2076_7256(self): # Ordering on related tables should be possible, even if the table is # not otherwise involved. self.assertSequenceEqual( Item.objects.order_by("note__note", "name"), [self.i2, self.i4, self.i1, self.i3], ) # Ordering on a related field should use the remote model's default # ordering as a final step. self.assertSequenceEqual( Author.objects.order_by("extra", "-name"), [self.a2, self.a1, self.a4, self.a3], ) # Using remote model default ordering can span multiple models (in this # case, Cover is ordered by Item's default, which uses Note's default). self.assertSequenceEqual(Cover.objects.all(), [self.c1, self.c2]) # If the remote model does not have a default ordering, we order by its 'id' # field. self.assertSequenceEqual( Item.objects.order_by("creator", "name"), [self.i1, self.i3, self.i2, self.i4], ) # Ordering by a many-valued attribute (e.g. a many-to-many or reverse # ForeignKey) is legal, but the results might not make sense. That # isn't Django's problem. Garbage in, garbage out. self.assertSequenceEqual( Item.objects.filter(tags__isnull=False).order_by("tags", "id"), [self.i1, self.i2, self.i1, self.i2, self.i4], ) # If we replace the default ordering, Django adjusts the required # tables automatically. Item normally requires a join with Note to do # the default ordering, but that isn't needed here. qs = Item.objects.order_by("name") self.assertSequenceEqual(qs, [self.i4, self.i1, self.i3, self.i2]) self.assertEqual(len(qs.query.alias_map), 1) def test_tickets_2874_3002(self): qs = Item.objects.select_related().order_by("note__note", "name") self.assertQuerySetEqual(qs, [self.i2, self.i4, self.i1, self.i3]) # This is also a good select_related() test because there are multiple # Note entries in the SQL. The two Note items should be different. 
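        # The first item's own note is n2, while its creator's extra info
        # references n1, i.e. two distinct Note rows fetched by the single
        # select_related() query.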
self.assertEqual(repr(qs[0].note), "<Note: n2>") self.assertEqual(repr(qs[0].creator.extra.note), "<Note: n1>") def test_ticket3037(self): self.assertSequenceEqual( Item.objects.filter( Q(creator__name="a3", name="two") | Q(creator__name="a4", name="four") ), [self.i4], ) def test_tickets_5321_7070(self): # Ordering columns must be included in the output columns. Note that # this means results that might otherwise be distinct are not (if there # are multiple values in the ordering cols), as in this example. This # isn't a bug; it's a warning to be careful with the selection of # ordering columns. self.assertSequenceEqual( Note.objects.values("misc").distinct().order_by("note", "-misc"), [{"misc": "foo"}, {"misc": "bar"}, {"misc": "foo"}], ) def test_ticket4358(self): # If you don't pass any fields to values(), relation fields are # returned as "foo_id" keys, not "foo". For consistency, you should be # able to pass "foo_id" in the fields list and have it work, too. We # actually allow both "foo" and "foo_id". # The *_id version is returned by default. self.assertIn("note_id", ExtraInfo.objects.values()[0]) # You can also pass it in explicitly. self.assertSequenceEqual( ExtraInfo.objects.values("note_id"), [{"note_id": 1}, {"note_id": 2}] ) # ...or use the field name. self.assertSequenceEqual( ExtraInfo.objects.values("note"), [{"note": 1}, {"note": 2}] ) def test_ticket6154(self): # Multiple filter statements are joined using "AND" all the time. self.assertSequenceEqual( Author.objects.filter(id=self.a1.id).filter( Q(extra__note=self.n1) | Q(item__note=self.n3) ), [self.a1], ) self.assertSequenceEqual( Author.objects.filter( Q(extra__note=self.n1) | Q(item__note=self.n3) ).filter(id=self.a1.id), [self.a1], ) def test_ticket6981(self): self.assertSequenceEqual( Tag.objects.select_related("parent").order_by("name"), [self.t1, self.t2, self.t3, self.t4, self.t5], ) def test_ticket9926(self): self.assertSequenceEqual( Tag.objects.select_related("parent", "category").order_by("name"), [self.t1, self.t2, self.t3, self.t4, self.t5], ) self.assertSequenceEqual( Tag.objects.select_related("parent", "parent__category").order_by("name"), [self.t1, self.t2, self.t3, self.t4, self.t5], ) def test_tickets_6180_6203(self): # Dates with limits and/or counts self.assertEqual(Item.objects.count(), 4) self.assertEqual(Item.objects.datetimes("created", "month").count(), 1) self.assertEqual(Item.objects.datetimes("created", "day").count(), 2) self.assertEqual(len(Item.objects.datetimes("created", "day")), 2) self.assertEqual( Item.objects.datetimes("created", "day")[0], datetime.datetime(2007, 12, 19, 0, 0), ) def test_tickets_7087_12242(self): # Dates with extra select columns self.assertSequenceEqual( Item.objects.datetimes("created", "day").extra(select={"a": 1}), [ datetime.datetime(2007, 12, 19, 0, 0), datetime.datetime(2007, 12, 20, 0, 0), ], ) self.assertSequenceEqual( Item.objects.extra(select={"a": 1}).datetimes("created", "day"), [ datetime.datetime(2007, 12, 19, 0, 0), datetime.datetime(2007, 12, 20, 0, 0), ], ) name = "one" self.assertSequenceEqual( Item.objects.datetimes("created", "day").extra( where=["name=%s"], params=[name] ), [datetime.datetime(2007, 12, 19, 0, 0)], ) self.assertSequenceEqual( Item.objects.extra(where=["name=%s"], params=[name]).datetimes( "created", "day" ), [datetime.datetime(2007, 12, 19, 0, 0)], ) def test_ticket7155(self): # Nullable dates self.assertSequenceEqual( Item.objects.datetimes("modified", "day"), [datetime.datetime(2007, 12, 19, 0, 0)], ) def 
test_order_by_rawsql(self): self.assertSequenceEqual( Item.objects.values("note__note").order_by( RawSQL("queries_note.note", ()), "id", ), [ {"note__note": "n2"}, {"note__note": "n3"}, {"note__note": "n3"}, {"note__note": "n3"}, ], ) def test_ticket7096(self): # Make sure exclude() with multiple conditions continues to work. self.assertSequenceEqual( Tag.objects.filter(parent=self.t1, name="t3").order_by("name"), [self.t3], ) self.assertSequenceEqual( Tag.objects.exclude(parent=self.t1, name="t3").order_by("name"), [self.t1, self.t2, self.t4, self.t5], ) self.assertSequenceEqual( Item.objects.exclude(tags__name="t1", name="one") .order_by("name") .distinct(), [self.i4, self.i3, self.i2], ) self.assertSequenceEqual( Item.objects.filter(name__in=["three", "four"]) .exclude(tags__name="t1") .order_by("name"), [self.i4, self.i3], ) # More twisted cases, involving nested negations. self.assertSequenceEqual( Item.objects.exclude(~Q(tags__name="t1", name="one")), [self.i1], ) self.assertSequenceEqual( Item.objects.filter(~Q(tags__name="t1", name="one"), name="two"), [self.i2], ) self.assertSequenceEqual( Item.objects.exclude(~Q(tags__name="t1", name="one"), name="two"), [self.i4, self.i1, self.i3], ) def test_tickets_7204_7506(self): # Make sure querysets with related fields can be pickled. If this # doesn't crash, it's a Good Thing. pickle.dumps(Item.objects.all()) def test_ticket7813(self): # We should also be able to pickle things that use select_related(). # The only tricky thing here is to ensure that we do the related # selections properly after unpickling. qs = Item.objects.select_related() query = qs.query.get_compiler(qs.db).as_sql()[0] query2 = pickle.loads(pickle.dumps(qs.query)) self.assertEqual(query2.get_compiler(qs.db).as_sql()[0], query) def test_deferred_load_qs_pickling(self): # Check pickling of deferred-loading querysets qs = Item.objects.defer("name", "creator") q2 = pickle.loads(pickle.dumps(qs)) self.assertEqual(list(qs), list(q2)) q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL)) self.assertEqual(list(qs), list(q3)) def test_ticket7277(self): self.assertSequenceEqual( self.n1.annotation_set.filter( Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5) ), [self.ann1], ) def test_tickets_7448_7707(self): # Complex objects should be converted to strings before being used in # lookups. self.assertSequenceEqual( Item.objects.filter(created__in=[self.time1, self.time2]), [self.i1, self.i2], ) def test_ticket7235(self): # An EmptyQuerySet should not raise exceptions if it is filtered. Eaten.objects.create(meal="m") q = Eaten.objects.none() with self.assertNumQueries(0): self.assertSequenceEqual(q.all(), []) self.assertSequenceEqual(q.filter(meal="m"), []) self.assertSequenceEqual(q.exclude(meal="m"), []) self.assertSequenceEqual(q.complex_filter({"pk": 1}), []) self.assertSequenceEqual(q.select_related("food"), []) self.assertSequenceEqual(q.annotate(Count("food")), []) self.assertSequenceEqual(q.order_by("meal", "food"), []) self.assertSequenceEqual(q.distinct(), []) self.assertSequenceEqual(q.extra(select={"foo": "1"}), []) self.assertSequenceEqual(q.reverse(), []) q.query.low_mark = 1 msg = "Cannot change a query once a slice has been taken." with self.assertRaisesMessage(TypeError, msg): q.extra(select={"foo": "1"}) self.assertSequenceEqual(q.defer("meal"), []) self.assertSequenceEqual(q.only("meal"), []) def test_ticket7791(self): # There were "issues" when ordering and distinct-ing on fields related # via ForeignKeys. 
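        # Ordering on a field reached via a nullable ForeignKey adds a LEFT
        # JOIN (and, for DISTINCT, the ordering column to the select list);
        # the count below checks that no Note row is duplicated or dropped as
        # a result.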
self.assertEqual(len(Note.objects.order_by("extrainfo__info").distinct()), 3) # Pickling of QuerySets using datetimes() should work. qs = Item.objects.datetimes("created", "month") pickle.loads(pickle.dumps(qs)) def test_ticket9997(self): # If a ValuesList or Values queryset is passed as an inner query, we # make sure it's only requesting a single value and use that as the # thing to select. self.assertSequenceEqual( Tag.objects.filter( name__in=Tag.objects.filter(parent=self.t1).values("name") ), [self.t2, self.t3], ) # Multi-valued values() and values_list() querysets should raise errors. with self.assertRaisesMessage( TypeError, "Cannot use multi-field values as a filter value." ): Tag.objects.filter( name__in=Tag.objects.filter(parent=self.t1).values("name", "id") ) with self.assertRaisesMessage( TypeError, "Cannot use multi-field values as a filter value." ): Tag.objects.filter( name__in=Tag.objects.filter(parent=self.t1).values_list("name", "id") ) def test_ticket9985(self): # qs.values_list(...).values(...) combinations should work. self.assertSequenceEqual( Note.objects.values_list("note", flat=True).values("id").order_by("id"), [{"id": 1}, {"id": 2}, {"id": 3}], ) self.assertSequenceEqual( Annotation.objects.filter( notes__in=Note.objects.filter(note="n1") .values_list("note") .values("id") ), [self.ann1], ) def test_ticket10205(self): # When bailing out early because of an empty "__in" filter, we need # to set things up correctly internally so that subqueries can continue # properly. self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0) def test_ticket10432(self): # Testing an empty "__in" filter with a generator as the value. def f(): return iter([]) n_obj = Note.objects.all()[0] def g(): yield n_obj.pk self.assertSequenceEqual(Note.objects.filter(pk__in=f()), []) self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj]) def test_ticket10742(self): # Queries used in an __in clause don't execute subqueries subq = Author.objects.filter(num__lt=3000) qs = Author.objects.filter(pk__in=subq) self.assertSequenceEqual(qs, [self.a1, self.a2]) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) subq = Author.objects.filter(num__lt=3000) qs = Author.objects.exclude(pk__in=subq) self.assertSequenceEqual(qs, [self.a3, self.a4]) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) subq = Author.objects.filter(num__lt=3000) self.assertSequenceEqual( Author.objects.filter(Q(pk__in=subq) & Q(name="a1")), [self.a1], ) # The subquery result cache should not be populated self.assertIsNone(subq._result_cache) def test_ticket7076(self): # Excluding shouldn't eliminate NULL entries. self.assertSequenceEqual( Item.objects.exclude(modified=self.time1).order_by("name"), [self.i4, self.i3, self.i2], ) self.assertSequenceEqual( Tag.objects.exclude(parent__name=self.t1.name), [self.t1, self.t4, self.t5], ) def test_ticket7181(self): # Ordering by related tables should accommodate nullable fields (this # test is a little tricky, since NULL ordering is database dependent. # Instead, we just count the number of results). self.assertEqual(len(Tag.objects.order_by("parent__name")), 5) # Empty querysets can be merged with others. 
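        # none() | qs should behave like qs, and none() & qs like none();
        # neither combination should raise or mutate the other operand.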
self.assertSequenceEqual( Note.objects.none() | Note.objects.all(), [self.n1, self.n2, self.n3], ) self.assertSequenceEqual( Note.objects.all() | Note.objects.none(), [self.n1, self.n2, self.n3], ) self.assertSequenceEqual(Note.objects.none() & Note.objects.all(), []) self.assertSequenceEqual(Note.objects.all() & Note.objects.none(), []) def test_ticket8439(self): # Complex combinations of conjunctions, disjunctions and nullable # relations. self.assertSequenceEqual( Author.objects.filter( Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name="xyz") ), [self.a2], ) self.assertSequenceEqual( Author.objects.filter( Q(report=self.r1, name="xyz") | Q(item__note__extrainfo=self.e2) ), [self.a2], ) self.assertSequenceEqual( Annotation.objects.filter( Q(tag__parent=self.t1) | Q(notes__note="n1", name="a1") ), [self.ann1], ) xx = ExtraInfo.objects.create(info="xx", note=self.n3) self.assertSequenceEqual( Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)), [self.n1, self.n3], ) q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query self.assertEqual( len( [ x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias] ] ), 1, ) def test_ticket17429(self): """ Meta.ordering=None works the same as Meta.ordering=[] """ original_ordering = Tag._meta.ordering Tag._meta.ordering = None try: self.assertCountEqual( Tag.objects.all(), [self.t1, self.t2, self.t3, self.t4, self.t5], ) finally: Tag._meta.ordering = original_ordering def test_exclude(self): self.assertQuerySetEqual( Item.objects.exclude(tags__name="t4"), Item.objects.filter(~Q(tags__name="t4")), ) self.assertQuerySetEqual( Item.objects.exclude(Q(tags__name="t4") | Q(tags__name="t3")), Item.objects.filter(~(Q(tags__name="t4") | Q(tags__name="t3"))), ) self.assertQuerySetEqual( Item.objects.exclude(Q(tags__name="t4") | ~Q(tags__name="t3")), Item.objects.filter(~(Q(tags__name="t4") | ~Q(tags__name="t3"))), ) def test_nested_exclude(self): self.assertQuerySetEqual( Item.objects.exclude(~Q(tags__name="t4")), Item.objects.filter(~~Q(tags__name="t4")), ) def test_double_exclude(self): self.assertQuerySetEqual( Item.objects.filter(Q(tags__name="t4")), Item.objects.filter(~~Q(tags__name="t4")), ) self.assertQuerySetEqual( Item.objects.filter(Q(tags__name="t4")), Item.objects.filter(~Q(~Q(tags__name="t4"))), ) def test_exclude_in(self): self.assertQuerySetEqual( Item.objects.exclude(Q(tags__name__in=["t4", "t3"])), Item.objects.filter(~Q(tags__name__in=["t4", "t3"])), ) self.assertQuerySetEqual( Item.objects.filter(Q(tags__name__in=["t4", "t3"])), Item.objects.filter(~~Q(tags__name__in=["t4", "t3"])), ) def test_ticket_10790_1(self): # Querying direct fields with isnull should trim the left outer join. # It also should not create INNER JOIN. 
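        # For example, parent__isnull=True can be answered from the tag table
        # alone, roughly (illustrative; quoting is backend-dependent):
        #   SELECT ... FROM "queries_tag" WHERE "queries_tag"."parent_id" IS NULL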
q = Tag.objects.filter(parent__isnull=True) self.assertSequenceEqual(q, [self.t1]) self.assertNotIn("JOIN", str(q.query)) q = Tag.objects.filter(parent__isnull=False) self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5]) self.assertNotIn("JOIN", str(q.query)) q = Tag.objects.exclude(parent__isnull=True) self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5]) self.assertNotIn("JOIN", str(q.query)) q = Tag.objects.exclude(parent__isnull=False) self.assertSequenceEqual(q, [self.t1]) self.assertNotIn("JOIN", str(q.query)) q = Tag.objects.exclude(parent__parent__isnull=False) self.assertSequenceEqual(q, [self.t1, self.t2, self.t3]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1) self.assertNotIn("INNER JOIN", str(q.query)) def test_ticket_10790_2(self): # Querying across several tables should strip only the last outer join, # while preserving the preceding inner joins. q = Tag.objects.filter(parent__parent__isnull=False) self.assertSequenceEqual(q, [self.t4, self.t5]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(q.query).count("INNER JOIN"), 1) # Querying without isnull should not convert anything to left outer join. q = Tag.objects.filter(parent__parent=self.t1) self.assertSequenceEqual(q, [self.t4, self.t5]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(q.query).count("INNER JOIN"), 1) def test_ticket_10790_3(self): # Querying via indirect fields should populate the left outer join q = NamedCategory.objects.filter(tag__isnull=True) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1) # join to dumbcategory ptr_id self.assertEqual(str(q.query).count("INNER JOIN"), 1) self.assertSequenceEqual(q, []) # Querying across several tables should strip only the last join, while # preserving the preceding left outer joins. q = NamedCategory.objects.filter(tag__parent__isnull=True) self.assertEqual(str(q.query).count("INNER JOIN"), 1) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1) self.assertSequenceEqual(q, [self.nc1]) def test_ticket_10790_4(self): # Querying across m2m field should not strip the m2m table from join. 
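        # With item__tags__isnull=True both hops, Author -> Item and the
        # item/tag m2m through table, stay in the query as LEFT OUTER JOINs;
        # trimming the m2m join would change which authors match.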
q = Author.objects.filter(item__tags__isnull=True) self.assertSequenceEqual(q, [self.a2, self.a3]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 2) self.assertNotIn("INNER JOIN", str(q.query)) q = Author.objects.filter(item__tags__parent__isnull=True) self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3) self.assertNotIn("INNER JOIN", str(q.query)) def test_ticket_10790_5(self): # Querying with isnull=False across m2m field should not create outer joins q = Author.objects.filter(item__tags__isnull=False) self.assertSequenceEqual(q, [self.a1, self.a1, self.a2, self.a2, self.a4]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(q.query).count("INNER JOIN"), 2) q = Author.objects.filter(item__tags__parent__isnull=False) self.assertSequenceEqual(q, [self.a1, self.a2, self.a4]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(q.query).count("INNER JOIN"), 3) q = Author.objects.filter(item__tags__parent__parent__isnull=False) self.assertSequenceEqual(q, [self.a4]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(q.query).count("INNER JOIN"), 4) def test_ticket_10790_6(self): # Querying with isnull=True across m2m field should not create inner joins # and strip last outer join q = Author.objects.filter(item__tags__parent__parent__isnull=True) self.assertSequenceEqual( q, [self.a1, self.a1, self.a2, self.a2, self.a2, self.a3], ) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 4) self.assertEqual(str(q.query).count("INNER JOIN"), 0) q = Author.objects.filter(item__tags__parent__isnull=True) self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3) self.assertEqual(str(q.query).count("INNER JOIN"), 0) def test_ticket_10790_7(self): # Reverse querying with isnull should not strip the join q = Author.objects.filter(item__isnull=True) self.assertSequenceEqual(q, [self.a3]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1) self.assertEqual(str(q.query).count("INNER JOIN"), 0) q = Author.objects.filter(item__isnull=False) self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a4]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(q.query).count("INNER JOIN"), 1) def test_ticket_10790_8(self): # Querying with combined q-objects should also strip the left outer join q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1)) self.assertSequenceEqual(q, [self.t1, self.t2, self.t3]) self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(q.query).count("INNER JOIN"), 0) def test_ticket_10790_combine(self): # Combining queries should not re-populate the left outer join q1 = Tag.objects.filter(parent__isnull=True) q2 = Tag.objects.filter(parent__isnull=False) q3 = q1 | q2 self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3, self.t4, self.t5]) self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(q3.query).count("INNER JOIN"), 0) q3 = q1 & q2 self.assertSequenceEqual(q3, []) self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(q3.query).count("INNER JOIN"), 0) q2 = Tag.objects.filter(parent=self.t1) q3 = q1 | q2 self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3]) self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(q3.query).count("INNER JOIN"), 0) q3 = q2 | q1 self.assertSequenceEqual(q3, [self.t1, 
                                      self.t2, self.t3])
        self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
        self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
        q1 = Tag.objects.filter(parent__isnull=True)
        q2 = Tag.objects.filter(parent__parent__isnull=True)
        q3 = q1 | q2
        self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
        self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
        self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
        q3 = q2 | q1
        self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
        self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
        self.assertEqual(str(q3.query).count("INNER JOIN"), 0)

    def test_ticket19672(self):
        self.assertSequenceEqual(
            Report.objects.filter(
                Q(creator__isnull=False) & ~Q(creator__extra__value=41)
            ),
            [self.r1],
        )

    def test_ticket_20250(self):
        # A negated Q along with an annotated queryset failed in Django 1.4.
        qs = Author.objects.annotate(Count("item"))
        qs = qs.filter(~Q(extra__value=0)).order_by("name")
        self.assertIn("SELECT", str(qs.query))
        self.assertSequenceEqual(qs, [self.a1, self.a2, self.a3, self.a4])

    def test_lookup_constraint_fielderror(self):
        msg = (
            "Cannot resolve keyword 'unknown_field' into field. Choices are: "
            "annotation, category, category_id, children, id, item, "
            "managedmodel, name, note, parent, parent_id"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Tag.objects.filter(unknown_field__name="generic")

    def test_common_mixed_case_foreign_keys(self):
        """
        Valid query should be generated when fields fetched from joined tables
        include FKs whose names only differ by case.
        """
        c1 = SimpleCategory.objects.create(name="c1")
        c2 = SimpleCategory.objects.create(name="c2")
        c3 = SimpleCategory.objects.create(name="c3")
        category = CategoryItem.objects.create(category=c1)
        mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(
            CaTeGoRy=c2
        )
        mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(
            category=c3
        )
        CommonMixedCaseForeignKeys.objects.create(
            category=category,
            mixed_case_field_category=mixed_case_field_category,
            mixed_case_db_column_category=mixed_case_db_column_category,
        )
        qs = CommonMixedCaseForeignKeys.objects.values(
            "category",
            "mixed_case_field_category",
            "mixed_case_db_column_category",
            "category__category",
            "mixed_case_field_category__CaTeGoRy",
            "mixed_case_db_column_category__category",
        )
        self.assertTrue(qs.first())

    def test_excluded_intermediary_m2m_table_joined(self):
        self.assertSequenceEqual(
            Note.objects.filter(~Q(tag__annotation__name=F("note"))),
            [self.n1, self.n2, self.n3],
        )
        self.assertSequenceEqual(
            Note.objects.filter(tag__annotation__name="a1").filter(
                ~Q(tag__annotation__name=F("note"))
            ),
            [],
        )

    def test_field_with_filterable(self):
        self.assertSequenceEqual(
            Author.objects.filter(extra=self.e2),
            [self.a3, self.a4],
        )

    def test_negate_field(self):
        self.assertSequenceEqual(
            Note.objects.filter(negate=True),
            [self.n1, self.n2],
        )
        self.assertSequenceEqual(Note.objects.exclude(negate=True), [self.n3])


class Queries2Tests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.num4 = Number.objects.create(num=4)
        cls.num8 = Number.objects.create(num=8)
        cls.num12 = Number.objects.create(num=12)

    def test_ticket4289(self):
        # A slight variation on restricting the filtering choices by the
        # lookup constraints.
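        # With stored values 4, 8 and 12, strict bounds such as num__gt=8,
        # num__lt=12 match nothing, while widening one bound (num__lt=13)
        # matches num12.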
self.assertSequenceEqual(Number.objects.filter(num__lt=4), []) self.assertSequenceEqual(Number.objects.filter(num__gt=8, num__lt=12), []) self.assertSequenceEqual( Number.objects.filter(num__gt=8, num__lt=13), [self.num12], ) self.assertSequenceEqual( Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)), [] ) self.assertSequenceEqual( Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)), [] ) self.assertSequenceEqual( Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)), [] ) self.assertSequenceEqual( Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)), [self.num8], ) def test_ticket12239(self): # Custom lookups are registered to round float values correctly on gte # and lt IntegerField queries. self.assertSequenceEqual( Number.objects.filter(num__gt=11.9), [self.num12], ) self.assertSequenceEqual(Number.objects.filter(num__gt=12), []) self.assertSequenceEqual(Number.objects.filter(num__gt=12.0), []) self.assertSequenceEqual(Number.objects.filter(num__gt=12.1), []) self.assertCountEqual( Number.objects.filter(num__lt=12), [self.num4, self.num8], ) self.assertCountEqual( Number.objects.filter(num__lt=12.0), [self.num4, self.num8], ) self.assertCountEqual( Number.objects.filter(num__lt=12.1), [self.num4, self.num8, self.num12], ) self.assertCountEqual( Number.objects.filter(num__gte=11.9), [self.num12], ) self.assertCountEqual( Number.objects.filter(num__gte=12), [self.num12], ) self.assertCountEqual( Number.objects.filter(num__gte=12.0), [self.num12], ) self.assertSequenceEqual(Number.objects.filter(num__gte=12.1), []) self.assertSequenceEqual(Number.objects.filter(num__gte=12.9), []) self.assertCountEqual( Number.objects.filter(num__lte=11.9), [self.num4, self.num8], ) self.assertCountEqual( Number.objects.filter(num__lte=12), [self.num4, self.num8, self.num12], ) self.assertCountEqual( Number.objects.filter(num__lte=12.0), [self.num4, self.num8, self.num12], ) self.assertCountEqual( Number.objects.filter(num__lte=12.1), [self.num4, self.num8, self.num12], ) self.assertCountEqual( Number.objects.filter(num__lte=12.9), [self.num4, self.num8, self.num12], ) def test_ticket7759(self): # Count should work with a partially read result set. count = Number.objects.count() qs = Number.objects.all() def run(): for obj in qs: return qs.count() == count self.assertTrue(run()) class Queries3Tests(TestCase): def test_ticket7107(self): # This shouldn't create an infinite loop. self.assertSequenceEqual(Valid.objects.all(), []) def test_datetimes_invalid_field(self): # An error should be raised when QuerySet.datetimes() is passed the # wrong type of field. msg = "'name' isn't a DateField, TimeField, or DateTimeField." 
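        # Item.name is a CharField, so datetimes() must reject it with the
        # TypeError above rather than attempt any date-based truncation.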
with self.assertRaisesMessage(TypeError, msg): Item.objects.datetimes("name", "month") def test_ticket22023(self): with self.assertRaisesMessage( TypeError, "Cannot call only() after .values() or .values_list()" ): Valid.objects.values().only() with self.assertRaisesMessage( TypeError, "Cannot call defer() after .values() or .values_list()" ): Valid.objects.values().defer() class Queries4Tests(TestCase): @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name="t1", category=generic) n1 = Note.objects.create(note="n1", misc="foo") n2 = Note.objects.create(note="n2", misc="bar") e1 = ExtraInfo.objects.create(info="e1", note=n1) e2 = ExtraInfo.objects.create(info="e2", note=n2) cls.a1 = Author.objects.create(name="a1", num=1001, extra=e1) cls.a3 = Author.objects.create(name="a3", num=3003, extra=e2) cls.r1 = Report.objects.create(name="r1", creator=cls.a1) cls.r2 = Report.objects.create(name="r2", creator=cls.a3) cls.r3 = Report.objects.create(name="r3") cls.i1 = Item.objects.create( name="i1", created=datetime.datetime.now(), note=n1, creator=cls.a1 ) cls.i2 = Item.objects.create( name="i2", created=datetime.datetime.now(), note=n1, creator=cls.a3 ) def test_ticket24525(self): tag = Tag.objects.create() anth100 = tag.note_set.create(note="ANTH", misc="100") math101 = tag.note_set.create(note="MATH", misc="101") s1 = tag.annotation_set.create(name="1") s2 = tag.annotation_set.create(name="2") s1.notes.set([math101, anth100]) s2.notes.set([math101]) result = math101.annotation_set.all() & tag.annotation_set.exclude( notes__in=[anth100] ) self.assertEqual(list(result), [s2]) def test_ticket11811(self): unsaved_category = NamedCategory(name="Other") msg = ( "Unsaved model instance <NamedCategory: Other> cannot be used in an ORM " "query." ) with self.assertRaisesMessage(ValueError, msg): Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category) def test_ticket14876(self): # Note: when combining the query we need to have information available # about the join type of the trimmed "creator__isnull" join. If we # don't have that information, then the join is created as INNER JOIN # and results will be incorrect. q1 = Report.objects.filter( Q(creator__isnull=True) | Q(creator__extra__info="e1") ) q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter( Q(creator__extra__info="e1") ) self.assertCountEqual(q1, [self.r1, self.r3]) self.assertEqual(str(q1.query), str(q2.query)) q1 = Report.objects.filter( Q(creator__extra__info="e1") | Q(creator__isnull=True) ) q2 = Report.objects.filter( Q(creator__extra__info="e1") ) | Report.objects.filter(Q(creator__isnull=True)) self.assertCountEqual(q1, [self.r1, self.r3]) self.assertEqual(str(q1.query), str(q2.query)) q1 = Item.objects.filter( Q(creator=self.a1) | Q(creator__report__name="r1") ).order_by() q2 = ( Item.objects.filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name="r1")).order_by() ) self.assertCountEqual(q1, [self.i1]) self.assertEqual(str(q1.query), str(q2.query)) q1 = Item.objects.filter( Q(creator__report__name="e1") | Q(creator=self.a1) ).order_by() q2 = ( Item.objects.filter(Q(creator__report__name="e1")).order_by() | Item.objects.filter(Q(creator=self.a1)).order_by() ) self.assertCountEqual(q1, [self.i1]) self.assertEqual(str(q1.query), str(q2.query)) def test_combine_join_reuse(self): # Joins having identical connections are correctly recreated in the # rhs query, in case the query is ORed together (#18748). 
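        # Both querysets join Author -> Report; when they are ORed together,
        # the joins from the rhs query must be recreated with fresh aliases
        # rather than reused, otherwise the two report__name filters would
        # collapse onto a single join.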
Report.objects.create(name="r4", creator=self.a1) q1 = Author.objects.filter(report__name="r5") q2 = Author.objects.filter(report__name="r4").filter(report__name="r1") combined = q1 | q2 self.assertEqual(str(combined.query).count("JOIN"), 2) self.assertEqual(len(combined), 1) self.assertEqual(combined[0].name, "a1") def test_combine_or_filter_reuse(self): combined = Author.objects.filter(name="a1") | Author.objects.filter(name="a3") self.assertEqual(combined.get(name="a1"), self.a1) def test_join_reuse_order(self): # Join aliases are reused in order. This shouldn't raise AssertionError # because change_map contains a circular reference (#26522). s1 = School.objects.create() s2 = School.objects.create() s3 = School.objects.create() t1 = Teacher.objects.create() otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1) qs1 = otherteachers.filter(schools=s1).filter(schools=s2) qs2 = otherteachers.filter(schools=s1).filter(schools=s3) self.assertSequenceEqual(qs1 | qs2, []) def test_ticket7095(self): # Updates that are filtered on the model being updated are somewhat # tricky in MySQL. ManagedModel.objects.create(data="mm1", tag=self.t1, public=True) self.assertEqual(ManagedModel.objects.update(data="mm"), 1) # A values() or values_list() query across joined models must use outer # joins appropriately. # Note: In Oracle, we expect a null CharField to return '' instead of # None. if connection.features.interprets_empty_strings_as_nulls: expected_null_charfield_repr = "" else: expected_null_charfield_repr = None self.assertSequenceEqual( Report.objects.values_list("creator__extra__info", flat=True).order_by( "name" ), ["e1", "e2", expected_null_charfield_repr], ) # Similarly for select_related(), joins beyond an initial nullable join # must use outer joins so that all results are included. self.assertSequenceEqual( Report.objects.select_related("creator", "creator__extra").order_by("name"), [self.r1, self.r2, self.r3], ) # When there are multiple paths to a table from another table, we have # to be careful not to accidentally reuse an inappropriate join when # using select_related(). We used to return the parent's Detail record # here by mistake. d1 = Detail.objects.create(data="d1") d2 = Detail.objects.create(data="d2") m1 = Member.objects.create(name="m1", details=d1) m2 = Member.objects.create(name="m2", details=d2) Child.objects.create(person=m2, parent=m1) obj = m1.children.select_related("person__details")[0] self.assertEqual(obj.person.details.data, "d2") def test_order_by_resetting(self): # Calling order_by() with no parameters removes any existing ordering on the # model. But it should still be possible to add new ordering after that. qs = Author.objects.order_by().order_by("name") self.assertIn("ORDER BY", qs.query.get_compiler(qs.db).as_sql()[0]) def test_order_by_reverse_fk(self): # It is possible to order by reverse of foreign key, although that can lead # to duplicate results. 
c1 = SimpleCategory.objects.create(name="category1") c2 = SimpleCategory.objects.create(name="category2") CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c1) self.assertSequenceEqual( SimpleCategory.objects.order_by("categoryitem", "pk"), [c1, c2, c1] ) def test_filter_reverse_non_integer_pk(self): date_obj = DateTimePK.objects.create() extra_obj = ExtraInfo.objects.create(info="extra", date=date_obj) self.assertEqual( DateTimePK.objects.filter(extrainfo=extra_obj).get(), date_obj, ) def test_ticket10181(self): # Avoid raising an EmptyResultSet if an inner query is probably # empty (and hence, not executed). self.assertSequenceEqual( Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])), [] ) def test_ticket15316_filter_false(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create( name="named category1", special_name="special1" ) c3 = SpecialCategory.objects.create( name="named category2", special_name="special2" ) CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.filter(category__specialcategory__isnull=False) self.assertEqual(qs.count(), 2) self.assertCountEqual(qs, [ci2, ci3]) def test_ticket15316_exclude_false(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create( name="named category1", special_name="special1" ) c3 = SpecialCategory.objects.create( name="named category2", special_name="special2" ) ci1 = CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_filter_true(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create( name="named category1", special_name="special1" ) c3 = SpecialCategory.objects.create( name="named category2", special_name="special2" ) ci1 = CategoryItem.objects.create(category=c1) CategoryItem.objects.create(category=c2) CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.filter(category__specialcategory__isnull=True) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_exclude_true(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create( name="named category1", special_name="special1" ) c3 = SpecialCategory.objects.create( name="named category2", special_name="special2" ) CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True) self.assertEqual(qs.count(), 2) self.assertCountEqual(qs, [ci2, ci3]) def test_ticket15316_one2one_filter_false(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.filter( category__onetoonecategory__isnull=False ).order_by("pk") self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, 
[ci2, ci3]) def test_ticket15316_one2one_exclude_false(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) CategoryItem.objects.create(category=c0) CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_one2one_filter_true(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) CategoryItem.objects.create(category=c0) CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True) self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs, [ci1]) def test_ticket15316_one2one_exclude_true(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") OneToOneCategory.objects.create(category=c1, new_name="new1") OneToOneCategory.objects.create(category=c0, new_name="new2") CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.exclude( category__onetoonecategory__isnull=True ).order_by("pk") self.assertEqual(qs.count(), 2) self.assertSequenceEqual(qs, [ci2, ci3]) class Queries5Tests(TestCase): @classmethod def setUpTestData(cls): # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the # Meta.ordering will be rank3, rank2, rank1. cls.n1 = Note.objects.create(note="n1", misc="foo", id=1) cls.n2 = Note.objects.create(note="n2", misc="bar", id=2) e1 = ExtraInfo.objects.create(info="e1", note=cls.n1) e2 = ExtraInfo.objects.create(info="e2", note=cls.n2) a1 = Author.objects.create(name="a1", num=1001, extra=e1) a2 = Author.objects.create(name="a2", num=2002, extra=e1) a3 = Author.objects.create(name="a3", num=3003, extra=e2) cls.rank2 = Ranking.objects.create(rank=2, author=a2) cls.rank1 = Ranking.objects.create(rank=1, author=a3) cls.rank3 = Ranking.objects.create(rank=3, author=a1) def test_ordering(self): # Cross model ordering is possible in Meta, too. self.assertSequenceEqual( Ranking.objects.all(), [self.rank3, self.rank2, self.rank1], ) self.assertSequenceEqual( Ranking.objects.order_by("rank"), [self.rank1, self.rank2, self.rank3], ) # Ordering of extra() pieces is possible, too and you can mix extra # fields and model fields in the ordering. self.assertSequenceEqual( Ranking.objects.extra( tables=["django_site"], order_by=["-django_site.id", "rank"] ), [self.rank1, self.rank2, self.rank3], ) sql = "case when %s > 2 then 1 else 0 end" % connection.ops.quote_name("rank") qs = Ranking.objects.extra(select={"good": sql}) self.assertEqual( [o.good for o in qs.extra(order_by=("-good",))], [True, False, False] ) self.assertSequenceEqual( qs.extra(order_by=("-good", "id")), [self.rank3, self.rank2, self.rank1], ) # Despite having some extra aliases in the query, we can still omit # them in a values() query. 
dicts = qs.values("id", "rank").order_by("id") self.assertEqual([d["rank"] for d in dicts], [2, 1, 3]) def test_ticket7256(self): # An empty values() call includes all aliases, including those from an # extra() sql = "case when %s > 2 then 1 else 0 end" % connection.ops.quote_name("rank") qs = Ranking.objects.extra(select={"good": sql}) dicts = qs.values().order_by("id") for d in dicts: del d["id"] del d["author_id"] self.assertEqual( [sorted(d.items()) for d in dicts], [ [("good", 0), ("rank", 2)], [("good", 0), ("rank", 1)], [("good", 1), ("rank", 3)], ], ) def test_ticket7045(self): # Extra tables used to crash SQL construction on the second use. qs = Ranking.objects.extra(tables=["django_site"]) qs.query.get_compiler(qs.db).as_sql() # test passes if this doesn't raise an exception. qs.query.get_compiler(qs.db).as_sql() def test_ticket9848(self): # Make sure that updates which only filter on sub-tables don't # inadvertently update the wrong records (bug #9848). author_start = Author.objects.get(name="a1") ranking_start = Ranking.objects.get(author__name="a1") # Make sure that the IDs from different tables don't happen to match. self.assertSequenceEqual( Ranking.objects.filter(author__name="a1"), [self.rank3], ) self.assertEqual(Ranking.objects.filter(author__name="a1").update(rank=4636), 1) r = Ranking.objects.get(author__name="a1") self.assertEqual(r.id, ranking_start.id) self.assertEqual(r.author.id, author_start.id) self.assertEqual(r.rank, 4636) r.rank = 3 r.save() self.assertSequenceEqual( Ranking.objects.all(), [self.rank3, self.rank2, self.rank1], ) def test_ticket5261(self): # Test different empty excludes. self.assertSequenceEqual( Note.objects.exclude(Q()), [self.n1, self.n2], ) self.assertSequenceEqual( Note.objects.filter(~Q()), [self.n1, self.n2], ) self.assertSequenceEqual( Note.objects.filter(~Q() | ~Q()), [self.n1, self.n2], ) self.assertSequenceEqual( Note.objects.exclude(~Q() & ~Q()), [self.n1, self.n2], ) self.assertSequenceEqual( Note.objects.exclude(~Q() ^ ~Q()), [self.n1, self.n2], ) def test_extra_select_literal_percent_s(self): # Allow %%s to escape select clauses self.assertEqual(Note.objects.extra(select={"foo": "'%%s'"})[0].foo, "%s") self.assertEqual( Note.objects.extra(select={"foo": "'%%s bar %%s'"})[0].foo, "%s bar %s" ) self.assertEqual( Note.objects.extra(select={"foo": "'bar %%s'"})[0].foo, "bar %s" ) def test_extra_select_alias_sql_injection(self): crafted_alias = """injected_name" from "queries_note"; --""" msg = ( "Column aliases cannot contain whitespace characters, quotation marks, " "semicolons, or SQL comments." ) with self.assertRaisesMessage(ValueError, msg): Note.objects.extra(select={crafted_alias: "1"}) def test_queryset_reuse(self): # Using querysets doesn't mutate aliases. authors = Author.objects.filter(Q(name="a1") | Q(name="nonexistent")) self.assertEqual(Ranking.objects.filter(author__in=authors).get(), self.rank3) self.assertEqual(authors.count(), 1) def test_filter_unsaved_object(self): # These tests will catch ValueError in Django 5.0 when passing unsaved # model instances to related filters becomes forbidden. # msg = "Model instances passed to related filters must be saved." msg = "Passing unsaved model instances to related filters is deprecated." 
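        # The same deprecation warning should fire whether the unsaved
        # instance is passed directly or inside an __in list.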
        company = Company.objects.create(name="Django")
        with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
            Employment.objects.filter(employer=Company(name="unsaved"))
        with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
            Employment.objects.filter(employer__in=[company, Company(name="unsaved")])
        with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
            StaffUser.objects.filter(staff=Staff(name="unsaved"))


class SelectRelatedTests(TestCase):
    def test_tickets_3045_3288(self):
        # Once upon a time, select_related() with circular relations would
        # loop infinitely if you forgot to specify "depth". Now we set an
        # arbitrary default upper bound.
        self.assertSequenceEqual(X.objects.all(), [])
        self.assertSequenceEqual(X.objects.select_related(), [])


class SubclassFKTests(TestCase):
    def test_ticket7778(self):
        # Model subclasses could not be deleted if a nullable foreign key
        # relates to a model that relates back.
        num_celebs = Celebrity.objects.count()
        tvc = TvChef.objects.create(name="Huey")
        self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
        Fan.objects.create(fan_of=tvc)
        Fan.objects.create(fan_of=tvc)
        tvc.delete()

        # The parent object should have been deleted as well.
        self.assertEqual(Celebrity.objects.count(), num_celebs)


class CustomPkTests(TestCase):
    def test_ticket7371(self):
        self.assertQuerySetEqual(Related.objects.order_by("custom"), [])


class NullableRelOrderingTests(TestCase):
    def test_ticket10028(self):
        # Ordering by a model related via nullable relations(!) should use
        # outer joins, so that all results are included.
        p1 = Plaything.objects.create(name="p1")
        self.assertSequenceEqual(Plaything.objects.all(), [p1])

    def test_join_already_in_query(self):
        # Ordering by a model related via nullable relations should not
        # change the join type of already existing joins.
        Plaything.objects.create(name="p1")
        s = SingleObject.objects.create(name="s")
        r = RelatedObject.objects.create(single=s, f=1)
        p2 = Plaything.objects.create(name="p2", others=r)
        qs = Plaything.objects.filter(others__isnull=False).order_by("pk")
        self.assertNotIn("JOIN", str(qs.query))
        qs = Plaything.objects.filter(others__f__isnull=False).order_by("pk")
        self.assertIn("INNER", str(qs.query))
        qs = qs.order_by("others__single__name")
        # The ordering by others__single__name will add one new join (to
        # single) and that join must be a LEFT join. The already existing
        # join to related objects must be kept INNER. So, we have both an
        # INNER and a LEFT join in the query.
        self.assertEqual(str(qs.query).count("LEFT"), 1)
        self.assertEqual(str(qs.query).count("INNER"), 1)
        self.assertSequenceEqual(qs, [p2])


class DisjunctiveFilterTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
        cls.e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)

    def test_ticket7872(self):
        # Another variation on the disjunctive filtering theme.

        # For the purposes of this regression test, it's important that there
        # is no Join object related to the LeafA we create.
        l1 = LeafA.objects.create(data="first")
        self.assertSequenceEqual(LeafA.objects.all(), [l1])
        self.assertSequenceEqual(
            LeafA.objects.filter(Q(data="first") | Q(join__b__data="second")),
            [l1],
        )

    def test_ticket8283(self):
        # Checking that applying filters after a disjunction works correctly.
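        # Whichever operand of the OR comes first, filtering the combined
        # queryset afterwards should narrow the results in the same way.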
        self.assertSequenceEqual(
            (
                ExtraInfo.objects.filter(note=self.n1)
                | ExtraInfo.objects.filter(info="e2")
            ).filter(note=self.n1),
            [self.e1],
        )
        self.assertSequenceEqual(
            (
                ExtraInfo.objects.filter(info="e2")
                | ExtraInfo.objects.filter(note=self.n1)
            ).filter(note=self.n1),
            [self.e1],
        )


class Queries6Tests(TestCase):
    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        cls.t1 = Tag.objects.create(name="t1", category=generic)
        cls.t2 = Tag.objects.create(name="t2", parent=cls.t1, category=generic)
        cls.t3 = Tag.objects.create(name="t3", parent=cls.t1)
        cls.t4 = Tag.objects.create(name="t4", parent=cls.t3)
        cls.t5 = Tag.objects.create(name="t5", parent=cls.t3)
        n1 = Note.objects.create(note="n1", misc="foo", id=1)
        cls.ann1 = Annotation.objects.create(name="a1", tag=cls.t1)
        cls.ann1.notes.add(n1)
        cls.ann2 = Annotation.objects.create(name="a2", tag=cls.t4)

    def test_parallel_iterators(self):
        # Parallel iterators work.
        qs = Tag.objects.all()
        i1, i2 = iter(qs), iter(qs)
        self.assertEqual(repr(next(i1)), "<Tag: t1>")
        self.assertEqual(repr(next(i1)), "<Tag: t2>")
        self.assertEqual(repr(next(i2)), "<Tag: t1>")
        self.assertEqual(repr(next(i2)), "<Tag: t2>")
        self.assertEqual(repr(next(i2)), "<Tag: t3>")
        self.assertEqual(repr(next(i1)), "<Tag: t3>")

        qs = X.objects.all()
        self.assertFalse(qs)
        self.assertFalse(qs)

    def test_nested_queries_sql(self):
        # Nested queries should not evaluate the inner query as part of
        # constructing the SQL (so we should see a nested query here,
        # indicated by two "SELECT" calls).
        qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
        self.assertEqual(qs.query.get_compiler(qs.db).as_sql()[0].count("SELECT"), 2)

    def test_tickets_8921_9188(self):
        # Incorrect SQL was being generated for certain types of exclude()
        # queries that crossed multi-valued relations (#8921, #9188 and some
        # preemptively discovered cases).
        self.assertSequenceEqual(
            PointerA.objects.filter(connection__pointerb__id=1), []
        )
        self.assertSequenceEqual(
            PointerA.objects.exclude(connection__pointerb__id=1), []
        )

        self.assertSequenceEqual(
            Tag.objects.exclude(children=None),
            [self.t1, self.t3],
        )

        # This example is tricky because the parent could be NULL, so only
        # checking parents with annotations omits some results (tag t1, in
        # this case).
        self.assertSequenceEqual(
            Tag.objects.exclude(parent__annotation__name="a1"),
            [self.t1, self.t4, self.t5],
        )

        # The annotation->tag link is single-valued and the tag->children
        # link is multi-valued. So we have to split the exclude filter in the
        # middle and then optimize the inner query without losing results.
        self.assertSequenceEqual(
            Annotation.objects.exclude(tag__children__name="t2"),
            [self.ann2],
        )

        # Nested queries are possible (although they should be used with
        # care, since they can have performance problems on backends like
        # MySQL).
        self.assertSequenceEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
            [self.ann1],
        )

    def test_ticket3739(self):
        # The all() method on querysets returns a copy of the queryset.
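        # all() must return a new queryset instance so that later
        # modifications of the copy cannot leak back into the original.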
        q1 = Tag.objects.order_by("name")
        self.assertIsNot(q1, q1.all())

    def test_ticket_11320(self):
        qs = Tag.objects.exclude(category=None).exclude(category__name="foo")
        self.assertEqual(str(qs.query).count(" INNER JOIN "), 1)

    def test_distinct_ordered_sliced_subquery_aggregation(self):
        self.assertEqual(
            Tag.objects.distinct().order_by("category__name")[:3].count(), 3
        )

    def test_multiple_columns_with_the_same_name_slice(self):
        self.assertEqual(
            list(
                Tag.objects.order_by("name").values_list("name", "category__name")[:2]
            ),
            [("t1", "Generic"), ("t2", "Generic")],
        )
        self.assertSequenceEqual(
            Tag.objects.order_by("name").select_related("category")[:2],
            [self.t1, self.t2],
        )
        self.assertEqual(
            list(
                Tag.objects.order_by("-name").values_list("name", "parent__name")[:2]
            ),
            [("t5", "t3"), ("t4", "t3")],
        )
        self.assertSequenceEqual(
            Tag.objects.order_by("-name").select_related("parent")[:2],
            [self.t5, self.t4],
        )

    def test_col_alias_quoted(self):
        with CaptureQueriesContext(connection) as captured_queries:
            self.assertEqual(
                Tag.objects.values("parent")
                .annotate(
                    tag_per_parent=Count("pk"),
                )
                .aggregate(Max("tag_per_parent")),
                {"tag_per_parent__max": 2},
            )
        sql = captured_queries[0]["sql"]
        self.assertIn("AS %s" % connection.ops.quote_name("col1"), sql)

    def test_xor_subquery(self):
        self.assertSequenceEqual(
            Tag.objects.filter(
                Exists(Tag.objects.filter(id=OuterRef("id"), name="t3"))
                ^ Exists(Tag.objects.filter(id=OuterRef("id"), parent=self.t1))
            ),
            [self.t2],
        )


class RawQueriesTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        Note.objects.create(note="n1", misc="foo", id=1)

    def test_ticket14729(self):
        # Test the representation of raw queries with one or more parameters
        # passed as a list.
        query = "SELECT * FROM queries_note WHERE note = %s"
        params = ["n1"]
        qs = Note.objects.raw(query, params=params)
        self.assertEqual(
            repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>"
        )

        query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
        params = ["n1", "foo"]
        qs = Note.objects.raw(query, params=params)
        self.assertEqual(
            repr(qs),
            "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>",
        )


class GeneratorExpressionTests(SimpleTestCase):
    def test_ticket10432(self):
        # Using an empty iterator as the rvalue for an "__in" lookup is legal.
        self.assertCountEqual(Note.objects.filter(pk__in=iter(())), [])


class ComparisonTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
        e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
        cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1)

    def test_ticket8597(self):
        # Regression tests for case-insensitive comparisons.
        item_ab = Item.objects.create(
            name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1
        )
        item_xy = Item.objects.create(
            name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1
        )
        self.assertSequenceEqual(
            Item.objects.filter(name__iexact="A_b"),
            [item_ab],
        )
        self.assertSequenceEqual(
            Item.objects.filter(name__iexact="x%Y"),
            [item_xy],
        )
        self.assertSequenceEqual(
            Item.objects.filter(name__istartswith="A_b"),
            [item_ab],
        )
        self.assertSequenceEqual(
            Item.objects.filter(name__iendswith="A_b"),
            [item_ab],
        )


class ExistsSql(TestCase):
    def test_exists(self):
        with CaptureQueriesContext(connection) as captured_queries:
            self.assertFalse(Tag.objects.exists())
        # OK, the exists query worked, but did it include too many columns?
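        # An existence check only needs a constant select list; the query is
        # roughly (illustrative; exact form is backend-dependent):
        #   SELECT 1 AS "a" FROM "queries_tag" LIMIT 1
        # so neither the "id" nor the "name" column should appear in it.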
self.assertEqual(len(captured_queries), 1) qstr = captured_queries[0]["sql"] id, name = connection.ops.quote_name("id"), connection.ops.quote_name("name") self.assertNotIn(id, qstr) self.assertNotIn(name, qstr) def test_distinct_exists(self): with CaptureQueriesContext(connection) as captured_queries: self.assertIs(Article.objects.distinct().exists(), False) self.assertEqual(len(captured_queries), 1) captured_sql = captured_queries[0]["sql"] self.assertNotIn(connection.ops.quote_name("id"), captured_sql) self.assertNotIn(connection.ops.quote_name("name"), captured_sql) def test_sliced_distinct_exists(self): with CaptureQueriesContext(connection) as captured_queries: self.assertIs(Article.objects.distinct()[1:3].exists(), False) self.assertEqual(len(captured_queries), 1) captured_sql = captured_queries[0]["sql"] self.assertIn(connection.ops.quote_name("id"), captured_sql) self.assertIn(connection.ops.quote_name("name"), captured_sql) def test_ticket_18414(self): Article.objects.create(name="one", created=datetime.datetime.now()) Article.objects.create(name="one", created=datetime.datetime.now()) Article.objects.create(name="two", created=datetime.datetime.now()) self.assertTrue(Article.objects.exists()) self.assertTrue(Article.objects.distinct().exists()) self.assertTrue(Article.objects.distinct()[1:3].exists()) self.assertFalse(Article.objects.distinct()[1:1].exists()) @skipUnlessDBFeature("can_distinct_on_fields") def test_ticket_18414_distinct_on(self): Article.objects.create(name="one", created=datetime.datetime.now()) Article.objects.create(name="one", created=datetime.datetime.now()) Article.objects.create(name="two", created=datetime.datetime.now()) self.assertTrue(Article.objects.distinct("name").exists()) self.assertTrue(Article.objects.distinct("name")[1:2].exists()) self.assertFalse(Article.objects.distinct("name")[2:3].exists()) class QuerysetOrderedTests(unittest.TestCase): """ Tests for the Queryset.ordered attribute. 
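    A QuerySet counts as "ordered" when it has an explicit order_by(), when
    its model declares a default Meta.ordering, or when it is empty;
    order_by() with no arguments clears a default ordering. An illustrative
    sketch mirroring the assertions below (an aside, not original test text):

        Annotation.objects.all().ordered   # False: no default ordering
        Tag.objects.all().ordered          # True: Tag declares an ordering
        Tag.objects.order_by().ordered     # False: default ordering cleared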
""" def test_no_default_or_explicit_ordering(self): self.assertIs(Annotation.objects.all().ordered, False) def test_cleared_default_ordering(self): self.assertIs(Tag.objects.all().ordered, True) self.assertIs(Tag.objects.order_by().ordered, False) def test_explicit_ordering(self): self.assertIs(Annotation.objects.order_by("id").ordered, True) def test_empty_queryset(self): self.assertIs(Annotation.objects.none().ordered, True) def test_order_by_extra(self): self.assertIs(Annotation.objects.extra(order_by=["id"]).ordered, True) def test_annotated_ordering(self): qs = Annotation.objects.annotate(num_notes=Count("notes")) self.assertIs(qs.ordered, False) self.assertIs(qs.order_by("num_notes").ordered, True) def test_annotated_default_ordering(self): qs = Tag.objects.annotate(num_notes=Count("pk")) self.assertIs(qs.ordered, False) self.assertIs(qs.order_by("name").ordered, True) def test_annotated_values_default_ordering(self): qs = Tag.objects.values("name").annotate(num_notes=Count("pk")) self.assertIs(qs.ordered, False) self.assertIs(qs.order_by("name").ordered, True) @skipUnlessDBFeature("allow_sliced_subqueries_with_in") class SubqueryTests(TestCase): @classmethod def setUpTestData(cls): NamedCategory.objects.create(id=1, name="first") NamedCategory.objects.create(id=2, name="second") NamedCategory.objects.create(id=3, name="third") NamedCategory.objects.create(id=4, name="fourth") def test_ordered_subselect(self): "Subselects honor any manual ordering" query = DumbCategory.objects.filter( id__in=DumbCategory.objects.order_by("-id")[0:2] ) self.assertEqual(set(query.values_list("id", flat=True)), {3, 4}) query = DumbCategory.objects.filter( id__in=DumbCategory.objects.order_by("-id")[:2] ) self.assertEqual(set(query.values_list("id", flat=True)), {3, 4}) query = DumbCategory.objects.filter( id__in=DumbCategory.objects.order_by("-id")[1:2] ) self.assertEqual(set(query.values_list("id", flat=True)), {3}) query = DumbCategory.objects.filter( id__in=DumbCategory.objects.order_by("-id")[2:] ) self.assertEqual(set(query.values_list("id", flat=True)), {1, 2}) def test_slice_subquery_and_query(self): """ Slice a query that has a sliced subquery """ query = DumbCategory.objects.filter( id__in=DumbCategory.objects.order_by("-id")[0:2] )[0:2] self.assertEqual({x.id for x in query}, {3, 4}) query = DumbCategory.objects.filter( id__in=DumbCategory.objects.order_by("-id")[1:3] )[1:3] self.assertEqual({x.id for x in query}, {3}) query = DumbCategory.objects.filter( id__in=DumbCategory.objects.order_by("-id")[2:] )[1:] self.assertEqual({x.id for x in query}, {2}) def test_related_sliced_subquery(self): """ Related objects constraints can safely contain sliced subqueries. 
refs #22434 """ generic = NamedCategory.objects.create(id=5, name="Generic") t1 = Tag.objects.create(name="t1", category=generic) t2 = Tag.objects.create(name="t2", category=generic) ManagedModel.objects.create(data="mm1", tag=t1, public=True) mm2 = ManagedModel.objects.create(data="mm2", tag=t2, public=True) query = ManagedModel.normal_manager.filter( tag__in=Tag.objects.order_by("-id")[:1] ) self.assertEqual({x.id for x in query}, {mm2.id}) def test_sliced_delete(self): "Delete queries can safely contain sliced subqueries" DumbCategory.objects.filter( id__in=DumbCategory.objects.order_by("-id")[0:1] ).delete() self.assertEqual( set(DumbCategory.objects.values_list("id", flat=True)), {1, 2, 3} ) DumbCategory.objects.filter( id__in=DumbCategory.objects.order_by("-id")[1:2] ).delete() self.assertEqual(set(DumbCategory.objects.values_list("id", flat=True)), {1, 3}) DumbCategory.objects.filter( id__in=DumbCategory.objects.order_by("-id")[1:] ).delete() self.assertEqual(set(DumbCategory.objects.values_list("id", flat=True)), {3}) def test_distinct_ordered_sliced_subquery(self): # Implicit values('id'). self.assertSequenceEqual( NamedCategory.objects.filter( id__in=NamedCategory.objects.distinct().order_by("name")[0:2], ) .order_by("name") .values_list("name", flat=True), ["first", "fourth"], ) # Explicit values('id'). self.assertSequenceEqual( NamedCategory.objects.filter( id__in=NamedCategory.objects.distinct() .order_by("-name") .values("id")[0:2], ) .order_by("name") .values_list("name", flat=True), ["second", "third"], ) # Annotated value. self.assertSequenceEqual( DumbCategory.objects.filter( id__in=DumbCategory.objects.annotate(double_id=F("id") * 2) .order_by("id") .distinct() .values("double_id")[0:2], ) .order_by("id") .values_list("id", flat=True), [2, 4], ) class QuerySetBitwiseOperationTests(TestCase): @classmethod def setUpTestData(cls): cls.school = School.objects.create() cls.room_1 = Classroom.objects.create( school=cls.school, has_blackboard=False, name="Room 1" ) cls.room_2 = Classroom.objects.create( school=cls.school, has_blackboard=True, name="Room 2" ) cls.room_3 = Classroom.objects.create( school=cls.school, has_blackboard=True, name="Room 3" ) cls.room_4 = Classroom.objects.create( school=cls.school, has_blackboard=False, name="Room 4" ) tag = Tag.objects.create() cls.annotation_1 = Annotation.objects.create(tag=tag) annotation_2 = Annotation.objects.create(tag=tag) note = cls.annotation_1.notes.create(tag=tag) cls.base_user_1 = BaseUser.objects.create(annotation=cls.annotation_1) cls.base_user_2 = BaseUser.objects.create(annotation=annotation_2) cls.task = Task.objects.create( owner=cls.base_user_2, creator=cls.base_user_2, note=note, ) @skipUnlessDBFeature("allow_sliced_subqueries_with_in") def test_or_with_rhs_slice(self): qs1 = Classroom.objects.filter(has_blackboard=True) qs2 = Classroom.objects.filter(has_blackboard=False)[:1] self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3]) @skipUnlessDBFeature("allow_sliced_subqueries_with_in") def test_or_with_lhs_slice(self): qs1 = Classroom.objects.filter(has_blackboard=True)[:1] qs2 = Classroom.objects.filter(has_blackboard=False) self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4]) @skipUnlessDBFeature("allow_sliced_subqueries_with_in") def test_or_with_both_slice(self): qs1 = Classroom.objects.filter(has_blackboard=False)[:1] qs2 = Classroom.objects.filter(has_blackboard=True)[:1] self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2]) 
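    # A rough sketch of why allow_sliced_subqueries_with_in is required above
    # (an assumption about the shape, not the exact SQL Django emits): each
    # sliced side is pushed into a pk__in subquery before combining, e.g.
    #
    #   SELECT ... FROM queries_classroom
    #   WHERE id IN (SELECT id FROM ... WHERE NOT has_blackboard LIMIT 1)
    #      OR id IN (SELECT id FROM ... WHERE has_blackboard LIMIT 1)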
@skipUnlessDBFeature("allow_sliced_subqueries_with_in") def test_or_with_both_slice_and_ordering(self): qs1 = Classroom.objects.filter(has_blackboard=False).order_by("-pk")[:1] qs2 = Classroom.objects.filter(has_blackboard=True).order_by("-name")[:1] self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4]) @skipUnlessDBFeature("allow_sliced_subqueries_with_in") def test_xor_with_rhs_slice(self): qs1 = Classroom.objects.filter(has_blackboard=True) qs2 = Classroom.objects.filter(has_blackboard=False)[:1] self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2, self.room_3]) @skipUnlessDBFeature("allow_sliced_subqueries_with_in") def test_xor_with_lhs_slice(self): qs1 = Classroom.objects.filter(has_blackboard=True)[:1] qs2 = Classroom.objects.filter(has_blackboard=False) self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2, self.room_4]) @skipUnlessDBFeature("allow_sliced_subqueries_with_in") def test_xor_with_both_slice(self): qs1 = Classroom.objects.filter(has_blackboard=False)[:1] qs2 = Classroom.objects.filter(has_blackboard=True)[:1] self.assertCountEqual(qs1 ^ qs2, [self.room_1, self.room_2]) @skipUnlessDBFeature("allow_sliced_subqueries_with_in") def test_xor_with_both_slice_and_ordering(self): qs1 = Classroom.objects.filter(has_blackboard=False).order_by("-pk")[:1] qs2 = Classroom.objects.filter(has_blackboard=True).order_by("-name")[:1] self.assertCountEqual(qs1 ^ qs2, [self.room_3, self.room_4]) def test_subquery_aliases(self): combined = School.objects.filter(pk__isnull=False) & School.objects.filter( Exists( Classroom.objects.filter( has_blackboard=True, school=OuterRef("pk"), ) ), ) self.assertSequenceEqual(combined, [self.school]) nested_combined = School.objects.filter(pk__in=combined.values("pk")) self.assertSequenceEqual(nested_combined, [self.school]) def test_conflicting_aliases_during_combine(self): qs1 = self.annotation_1.baseuser_set.all() qs2 = BaseUser.objects.filter( Q(owner__note__in=self.annotation_1.notes.all()) | Q(creator__note__in=self.annotation_1.notes.all()) ) self.assertSequenceEqual(qs1, [self.base_user_1]) self.assertSequenceEqual(qs2, [self.base_user_2]) self.assertCountEqual(qs2 | qs1, qs1 | qs2) self.assertCountEqual(qs2 | qs1, [self.base_user_1, self.base_user_2]) class CloneTests(TestCase): def test_evaluated_queryset_as_argument(self): """ If a queryset is already evaluated, it can still be used as a query arg. """ n = Note(note="Test1", misc="misc") n.save() e = ExtraInfo(info="good", note=n) e.save() n_list = Note.objects.all() # Evaluate the Note queryset, populating the query cache list(n_list) # Make one of cached results unpickable. n_list._result_cache[0].lock = Lock() with self.assertRaises(TypeError): pickle.dumps(n_list) # Use the note queryset in a query, and evaluate # that query in a way that involves cloning. self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, "good") def test_no_model_options_cloning(self): """ Cloning a queryset does not get out of hand. While complete testing is impossible, this is a sanity check against invalid use of deepcopy. refs #16759. """ opts_class = type(Note._meta) note_deepcopy = getattr(opts_class, "__deepcopy__", None) opts_class.__deepcopy__ = lambda obj, memo: self.fail( "Model options shouldn't be cloned." ) try: Note.objects.filter(pk__lte=F("pk") + 1).all() finally: if note_deepcopy is None: delattr(opts_class, "__deepcopy__") else: opts_class.__deepcopy__ = note_deepcopy def test_no_fields_cloning(self): """ Cloning a queryset does not get out of hand. 
While complete testing is impossible, this is a sanity check against invalid use of deepcopy. refs #16759. """ opts_class = type(Note._meta.get_field("misc")) note_deepcopy = getattr(opts_class, "__deepcopy__", None) opts_class.__deepcopy__ = lambda obj, memo: self.fail( "Model fields shouldn't be cloned" ) try: Note.objects.filter(note=F("misc")).all() finally: if note_deepcopy is None: delattr(opts_class, "__deepcopy__") else: opts_class.__deepcopy__ = note_deepcopy class EmptyQuerySetTests(SimpleTestCase): def test_emptyqueryset_values(self): # #14366 -- Calling .values() on an empty QuerySet and then cloning # that should not cause an error self.assertCountEqual(Number.objects.none().values("num").order_by("num"), []) def test_values_subquery(self): self.assertCountEqual( Number.objects.filter(pk__in=Number.objects.none().values("pk")), [] ) self.assertCountEqual( Number.objects.filter(pk__in=Number.objects.none().values_list("pk")), [] ) def test_ticket_19151(self): # #19151 -- Calling .values() or .values_list() on an empty QuerySet # should return an empty QuerySet and not cause an error. q = Author.objects.none() self.assertCountEqual(q.values(), []) self.assertCountEqual(q.values_list(), []) class ValuesQuerysetTests(TestCase): @classmethod def setUpTestData(cls): Number.objects.create(num=72) def test_flat_values_list(self): qs = Number.objects.values_list("num") qs = qs.values_list("num", flat=True) self.assertSequenceEqual(qs, [72]) def test_extra_values(self): # testing for ticket 14930 issues qs = Number.objects.extra( select={"value_plus_x": "num+%s", "value_minus_x": "num-%s"}, select_params=(1, 2), ) qs = qs.order_by("value_minus_x") qs = qs.values("num") self.assertSequenceEqual(qs, [{"num": 72}]) def test_extra_values_order_twice(self): # testing for ticket 14930 issues qs = Number.objects.extra( select={"value_plus_one": "num+1", "value_minus_one": "num-1"} ) qs = qs.order_by("value_minus_one").order_by("value_plus_one") qs = qs.values("num") self.assertSequenceEqual(qs, [{"num": 72}]) def test_extra_values_order_multiple(self): # Postgres doesn't allow constants in order by, so check for that. 
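        # (A hedged aside: PostgreSQL rejects ordering by a string literal,
        # e.g. "ORDER BY 'foo'" fails with "non-integer constant in ORDER BY",
        # and treats a bare integer as a select-list position, so the compiler
        # must not emit the constant itself.)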
        qs = Number.objects.extra(
            select={
                "value_plus_one": "num+1",
                "value_minus_one": "num-1",
                "constant_value": "1",
            }
        )
        qs = qs.order_by("value_plus_one", "value_minus_one", "constant_value")
        qs = qs.values("num")
        self.assertSequenceEqual(qs, [{"num": 72}])

    def test_extra_values_order_in_extra(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(
            select={"value_plus_one": "num+1", "value_minus_one": "num-1"},
            order_by=["value_minus_one"],
        )
        qs = qs.values("num")
        self.assertSequenceEqual(qs, [{"num": 72}])

    def test_extra_select_params_values_order_in_extra(self):
        # testing for ticket 23259 issues
        qs = Number.objects.extra(
            select={"value_plus_x": "num+%s"},
            select_params=[1],
            order_by=["value_plus_x"],
        )
        qs = qs.filter(num=72)
        qs = qs.values("num")
        self.assertSequenceEqual(qs, [{"num": 72}])

    def test_extra_multiple_select_params_values_order_by(self):
        # testing for ticket 23259 issues
        qs = Number.objects.extra(
            select={"value_plus_x": "num+%s", "value_minus_x": "num-%s"},
            select_params=(72, 72),
        )
        qs = qs.order_by("value_minus_x")
        qs = qs.filter(num=1)
        qs = qs.values("num")
        self.assertSequenceEqual(qs, [])

    def test_extra_values_list(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={"value_plus_one": "num+1"})
        qs = qs.order_by("value_plus_one")
        qs = qs.values_list("num")
        self.assertSequenceEqual(qs, [(72,)])

    def test_flat_extra_values_list(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={"value_plus_one": "num+1"})
        qs = qs.order_by("value_plus_one")
        qs = qs.values_list("num", flat=True)
        self.assertSequenceEqual(qs, [72])

    def test_field_error_values_list(self):
        # see #23443
        msg = (
            "Cannot resolve keyword %r into field. Join on 'name' not permitted."
            % "foo"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Tag.objects.values_list("name__foo")

    def test_named_values_list_flat(self):
        msg = "'flat' and 'named' can't be used together."
with self.assertRaisesMessage(TypeError, msg): Number.objects.values_list("num", flat=True, named=True) def test_named_values_list_bad_field_name(self): msg = "Type names and field names must be valid identifiers: '1'" with self.assertRaisesMessage(ValueError, msg): Number.objects.extra(select={"1": "num+1"}).values_list( "1", named=True ).first() def test_named_values_list_with_fields(self): qs = Number.objects.extra(select={"num2": "num+1"}).annotate(Count("id")) values = qs.values_list("num", "num2", named=True).first() self.assertEqual(type(values).__name__, "Row") self.assertEqual(values._fields, ("num", "num2")) self.assertEqual(values.num, 72) self.assertEqual(values.num2, 73) def test_named_values_list_without_fields(self): qs = Number.objects.extra(select={"num2": "num+1"}).annotate(Count("id")) values = qs.values_list(named=True).first() self.assertEqual(type(values).__name__, "Row") self.assertEqual( values._fields, ("num2", "id", "num", "other_num", "another_num", "id__count"), ) self.assertEqual(values.num, 72) self.assertEqual(values.num2, 73) self.assertEqual(values.id__count, 1) def test_named_values_list_expression_with_default_alias(self): expr = Count("id") values = ( Number.objects.annotate(id__count1=expr) .values_list(expr, "id__count1", named=True) .first() ) self.assertEqual(values._fields, ("id__count2", "id__count1")) def test_named_values_list_expression(self): expr = F("num") + 1 qs = Number.objects.annotate(combinedexpression1=expr).values_list( expr, "combinedexpression1", named=True ) values = qs.first() self.assertEqual(values._fields, ("combinedexpression2", "combinedexpression1")) def test_named_values_pickle(self): value = Number.objects.values_list("num", "other_num", named=True).get() self.assertEqual(value, (72, None)) self.assertEqual(pickle.loads(pickle.dumps(value)), value) class QuerySetSupportsPythonIdioms(TestCase): @classmethod def setUpTestData(cls): some_date = datetime.datetime(2014, 5, 16, 12, 1) cls.articles = [ Article.objects.create(name=f"Article {i}", created=some_date) for i in range(1, 8) ] def get_ordered_articles(self): return Article.objects.order_by("name") def test_can_get_items_using_index_and_slice_notation(self): self.assertEqual(self.get_ordered_articles()[0].name, "Article 1") self.assertSequenceEqual( self.get_ordered_articles()[1:3], [self.articles[1], self.articles[2]], ) def test_slicing_with_steps_can_be_used(self): self.assertSequenceEqual( self.get_ordered_articles()[::2], [ self.articles[0], self.articles[2], self.articles[4], self.articles[6], ], ) def test_slicing_without_step_is_lazy(self): with self.assertNumQueries(0): self.get_ordered_articles()[0:5] def test_slicing_with_tests_is_not_lazy(self): with self.assertNumQueries(1): self.get_ordered_articles()[0:5:3] def test_slicing_can_slice_again_after_slicing(self): self.assertSequenceEqual( self.get_ordered_articles()[0:5][0:2], [self.articles[0], self.articles[1]], ) self.assertSequenceEqual( self.get_ordered_articles()[0:5][4:], [self.articles[4]] ) self.assertSequenceEqual(self.get_ordered_articles()[0:5][5:], []) # Some more tests! self.assertSequenceEqual( self.get_ordered_articles()[2:][0:2], [self.articles[2], self.articles[3]], ) self.assertSequenceEqual( self.get_ordered_articles()[2:][:2], [self.articles[2], self.articles[3]], ) self.assertSequenceEqual( self.get_ordered_articles()[2:][2:3], [self.articles[4]] ) # Using an offset without a limit is also possible. 
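        # (A hedged aside: backends that require LIMIT alongside OFFSET get a
        # sentinel value from connection.ops.no_limit_value(), e.g. -1 on
        # SQLite, so the query below compiles there to roughly
        # "... LIMIT -1 OFFSET 5".)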
self.assertSequenceEqual( self.get_ordered_articles()[5:], [self.articles[5], self.articles[6]], ) def test_slicing_cannot_filter_queryset_once_sliced(self): msg = "Cannot filter a query once a slice has been taken." with self.assertRaisesMessage(TypeError, msg): Article.objects.all()[0:5].filter(id=1) def test_slicing_cannot_reorder_queryset_once_sliced(self): msg = "Cannot reorder a query once a slice has been taken." with self.assertRaisesMessage(TypeError, msg): Article.objects.all()[0:5].order_by("id") def test_slicing_cannot_combine_queries_once_sliced(self): msg = "Cannot combine queries once a slice has been taken." with self.assertRaisesMessage(TypeError, msg): Article.objects.all()[0:1] & Article.objects.all()[4:5] def test_slicing_negative_indexing_not_supported_for_single_element(self): """hint: inverting your ordering might do what you need""" msg = "Negative indexing is not supported." with self.assertRaisesMessage(ValueError, msg): Article.objects.all()[-1] def test_slicing_negative_indexing_not_supported_for_range(self): """hint: inverting your ordering might do what you need""" msg = "Negative indexing is not supported." with self.assertRaisesMessage(ValueError, msg): Article.objects.all()[0:-5] with self.assertRaisesMessage(ValueError, msg): Article.objects.all()[-1:] def test_invalid_index(self): msg = "QuerySet indices must be integers or slices, not str." with self.assertRaisesMessage(TypeError, msg): Article.objects.all()["foo"] def test_can_get_number_of_items_in_queryset_using_standard_len(self): self.assertEqual(len(Article.objects.filter(name__exact="Article 1")), 1) def test_can_combine_queries_using_and_and_or_operators(self): s1 = Article.objects.filter(name__exact="Article 1") s2 = Article.objects.filter(name__exact="Article 2") self.assertSequenceEqual( (s1 | s2).order_by("name"), [self.articles[0], self.articles[1]], ) self.assertSequenceEqual(s1 & s2, []) class WeirdQuerysetSlicingTests(TestCase): @classmethod def setUpTestData(cls): Number.objects.create(num=1) Number.objects.create(num=2) Article.objects.create(name="one", created=datetime.datetime.now()) Article.objects.create(name="two", created=datetime.datetime.now()) Article.objects.create(name="three", created=datetime.datetime.now()) Article.objects.create(name="four", created=datetime.datetime.now()) food = Food.objects.create(name="spam") Eaten.objects.create(meal="spam with eggs", food=food) def test_tickets_7698_10202(self): # People like to slice with '0' as the high-water mark. self.assertSequenceEqual(Article.objects.all()[0:0], []) self.assertSequenceEqual(Article.objects.all()[0:0][:10], []) self.assertEqual(Article.objects.all()[:0].count(), 0) msg = "Cannot change a query once a slice has been taken." 
        with self.assertRaisesMessage(TypeError, msg):
            Article.objects.all()[:0].latest("created")

    def test_empty_resultset_sql(self):
        # ticket #12192
        self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))

    def test_empty_sliced_subquery(self):
        self.assertEqual(
            Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0
        )

    def test_empty_sliced_subquery_exclude(self):
        self.assertEqual(
            Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1
        )

    def test_zero_length_values_slicing(self):
        n = 42
        with self.assertNumQueries(0):
            self.assertQuerySetEqual(Article.objects.values()[n:n], [])
            self.assertQuerySetEqual(Article.objects.values_list()[n:n], [])


class EscapingTests(TestCase):
    def test_ticket_7302(self):
        # Reserved names are appropriately escaped.
        r_a = ReservedName.objects.create(name="a", order=42)
        r_b = ReservedName.objects.create(name="b", order=37)
        self.assertSequenceEqual(
            ReservedName.objects.order_by("order"),
            [r_b, r_a],
        )
        self.assertSequenceEqual(
            ReservedName.objects.extra(
                select={"stuff": "name"}, order_by=("order", "stuff")
            ),
            [r_b, r_a],
        )


class ToFieldTests(TestCase):
    def test_in_query(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=pear, meal="dinner")
        self.assertEqual(
            set(Eaten.objects.filter(food__in=[apple, pear])),
            {lunch, dinner},
        )

    def test_in_subquery(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        self.assertEqual(
            set(Eaten.objects.filter(food__in=Food.objects.filter(name="apple"))),
            {lunch},
        )
        self.assertEqual(
            set(
                Eaten.objects.filter(
                    food__in=Food.objects.filter(name="apple").values("eaten__meal")
                )
            ),
            set(),
        )
        self.assertEqual(
            set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal="lunch"))),
            {apple},
        )

    def test_nested_in_subquery(self):
        extra = ExtraInfo.objects.create()
        author = Author.objects.create(num=42, extra=extra)
        report = Report.objects.create(creator=author)
        comment = ReportComment.objects.create(report=report)
        comments = ReportComment.objects.filter(
            report__in=Report.objects.filter(
                creator__in=extra.author_set.all(),
            ),
        )
        self.assertSequenceEqual(comments, [comment])

    def test_reverse_in(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
        dinner_pear = Eaten.objects.create(food=pear, meal="dinner")
        self.assertEqual(
            set(Food.objects.filter(eaten__in=[lunch_apple, dinner_pear])),
            {apple, pear},
        )

    def test_single_object(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=apple, meal="dinner")
        self.assertEqual(set(Eaten.objects.filter(food=apple)), {lunch, dinner})

    def test_single_object_reverse(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        self.assertEqual(set(Food.objects.filter(eaten=lunch)), {apple})

    def test_recursive_fk(self):
        node1 = Node.objects.create(num=42)
        node2 = Node.objects.create(num=1, parent=node1)
        self.assertEqual(list(Node.objects.filter(parent=node1)), [node2])

    def test_recursive_fk_reverse(self):
        node1 = Node.objects.create(num=42)
        node2 = Node.objects.create(num=1, parent=node1)
        self.assertEqual(list(Node.objects.filter(node=node2)), [node1])


class IsNullTests(TestCase):
    def test_primary_key(self):
        custom = CustomPk.objects.create(name="pk")
        null = Related.objects.create()
        notnull = Related.objects.create(custom=custom)
        self.assertSequenceEqual(
            Related.objects.filter(custom__isnull=False), [notnull]
        )
        self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])

    def test_to_field(self):
        apple = Food.objects.create(name="apple")
        e1 = Eaten.objects.create(food=apple, meal="lunch")
        e2 = Eaten.objects.create(meal="lunch")
        self.assertSequenceEqual(
            Eaten.objects.filter(food__isnull=False),
            [e1],
        )
        self.assertSequenceEqual(
            Eaten.objects.filter(food__isnull=True),
            [e2],
        )


class ConditionalTests(TestCase):
    """
    Tests whose execution depends on different environment conditions, such
    as the Python version or database backend features.
    """

    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name="t1", category=generic)
        Tag.objects.create(name="t2", parent=t1, category=generic)
        t3 = Tag.objects.create(name="t3", parent=t1)
        Tag.objects.create(name="t4", parent=t3)
        Tag.objects.create(name="t5", parent=t3)

    def test_infinite_loop(self):
        # If you're not careful, it's possible to introduce infinite loops via
        # default ordering on foreign keys in a cycle. We detect that.
        with self.assertRaisesMessage(FieldError, "Infinite loop caused by ordering."):
            list(LoopX.objects.all())  # Force queryset evaluation with list()
        with self.assertRaisesMessage(FieldError, "Infinite loop caused by ordering."):
            list(LoopZ.objects.all())  # Force queryset evaluation with list()

        # Note that this doesn't cause an infinite loop, since the default
        # ordering on the Tag model is empty (and thus defaults to using "id"
        # for the related field).
        self.assertEqual(len(Tag.objects.order_by("parent")), 5)

        # ... but you can still order in a non-recursive fashion among linked
        # fields (the previous test failed because the default ordering was
        # recursive).
        self.assertSequenceEqual(LoopX.objects.order_by("y__x__y__x__id"), [])

    # When grouping without specifying ordering, we add an explicit "ORDER BY
    # NULL" portion in MySQL to prevent unnecessary sorting.
    @skipUnlessDBFeature("requires_explicit_null_ordering_when_grouping")
    def test_null_ordering_added(self):
        query = Tag.objects.values_list("parent_id", flat=True).order_by().query
        query.group_by = ["parent_id"]
        sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
        fragment = "ORDER BY "
        pos = sql.find(fragment)
        self.assertEqual(sql.find(fragment, pos + 1), -1)
        self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))

    def test_in_list_limit(self):
        # The "in" lookup works with lists of 1000 items or more.
        # The number of items is picked to force three different IN batches
        # for Oracle, yet stay below the 2100-parameter limit for MSSQL.
        numbers = list(range(2050))
        max_query_params = connection.features.max_query_params
        if max_query_params is None or max_query_params >= len(numbers):
            Number.objects.bulk_create(Number(num=num) for num in numbers)
            for number in [1000, 1001, 2000, len(numbers)]:
                with self.subTest(number=number):
                    self.assertEqual(
                        Number.objects.filter(num__in=numbers[:number]).count(),
                        number,
                    )


class UnionTests(TestCase):
    """
    Tests for the union of two querysets. Bug #12252.
""" @classmethod def setUpTestData(cls): objectas = [] objectbs = [] objectcs = [] a_info = ["one", "two", "three"] for name in a_info: o = ObjectA(name=name) o.save() objectas.append(o) b_info = [ ("un", 1, objectas[0]), ("deux", 2, objectas[0]), ("trois", 3, objectas[2]), ] for name, number, objecta in b_info: o = ObjectB(name=name, num=number, objecta=objecta) o.save() objectbs.append(o) c_info = [("ein", objectas[2], objectbs[2]), ("zwei", objectas[1], objectbs[1])] for name, objecta, objectb in c_info: o = ObjectC(name=name, objecta=objecta, objectb=objectb) o.save() objectcs.append(o) def check_union(self, model, Q1, Q2): filter = model.objects.filter self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2))) self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2))) def test_A_AB(self): Q1 = Q(name="two") Q2 = Q(objectb__name="deux") self.check_union(ObjectA, Q1, Q2) def test_A_AB2(self): Q1 = Q(name="two") Q2 = Q(objectb__name="deux", objectb__num=2) self.check_union(ObjectA, Q1, Q2) def test_AB_ACB(self): Q1 = Q(objectb__name="deux") Q2 = Q(objectc__objectb__name="deux") self.check_union(ObjectA, Q1, Q2) def test_BAB_BAC(self): Q1 = Q(objecta__objectb__name="deux") Q2 = Q(objecta__objectc__name="ein") self.check_union(ObjectB, Q1, Q2) def test_BAB_BACB(self): Q1 = Q(objecta__objectb__name="deux") Q2 = Q(objecta__objectc__objectb__name="trois") self.check_union(ObjectB, Q1, Q2) def test_BA_BCA__BAB_BAC_BCA(self): Q1 = Q(objecta__name="one", objectc__objecta__name="two") Q2 = Q( objecta__objectc__name="ein", objectc__objecta__name="three", objecta__objectb__name="trois", ) self.check_union(ObjectB, Q1, Q2) class DefaultValuesInsertTest(TestCase): def test_no_extra_params(self): """ Can create an instance of a model with only the PK field (#17056)." 
""" DumbCategory.objects.create() class ExcludeTests(TestCase): @classmethod def setUpTestData(cls): f1 = Food.objects.create(name="apples") cls.f2 = Food.objects.create(name="oranges") Eaten.objects.create(food=f1, meal="dinner") cls.j1 = Job.objects.create(name="Manager") cls.r1 = Responsibility.objects.create(description="Playing golf") cls.j2 = Job.objects.create(name="Programmer") cls.r2 = Responsibility.objects.create(description="Programming") JobResponsibilities.objects.create(job=cls.j1, responsibility=cls.r1) JobResponsibilities.objects.create(job=cls.j2, responsibility=cls.r2) def test_to_field(self): self.assertSequenceEqual( Food.objects.exclude(eaten__meal="dinner"), [self.f2], ) self.assertSequenceEqual( Job.objects.exclude(responsibilities__description="Playing golf"), [self.j2], ) self.assertSequenceEqual( Responsibility.objects.exclude(jobs__name="Manager"), [self.r2], ) def test_ticket14511(self): alex = Person.objects.get_or_create(name="Alex")[0] jane = Person.objects.get_or_create(name="Jane")[0] oracle = Company.objects.get_or_create(name="Oracle")[0] google = Company.objects.get_or_create(name="Google")[0] microsoft = Company.objects.get_or_create(name="Microsoft")[0] intel = Company.objects.get_or_create(name="Intel")[0] def employ(employer, employee, title): Employment.objects.get_or_create( employee=employee, employer=employer, title=title ) employ(oracle, alex, "Engineer") employ(oracle, alex, "Developer") employ(google, alex, "Engineer") employ(google, alex, "Manager") employ(microsoft, alex, "Manager") employ(intel, alex, "Manager") employ(microsoft, jane, "Developer") employ(intel, jane, "Manager") alex_tech_employers = ( alex.employers.filter(employment__title__in=("Engineer", "Developer")) .distinct() .order_by("name") ) self.assertSequenceEqual(alex_tech_employers, [google, oracle]) alex_nontech_employers = ( alex.employers.exclude(employment__title__in=("Engineer", "Developer")) .distinct() .order_by("name") ) self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft]) def test_exclude_reverse_fk_field_ref(self): tag = Tag.objects.create() Note.objects.create(tag=tag, note="note") annotation = Annotation.objects.create(name="annotation", tag=tag) self.assertEqual( Annotation.objects.exclude(tag__note__note=F("name")).get(), annotation ) def test_exclude_with_circular_fk_relation(self): self.assertEqual( ObjectB.objects.exclude(objecta__objectb__name=F("name")).count(), 0 ) def test_subquery_exclude_outerref(self): qs = JobResponsibilities.objects.filter( Exists(Responsibility.objects.exclude(jobs=OuterRef("job"))), ) self.assertTrue(qs.exists()) self.r1.delete() self.assertFalse(qs.exists()) def test_exclude_nullable_fields(self): number = Number.objects.create(num=1, other_num=1) Number.objects.create(num=2, other_num=2, another_num=2) self.assertSequenceEqual( Number.objects.exclude(other_num=F("another_num")), [number], ) self.assertSequenceEqual( Number.objects.exclude(num=F("another_num")), [number], ) def test_exclude_multivalued_exists(self): with CaptureQueriesContext(connection) as captured_queries: self.assertSequenceEqual( Job.objects.exclude(responsibilities__description="Programming"), [self.j1], ) self.assertIn("exists", captured_queries[0]["sql"].lower()) def test_exclude_subquery(self): subquery = JobResponsibilities.objects.filter( responsibility__description="bar", ) | JobResponsibilities.objects.exclude( job__responsibilities__description="foo", ) self.assertCountEqual( Job.objects.annotate( 
                responsibility=subquery.filter(
                    job=OuterRef("name"),
                ).values("id")[:1]
            ),
            [self.j1, self.j2],
        )

    @ignore_warnings(category=RemovedInDjango50Warning)
    def test_exclude_unsaved_o2o_object(self):
        jack = Staff.objects.create(name="jack")
        jack_staff = StaffUser.objects.create(staff=jack)
        unsaved_object = Staff(name="jane")
        self.assertIsNone(unsaved_object.pk)
        self.assertSequenceEqual(
            StaffUser.objects.exclude(staff=unsaved_object), [jack_staff]
        )

    def test_exclude_unsaved_object(self):
        # These tests will catch ValueError in Django 5.0 when passing unsaved
        # model instances to related filters becomes forbidden.
        # msg = "Model instances passed to related filters must be saved."
        company = Company.objects.create(name="Django")
        msg = "Passing unsaved model instances to related filters is deprecated."
        with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
            Employment.objects.exclude(employer=Company(name="unsaved"))
        with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
            Employment.objects.exclude(employer__in=[company, Company(name="unsaved")])
        with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
            StaffUser.objects.exclude(staff=Staff(name="unsaved"))


class ExcludeTest17600(TestCase):
    """
    Some regression tests for ticket #17600. Some of these likely duplicate
    other existing tests.
    """

    @classmethod
    def setUpTestData(cls):
        # Create a few Orders.
        cls.o1 = Order.objects.create(pk=1)
        cls.o2 = Order.objects.create(pk=2)
        cls.o3 = Order.objects.create(pk=3)

        # Create some OrderItems for the first order with homogeneous
        # status_id values.
        cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)

        # Create some OrderItems for the second order with heterogeneous
        # status_id values.
        cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
        cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
        cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)

        # Create some OrderItems for the third order with heterogeneous
        # status_id values.
        cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
        cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
        cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)

    def test_exclude_plain(self):
        """
        This should exclude Orders which have some items with status 1.
        """
        self.assertSequenceEqual(
            Order.objects.exclude(items__status=1),
            [self.o3],
        )

    def test_exclude_plain_distinct(self):
        """
        This should exclude Orders which have some items with status 1.
        """
        self.assertSequenceEqual(
            Order.objects.exclude(items__status=1).distinct(),
            [self.o3],
        )

    def test_exclude_with_q_object_distinct(self):
        """
        This should exclude Orders which have some items with status 1.
        """
        self.assertSequenceEqual(
            Order.objects.exclude(Q(items__status=1)).distinct(),
            [self.o3],
        )

    def test_exclude_with_q_object_no_distinct(self):
        """
        This should exclude Orders which have some items with status 1.
        """
        self.assertSequenceEqual(
            Order.objects.exclude(Q(items__status=1)),
            [self.o3],
        )

    def test_exclude_with_q_is_equal_to_plain_exclude(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should yield the
        same QuerySet.
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1).distinct()),
            list(Order.objects.exclude(Q(items__status=1)).distinct()),
        )

    def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should yield the
        same QuerySet.
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1)),
            list(Order.objects.exclude(Q(items__status=1)).distinct()),
        )

    @unittest.expectedFailure
    def test_only_orders_with_all_items_having_status_1(self):
        """
        This should only return orders having ALL items set to status 1, or
        those orders not having any items at all. The correct way to write
        this query in SQL seems to be using two nested subqueries.
        """
        self.assertSequenceEqual(
            Order.objects.exclude(~Q(items__status=1)).distinct(),
            [self.o1],
        )


class Exclude15786(TestCase):
    """Regression test for #15786"""

    def test_ticket15786(self):
        c1 = SimpleCategory.objects.create(name="c1")
        c2 = SimpleCategory.objects.create(name="c2")
        OneToOneCategory.objects.create(category=c1)
        OneToOneCategory.objects.create(category=c2)
        rel = CategoryRelationship.objects.create(first=c1, second=c2)
        self.assertEqual(
            CategoryRelationship.objects.exclude(
                first__onetoonecategory=F("second__onetoonecategory")
            ).get(),
            rel,
        )


class NullInExcludeTest(TestCase):
    @classmethod
    def setUpTestData(cls):
        NullableName.objects.create(name="i1")
        NullableName.objects.create()

    def test_null_in_exclude_qs(self):
        none_val = "" if connection.features.interprets_empty_strings_as_nulls else None
        self.assertQuerySetEqual(
            NullableName.objects.exclude(name__in=[]),
            ["i1", none_val],
            attrgetter("name"),
        )
        self.assertQuerySetEqual(
            NullableName.objects.exclude(name__in=["i1"]),
            [none_val],
            attrgetter("name"),
        )
        self.assertQuerySetEqual(
            NullableName.objects.exclude(name__in=["i3"]),
            ["i1", none_val],
            attrgetter("name"),
        )
        inner_qs = NullableName.objects.filter(name="i1").values_list("name")
        self.assertQuerySetEqual(
            NullableName.objects.exclude(name__in=inner_qs),
            [none_val],
            attrgetter("name"),
        )
        # The inner queryset wasn't executed - it should have been turned into
        # a subquery above.
        self.assertIs(inner_qs._result_cache, None)

    @unittest.expectedFailure
    def test_col_not_in_list_containing_null(self):
        """
        The following case is not handled properly because SQL's COL NOT IN
        (list containing null) handling is too weird to abstract away.
        """
        self.assertQuerySetEqual(
            NullableName.objects.exclude(name__in=[None]), ["i1"], attrgetter("name")
        )

    def test_double_exclude(self):
        self.assertEqual(
            list(NullableName.objects.filter(~~Q(name="i1"))),
            list(NullableName.objects.filter(Q(name="i1"))),
        )
        self.assertNotIn(
            "IS NOT NULL", str(NullableName.objects.filter(~~Q(name="i1")).query)
        )


class EmptyStringsAsNullTest(TestCase):
    """
    Filtering on non-null character fields works as expected. The reason for
    these tests is that Oracle treats '' as NULL, and this can cause problems
    in query construction. Refs #17957.
    """

    @classmethod
    def setUpTestData(cls):
        cls.nc = NamedCategory.objects.create(name="")

    def test_direct_exclude(self):
        self.assertQuerySetEqual(
            NamedCategory.objects.exclude(name__in=["nonexistent"]),
            [self.nc.pk],
            attrgetter("pk"),
        )

    def test_joined_exclude(self):
        self.assertQuerySetEqual(
            DumbCategory.objects.exclude(namedcategory__name__in=["nonexistent"]),
            [self.nc.pk],
            attrgetter("pk"),
        )

    def test_21001(self):
        foo = NamedCategory.objects.create(name="foo")
        self.assertQuerySetEqual(
            NamedCategory.objects.exclude(name=""), [foo.pk], attrgetter("pk")
        )


class ProxyQueryCleanupTest(TestCase):
    def test_evaluated_proxy_count(self):
        """
        Generating the query string doesn't alter the query's state in
        irreversible ways. Refs #18248.
""" ProxyCategory.objects.create() qs = ProxyCategory.objects.all() self.assertEqual(qs.count(), 1) str(qs.query) self.assertEqual(qs.count(), 1) class WhereNodeTest(SimpleTestCase): class DummyNode: def as_sql(self, compiler, connection): return "dummy", [] class MockCompiler: def compile(self, node): return node.as_sql(self, connection) def __call__(self, name): return connection.ops.quote_name(name) def test_empty_full_handling_conjunction(self): compiler = WhereNodeTest.MockCompiler() w = WhereNode(children=[NothingNode()]) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() with self.assertRaises(FullResultSet): w.as_sql(compiler, connection) w = WhereNode(children=[self.DummyNode(), self.DummyNode()]) self.assertEqual(w.as_sql(compiler, connection), ("(dummy AND dummy)", [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy AND dummy)", [])) w = WhereNode(children=[NothingNode(), self.DummyNode()]) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() with self.assertRaises(FullResultSet): w.as_sql(compiler, connection) def test_empty_full_handling_disjunction(self): compiler = WhereNodeTest.MockCompiler() w = WhereNode(children=[NothingNode()], connector=OR) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() with self.assertRaises(FullResultSet): w.as_sql(compiler, connection) w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector=OR) self.assertEqual(w.as_sql(compiler, connection), ("(dummy OR dummy)", [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy OR dummy)", [])) w = WhereNode(children=[NothingNode(), self.DummyNode()], connector=OR) self.assertEqual(w.as_sql(compiler, connection), ("dummy", [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ("NOT (dummy)", [])) def test_empty_nodes(self): compiler = WhereNodeTest.MockCompiler() empty_w = WhereNode() w = WhereNode(children=[empty_w, empty_w]) with self.assertRaises(FullResultSet): w.as_sql(compiler, connection) w.negate() with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.connector = OR with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() with self.assertRaises(FullResultSet): w.as_sql(compiler, connection) w = WhereNode(children=[empty_w, NothingNode()], connector=OR) with self.assertRaises(FullResultSet): w.as_sql(compiler, connection) w = WhereNode(children=[empty_w, NothingNode()], connector=AND) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) class QuerySetExceptionTests(SimpleTestCase): def test_invalid_order_by(self): msg = "Cannot resolve keyword '*' into field. Choices are: created, id, name" with self.assertRaisesMessage(FieldError, msg): Article.objects.order_by("*") def test_invalid_order_by_raw_column_alias(self): msg = ( "Cannot resolve keyword 'queries_author.name' into field. Choices " "are: cover, created, creator, creator_id, id, modified, name, " "note, note_id, tags" ) with self.assertRaisesMessage(FieldError, msg): Item.objects.values("creator__name").order_by("queries_author.name") def test_invalid_queryset_model(self): msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".' 
with self.assertRaisesMessage(ValueError, msg): list(Author.objects.filter(extra=Article.objects.all())) class NullJoinPromotionOrTest(TestCase): @classmethod def setUpTestData(cls): cls.d1 = ModelD.objects.create(name="foo") d2 = ModelD.objects.create(name="bar") cls.a1 = ModelA.objects.create(name="a1", d=cls.d1) c = ModelC.objects.create(name="c") b = ModelB.objects.create(name="b", c=c) cls.a2 = ModelA.objects.create(name="a2", b=b, d=d2) def test_ticket_17886(self): # The first Q-object is generating the match, the rest of the filters # should not remove the match even if they do not match anything. The # problem here was that b__name generates a LOUTER JOIN, then # b__c__name generates join to c, which the ORM tried to promote but # failed as that join isn't nullable. q_obj = Q(d__name="foo") | Q(b__name="foo") | Q(b__c__name="foo") qset = ModelA.objects.filter(q_obj) self.assertEqual(list(qset), [self.a1]) # We generate one INNER JOIN to D. The join is direct and not nullable # so we can use INNER JOIN for it. However, we can NOT use INNER JOIN # for the b->c join, as a->b is nullable. self.assertEqual(str(qset.query).count("INNER JOIN"), 1) def test_isnull_filter_promotion(self): qs = ModelA.objects.filter(Q(b__name__isnull=True)) self.assertEqual(str(qs.query).count("LEFT OUTER"), 1) self.assertEqual(list(qs), [self.a1]) qs = ModelA.objects.filter(~Q(b__name__isnull=True)) self.assertEqual(str(qs.query).count("INNER JOIN"), 1) self.assertEqual(list(qs), [self.a2]) qs = ModelA.objects.filter(~~Q(b__name__isnull=True)) self.assertEqual(str(qs.query).count("LEFT OUTER"), 1) self.assertEqual(list(qs), [self.a1]) qs = ModelA.objects.filter(Q(b__name__isnull=False)) self.assertEqual(str(qs.query).count("INNER JOIN"), 1) self.assertEqual(list(qs), [self.a2]) qs = ModelA.objects.filter(~Q(b__name__isnull=False)) self.assertEqual(str(qs.query).count("LEFT OUTER"), 1) self.assertEqual(list(qs), [self.a1]) qs = ModelA.objects.filter(~~Q(b__name__isnull=False)) self.assertEqual(str(qs.query).count("INNER JOIN"), 1) self.assertEqual(list(qs), [self.a2]) def test_null_join_demotion(self): qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True)) self.assertIn(" INNER JOIN ", str(qs.query)) qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False)) self.assertIn(" INNER JOIN ", str(qs.query)) qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True)) self.assertIn(" LEFT OUTER JOIN ", str(qs.query)) qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False)) self.assertIn(" LEFT OUTER JOIN ", str(qs.query)) def test_ticket_21366(self): n = Note.objects.create(note="n", misc="m") e = ExtraInfo.objects.create(info="info", note=n) a = Author.objects.create(name="Author1", num=1, extra=e) Ranking.objects.create(rank=1, author=a) r1 = Report.objects.create(name="Foo", creator=a) r2 = Report.objects.create(name="Bar") Report.objects.create(name="Bar", creator=a) qs = Report.objects.filter( Q(creator__ranking__isnull=True) | Q(creator__ranking__rank=1, name="Foo") ) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2) self.assertEqual(str(qs.query).count(" JOIN "), 2) self.assertSequenceEqual(qs.order_by("name"), [r2, r1]) def test_ticket_21748(self): i1 = Identifier.objects.create(name="i1") i2 = Identifier.objects.create(name="i2") i3 = Identifier.objects.create(name="i3") Program.objects.create(identifier=i1) Channel.objects.create(identifier=i1) Program.objects.create(identifier=i2) self.assertSequenceEqual( 
Identifier.objects.filter(program=None, channel=None), [i3] ) self.assertSequenceEqual( Identifier.objects.exclude(program=None, channel=None).order_by("name"), [i1, i2], ) def test_ticket_21748_double_negated_and(self): i1 = Identifier.objects.create(name="i1") i2 = Identifier.objects.create(name="i2") Identifier.objects.create(name="i3") p1 = Program.objects.create(identifier=i1) c1 = Channel.objects.create(identifier=i1) Program.objects.create(identifier=i2) # Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for # join promotion. qs1_doubleneg = Identifier.objects.exclude( ~Q(program__id=p1.id, channel__id=c1.id) ).order_by("pk") qs1_filter = Identifier.objects.filter( program__id=p1.id, channel__id=c1.id ).order_by("pk") self.assertQuerySetEqual(qs1_doubleneg, qs1_filter, lambda x: x) self.assertEqual( str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN") ) self.assertEqual(2, str(qs1_doubleneg.query).count("INNER JOIN")) self.assertEqual( str(qs1_filter.query).count("INNER JOIN"), str(qs1_doubleneg.query).count("INNER JOIN"), ) def test_ticket_21748_double_negated_or(self): i1 = Identifier.objects.create(name="i1") i2 = Identifier.objects.create(name="i2") Identifier.objects.create(name="i3") p1 = Program.objects.create(identifier=i1) c1 = Channel.objects.create(identifier=i1) p2 = Program.objects.create(identifier=i2) # Test OR + doubleneg. The expected result is that channel is LOUTER # joined, program INNER joined qs1_filter = Identifier.objects.filter( Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id) ).order_by("pk") qs1_doubleneg = Identifier.objects.exclude( ~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)) ).order_by("pk") self.assertQuerySetEqual(qs1_doubleneg, qs1_filter, lambda x: x) self.assertEqual( str(qs1_filter.query).count("JOIN"), str(qs1_doubleneg.query).count("JOIN") ) self.assertEqual(1, str(qs1_doubleneg.query).count("INNER JOIN")) self.assertEqual( str(qs1_filter.query).count("INNER JOIN"), str(qs1_doubleneg.query).count("INNER JOIN"), ) def test_ticket_21748_complex_filter(self): i1 = Identifier.objects.create(name="i1") i2 = Identifier.objects.create(name="i2") Identifier.objects.create(name="i3") p1 = Program.objects.create(identifier=i1) c1 = Channel.objects.create(identifier=i1) p2 = Program.objects.create(identifier=i2) # Finally, a more complex case, one time in a way where each # NOT is pushed to lowest level in the boolean tree, and # another query where this isn't done. qs1 = Identifier.objects.filter( ~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id)) ).order_by("pk") qs2 = Identifier.objects.filter( Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id)) ).order_by("pk") self.assertQuerySetEqual(qs1, qs2, lambda x: x) self.assertEqual(str(qs1.query).count("JOIN"), str(qs2.query).count("JOIN")) self.assertEqual(0, str(qs1.query).count("INNER JOIN")) self.assertEqual( str(qs1.query).count("INNER JOIN"), str(qs2.query).count("INNER JOIN") ) class ReverseJoinTrimmingTest(TestCase): def test_reverse_trimming(self): # We don't accidentally trim reverse joins - we can't know if there is # anything on the other side of the join, so trimming reverse joins # can't be done, ever. t = Tag.objects.create() qs = Tag.objects.filter(annotation__tag=t.pk) self.assertIn("INNER JOIN", str(qs.query)) self.assertEqual(list(qs), []) class JoinReuseTest(TestCase): """ The queries reuse joins sensibly (for example, direct joins are always reused). 
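    Reverse foreign key joins, by contrast, are not reused across chained
    filter() calls, since each call may need to match a different related row.
    An illustrative sketch mirroring the tests below (an aside, not original
    docstring text):

        Annotation.objects.filter(tag__name="foo").filter(tag__name="bar")
        # -> one JOIN: both conditions constrain the same to-one relation
        Author.objects.filter(report__name="r4").filter(report__name="r1")
        # -> two JOINs: each filter() may match a different Report row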
""" def test_fk_reuse(self): qs = Annotation.objects.filter(tag__name="foo").filter(tag__name="bar") self.assertEqual(str(qs.query).count("JOIN"), 1) def test_fk_reuse_select_related(self): qs = Annotation.objects.filter(tag__name="foo").select_related("tag") self.assertEqual(str(qs.query).count("JOIN"), 1) def test_fk_reuse_annotation(self): qs = Annotation.objects.filter(tag__name="foo").annotate(cnt=Count("tag__name")) self.assertEqual(str(qs.query).count("JOIN"), 1) def test_fk_reuse_disjunction(self): qs = Annotation.objects.filter(Q(tag__name="foo") | Q(tag__name="bar")) self.assertEqual(str(qs.query).count("JOIN"), 1) def test_fk_reuse_order_by(self): qs = Annotation.objects.filter(tag__name="foo").order_by("tag__name") self.assertEqual(str(qs.query).count("JOIN"), 1) def test_revo2o_reuse(self): qs = Detail.objects.filter(member__name="foo").filter(member__name="foo") self.assertEqual(str(qs.query).count("JOIN"), 1) def test_revfk_noreuse(self): qs = Author.objects.filter(report__name="r4").filter(report__name="r1") self.assertEqual(str(qs.query).count("JOIN"), 2) def test_inverted_q_across_relations(self): """ When a trimmable join is specified in the query (here school__), the ORM detects it and removes unnecessary joins. The set of reusable joins are updated after trimming the query so that other lookups don't consider that the outer query's filters are in effect for the subquery (#26551). """ springfield_elementary = School.objects.create() hogward = School.objects.create() Student.objects.create(school=springfield_elementary) hp = Student.objects.create(school=hogward) Classroom.objects.create(school=hogward, name="Potion") Classroom.objects.create(school=springfield_elementary, name="Main") qs = Student.objects.filter( ~( Q(school__classroom__name="Main") & Q(school__classroom__has_blackboard=None) ) ) self.assertSequenceEqual(qs, [hp]) class DisjunctionPromotionTests(TestCase): def test_disjunction_promotion_select_related(self): fk1 = FK1.objects.create(f1="f1", f2="f2") basea = BaseA.objects.create(a=fk1) qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2)) self.assertEqual(str(qs.query).count(" JOIN "), 0) qs = qs.select_related("a", "b") self.assertEqual(str(qs.query).count(" INNER JOIN "), 0) self.assertEqual(str(qs.query).count(" LEFT OUTER JOIN "), 2) with self.assertNumQueries(1): self.assertSequenceEqual(qs, [basea]) self.assertEqual(qs[0].a, fk1) self.assertIs(qs[0].b, None) def test_disjunction_promotion1(self): # Pre-existing join, add two ORed filters to the same join, # all joins can be INNER JOINS. qs = BaseA.objects.filter(a__f1="foo") self.assertEqual(str(qs.query).count("INNER JOIN"), 1) qs = qs.filter(Q(b__f1="foo") | Q(b__f2="foo")) self.assertEqual(str(qs.query).count("INNER JOIN"), 2) # Reverse the order of AND and OR filters. qs = BaseA.objects.filter(Q(b__f1="foo") | Q(b__f2="foo")) self.assertEqual(str(qs.query).count("INNER JOIN"), 1) qs = qs.filter(a__f1="foo") self.assertEqual(str(qs.query).count("INNER JOIN"), 2) def test_disjunction_promotion2(self): qs = BaseA.objects.filter(a__f1="foo") self.assertEqual(str(qs.query).count("INNER JOIN"), 1) # Now we have two different joins in an ORed condition, these # must be OUTER joins. The pre-existing join should remain INNER. qs = qs.filter(Q(b__f1="foo") | Q(c__f2="foo")) self.assertEqual(str(qs.query).count("INNER JOIN"), 1) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2) # Reverse case. 
qs = BaseA.objects.filter(Q(b__f1="foo") | Q(c__f2="foo")) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2) qs = qs.filter(a__f1="foo") self.assertEqual(str(qs.query).count("INNER JOIN"), 1) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2) def test_disjunction_promotion3(self): qs = BaseA.objects.filter(a__f2="bar") self.assertEqual(str(qs.query).count("INNER JOIN"), 1) # The ANDed a__f2 filter allows us to use keep using INNER JOIN # even inside the ORed case. If the join to a__ returns nothing, # the ANDed filter for a__f2 can't be true. qs = qs.filter(Q(a__f1="foo") | Q(b__f2="foo")) self.assertEqual(str(qs.query).count("INNER JOIN"), 1) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1) def test_disjunction_promotion3_demote(self): # This one needs demotion logic: the first filter causes a to be # outer joined, the second filter makes it inner join again. qs = BaseA.objects.filter(Q(a__f1="foo") | Q(b__f2="foo")).filter(a__f2="bar") self.assertEqual(str(qs.query).count("INNER JOIN"), 1) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1) def test_disjunction_promotion4_demote(self): qs = BaseA.objects.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count("JOIN"), 0) # Demote needed for the "a" join. It is marked as outer join by # above filter (even if it is trimmed away). qs = qs.filter(a__f1="foo") self.assertEqual(str(qs.query).count("INNER JOIN"), 1) def test_disjunction_promotion4(self): qs = BaseA.objects.filter(a__f1="foo") self.assertEqual(str(qs.query).count("INNER JOIN"), 1) qs = qs.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count("INNER JOIN"), 1) def test_disjunction_promotion5_demote(self): qs = BaseA.objects.filter(Q(a=1) | Q(a=2)) # Note that the above filters on a force the join to an # inner join even if it is trimmed. self.assertEqual(str(qs.query).count("JOIN"), 0) qs = qs.filter(Q(a__f1="foo") | Q(b__f1="foo")) # So, now the a__f1 join doesn't need promotion. self.assertEqual(str(qs.query).count("INNER JOIN"), 1) # But b__f1 does. 
self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1) qs = BaseA.objects.filter(Q(a__f1="foo") | Q(b__f1="foo")) # Now the join to a is created as LOUTER self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2) qs = qs.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count("INNER JOIN"), 1) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1) def test_disjunction_promotion6(self): qs = BaseA.objects.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count("JOIN"), 0) qs = BaseA.objects.filter(Q(a__f1="foo") & Q(b__f1="foo")) self.assertEqual(str(qs.query).count("INNER JOIN"), 2) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0) qs = BaseA.objects.filter(Q(a__f1="foo") & Q(b__f1="foo")) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0) self.assertEqual(str(qs.query).count("INNER JOIN"), 2) qs = qs.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count("INNER JOIN"), 2) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 0) def test_disjunction_promotion7(self): qs = BaseA.objects.filter(Q(a=1) | Q(a=2)) self.assertEqual(str(qs.query).count("JOIN"), 0) qs = BaseA.objects.filter(Q(a__f1="foo") | (Q(b__f1="foo") & Q(a__f1="bar"))) self.assertEqual(str(qs.query).count("INNER JOIN"), 1) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1) qs = BaseA.objects.filter( (Q(a__f1="foo") | Q(b__f1="foo")) & (Q(a__f1="bar") | Q(c__f1="foo")) ) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3) self.assertEqual(str(qs.query).count("INNER JOIN"), 0) qs = BaseA.objects.filter( Q(a__f1="foo") | Q(a__f1="bar") & (Q(b__f1="bar") | Q(c__f1="foo")) ) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2) self.assertEqual(str(qs.query).count("INNER JOIN"), 1) def test_disjunction_promotion_fexpression(self): qs = BaseA.objects.filter(Q(a__f1=F("b__f1")) | Q(b__f1="foo")) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 1) self.assertEqual(str(qs.query).count("INNER JOIN"), 1) qs = BaseA.objects.filter(Q(a__f1=F("c__f1")) | Q(b__f1="foo")) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3) qs = BaseA.objects.filter( Q(a__f1=F("b__f1")) | Q(a__f2=F("b__f2")) | Q(c__f1="foo") ) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 3) qs = BaseA.objects.filter(Q(a__f1=F("c__f1")) | (Q(pk=1) & Q(pk=2))) self.assertEqual(str(qs.query).count("LEFT OUTER JOIN"), 2) self.assertEqual(str(qs.query).count("INNER JOIN"), 0) class ManyToManyExcludeTest(TestCase): def test_exclude_many_to_many(self): i_extra = Identifier.objects.create(name="extra") i_program = Identifier.objects.create(name="program") program = Program.objects.create(identifier=i_program) i_channel = Identifier.objects.create(name="channel") channel = Channel.objects.create(identifier=i_channel) channel.programs.add(program) # channel contains 'program1', so all Identifiers except that one # should be returned self.assertSequenceEqual( Identifier.objects.exclude(program__channel=channel).order_by("name"), [i_channel, i_extra], ) self.assertSequenceEqual( Identifier.objects.exclude(program__channel=None).order_by("name"), [i_program], ) def test_ticket_12823(self): pg3 = Page.objects.create(text="pg3") pg2 = Page.objects.create(text="pg2") pg1 = Page.objects.create(text="pg1") pa1 = Paragraph.objects.create(text="pa1") pa1.page.set([pg1, pg2]) pa2 = Paragraph.objects.create(text="pa2") pa2.page.set([pg2, pg3]) pa3 = Paragraph.objects.create(text="pa3") ch1 = Chapter.objects.create(title="ch1", paragraph=pa1) ch2 = Chapter.objects.create(title="ch2", paragraph=pa2) 
ch3 = Chapter.objects.create(title="ch3", paragraph=pa3) b1 = Book.objects.create(title="b1", chapter=ch1) b2 = Book.objects.create(title="b2", chapter=ch2) b3 = Book.objects.create(title="b3", chapter=ch3) q = Book.objects.exclude(chapter__paragraph__page__text="pg1") self.assertNotIn("IS NOT NULL", str(q.query)) self.assertEqual(len(q), 2) self.assertNotIn(b1, q) self.assertIn(b2, q) self.assertIn(b3, q) class RelabelCloneTest(TestCase): def test_ticket_19964(self): my1 = MyObject.objects.create(data="foo") my1.parent = my1 my1.save() my2 = MyObject.objects.create(data="bar", parent=my1) parents = MyObject.objects.filter(parent=F("id")) children = MyObject.objects.filter(parent__in=parents).exclude(parent=F("id")) self.assertEqual(list(parents), [my1]) # Evaluating the children query (which has parents as part of it) does # not change results for the parents query. self.assertEqual(list(children), [my2]) self.assertEqual(list(parents), [my1]) class Ticket20101Tests(TestCase): def test_ticket_20101(self): """ Tests QuerySet ORed combining in exclude subquery case. """ t = Tag.objects.create(name="foo") a1 = Annotation.objects.create(tag=t, name="a1") a2 = Annotation.objects.create(tag=t, name="a2") a3 = Annotation.objects.create(tag=t, name="a3") n = Note.objects.create(note="foo", misc="bar") qs1 = Note.objects.exclude(annotation__in=[a1, a2]) qs2 = Note.objects.filter(annotation__in=[a3]) self.assertIn(n, qs1) self.assertNotIn(n, qs2) self.assertIn(n, (qs1 | qs2)) class EmptyStringPromotionTests(SimpleTestCase): def test_empty_string_promotion(self): qs = RelatedObject.objects.filter(single__name="") if connection.features.interprets_empty_strings_as_nulls: self.assertIn("LEFT OUTER JOIN", str(qs.query)) else: self.assertNotIn("LEFT OUTER JOIN", str(qs.query)) class ValuesSubqueryTests(TestCase): def test_values_in_subquery(self): # If a values() queryset is used, then the given values # will be used instead of forcing use of the relation's field. o1 = Order.objects.create(id=-2) o2 = Order.objects.create(id=-1) oi1 = OrderItem.objects.create(order=o1, status=0) oi1.status = oi1.pk oi1.save() OrderItem.objects.create(order=o2, status=0) # The query below should match o1 as it has related order_item # with id == status. 
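        # (Because of values_list("status"), the subquery SELECTs the raw
        # status column, so the related OrderItem pks are compared against
        # the collected status values rather than against OrderItem pks.)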
self.assertSequenceEqual( Order.objects.filter(items__in=OrderItem.objects.values_list("status")), [o1], ) class DoubleInSubqueryTests(TestCase): def test_double_subquery_in(self): lfa1 = LeafA.objects.create(data="foo") lfa2 = LeafA.objects.create(data="bar") lfb1 = LeafB.objects.create(data="lfb1") lfb2 = LeafB.objects.create(data="lfb2") Join.objects.create(a=lfa1, b=lfb1) Join.objects.create(a=lfa2, b=lfb2) leaf_as = LeafA.objects.filter(data="foo").values_list("pk", flat=True) joins = Join.objects.filter(a__in=leaf_as).values_list("b__id", flat=True) qs = LeafB.objects.filter(pk__in=joins) self.assertSequenceEqual(qs, [lfb1]) class Ticket18785Tests(SimpleTestCase): def test_ticket_18785(self): # Test join trimming from ticket18785 qs = ( Item.objects.exclude(note__isnull=False) .filter(name="something", creator__extra__isnull=True) .order_by() ) self.assertEqual(1, str(qs.query).count("INNER JOIN")) self.assertEqual(0, str(qs.query).count("OUTER JOIN")) class Ticket20788Tests(TestCase): def test_ticket_20788(self): Paragraph.objects.create() paragraph = Paragraph.objects.create() page = paragraph.page.create() chapter = Chapter.objects.create(paragraph=paragraph) Book.objects.create(chapter=chapter) paragraph2 = Paragraph.objects.create() Page.objects.create() chapter2 = Chapter.objects.create(paragraph=paragraph2) book2 = Book.objects.create(chapter=chapter2) sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page) self.assertSequenceEqual(sentences_not_in_pub, [book2]) class Ticket12807Tests(TestCase): def test_ticket_12807(self): p1 = Paragraph.objects.create() p2 = Paragraph.objects.create() # The ORed condition below should have no effect on the query - the # ~Q(pk__in=[]) will always be True. qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk)) self.assertSequenceEqual(qs, [p1]) class RelatedLookupTypeTests(TestCase): error = 'Cannot query "%s": Must be "%s" instance.' @classmethod def setUpTestData(cls): cls.oa = ObjectA.objects.create(name="oa") cls.poa = ProxyObjectA.objects.get(name="oa") cls.coa = ChildObjectA.objects.create(name="coa") cls.wrong_type = Order.objects.create(id=cls.oa.pk) cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1) cls.pob1 = ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2) cls.pob = ProxyObjectB.objects.all() cls.c = ObjectC.objects.create(childobjecta=cls.coa) def test_wrong_type_lookup(self): """ A ValueError is raised when the incorrect object type is passed to a query lookup. """ # Passing incorrect object type with self.assertRaisesMessage( ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name) ): ObjectB.objects.get(objecta=self.wrong_type) with self.assertRaisesMessage( ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name) ): ObjectB.objects.filter(objecta__in=[self.wrong_type]) with self.assertRaisesMessage( ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name) ): ObjectB.objects.filter(objecta=self.wrong_type) with self.assertRaisesMessage( ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name) ): ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob]) # Passing an object of the class on which query is done. 
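        # (e.g. an ObjectB instance appearing in a lookup that expects
        # ObjectA instances, as below.)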
with self.assertRaisesMessage( ValueError, self.error % (self.ob, ObjectA._meta.object_name) ): ObjectB.objects.filter(objecta__in=[self.poa, self.ob]) with self.assertRaisesMessage( ValueError, self.error % (self.ob, ChildObjectA._meta.object_name) ): ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob]) def test_wrong_backward_lookup(self): """ A ValueError is raised when the incorrect object type is passed to a query lookup for backward relations. """ with self.assertRaisesMessage( ValueError, self.error % (self.oa, ObjectB._meta.object_name) ): ObjectA.objects.filter(objectb__in=[self.oa, self.ob]) with self.assertRaisesMessage( ValueError, self.error % (self.oa, ObjectB._meta.object_name) ): ObjectA.objects.exclude(objectb=self.oa) with self.assertRaisesMessage( ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name) ): ObjectA.objects.get(objectb=self.wrong_type) def test_correct_lookup(self): """ When passing proxy model objects, child objects, or parent objects, lookups work fine. """ out_a = [self.oa] out_b = [self.ob, self.pob1] out_c = [self.c] # proxy model objects self.assertSequenceEqual( ObjectB.objects.filter(objecta=self.poa).order_by("name"), out_b ) self.assertSequenceEqual( ObjectA.objects.filter(objectb__in=self.pob).order_by("pk"), out_a * 2 ) # child objects self.assertSequenceEqual(ObjectB.objects.filter(objecta__in=[self.coa]), []) self.assertSequenceEqual( ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by("name"), out_b, ) self.assertSequenceEqual( ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by( "name" ), out_b, ) # parent objects self.assertSequenceEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c) # QuerySet related object type checking shouldn't issue queries # (the querysets aren't evaluated here, hence zero queries) (#23266). with self.assertNumQueries(0): ObjectB.objects.filter(objecta__in=ObjectA.objects.all()) def test_values_queryset_lookup(self): """ ValueQuerySets are not checked for compatibility with the lookup field. """ # Make sure the num and objecta field values match. ob = ObjectB.objects.get(name="ob") ob.num = ob.objecta.pk ob.save() pob = ObjectB.objects.get(name="pob") pob.num = pob.objecta.pk pob.save() self.assertSequenceEqual( ObjectB.objects.filter( objecta__in=ObjectB.objects.values_list("num") ).order_by("pk"), [ob, pob], ) class Ticket14056Tests(TestCase): def test_ticket_14056(self): s1 = SharedConnection.objects.create(data="s1") s2 = SharedConnection.objects.create(data="s2") s3 = SharedConnection.objects.create(data="s3") PointerA.objects.create(connection=s2) expected_ordering = ( [s1, s3, s2] if connection.features.nulls_order_largest else [s2, s1, s3] ) self.assertSequenceEqual( SharedConnection.objects.order_by("-pointera__connection", "pk"), expected_ordering, ) class Ticket20955Tests(TestCase): def test_ticket_20955(self): jack = Staff.objects.create(name="jackstaff") jackstaff = StaffUser.objects.create(staff=jack) jill = Staff.objects.create(name="jillstaff") jillstaff = StaffUser.objects.create(staff=jill) task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task") task_get = Task.objects.get(pk=task.pk) # Load data so that assertNumQueries doesn't complain about the get # version's queries. 
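        # Traversing the relations below populates task_get's field caches,
        # so the comparisons at the end don't issue extra queries.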
task_get.creator.staffuser.staff task_get.owner.staffuser.staff qs = Task.objects.select_related( "creator__staffuser__staff", "owner__staffuser__staff" ) self.assertEqual(str(qs.query).count(" JOIN "), 6) task_select_related = qs.get(pk=task.pk) with self.assertNumQueries(0): self.assertEqual( task_select_related.creator.staffuser.staff, task_get.creator.staffuser.staff, ) self.assertEqual( task_select_related.owner.staffuser.staff, task_get.owner.staffuser.staff, ) class Ticket21203Tests(TestCase): def test_ticket_21203(self): p = Ticket21203Parent.objects.create(parent_bool=True) c = Ticket21203Child.objects.create(parent=p) qs = Ticket21203Child.objects.select_related("parent").defer("parent__created") self.assertSequenceEqual(qs, [c]) self.assertIs(qs[0].parent.parent_bool, True) class ValuesJoinPromotionTests(TestCase): def test_values_no_promotion_for_existing(self): qs = Node.objects.filter(parent__parent__isnull=False) self.assertIn(" INNER JOIN ", str(qs.query)) qs = qs.values("parent__parent__id") self.assertIn(" INNER JOIN ", str(qs.query)) # Make sure there is a left outer join without the filter. qs = Node.objects.values("parent__parent__id") self.assertIn(" LEFT OUTER JOIN ", str(qs.query)) def test_non_nullable_fk_not_promoted(self): qs = ObjectB.objects.values("objecta__name") self.assertIn(" INNER JOIN ", str(qs.query)) def test_ticket_21376(self): a = ObjectA.objects.create() ObjectC.objects.create(objecta=a) qs = ObjectC.objects.filter( Q(objecta=a) | Q(objectb__objecta=a), ) qs = qs.filter( Q(objectb=1) | Q(objecta=a), ) self.assertEqual(qs.count(), 1) tblname = connection.ops.quote_name(ObjectB._meta.db_table) self.assertIn(" LEFT OUTER JOIN %s" % tblname, str(qs.query)) class ForeignKeyToBaseExcludeTests(TestCase): def test_ticket_21787(self): sc1 = SpecialCategory.objects.create(special_name="sc1", name="sc1") sc2 = SpecialCategory.objects.create(special_name="sc2", name="sc2") sc3 = SpecialCategory.objects.create(special_name="sc3", name="sc3") c1 = CategoryItem.objects.create(category=sc1) CategoryItem.objects.create(category=sc2) self.assertSequenceEqual( SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by("name"), [sc2, sc3], ) self.assertSequenceEqual( SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1] ) class ReverseM2MCustomPkTests(TestCase): def test_ticket_21879(self): cpt1 = CustomPkTag.objects.create(id="cpt1", tag="cpt1") cp1 = CustomPk.objects.create(name="cp1", extra="extra") cp1.custompktag_set.add(cpt1) self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1]) self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1]) class Ticket22429Tests(TestCase): def test_ticket_22429(self): sc1 = School.objects.create() st1 = Student.objects.create(school=sc1) sc2 = School.objects.create() st2 = Student.objects.create(school=sc2) cr = Classroom.objects.create(school=sc1) cr.students.add(st1) queryset = Student.objects.filter(~Q(classroom__school=F("school"))) self.assertSequenceEqual(queryset, [st2]) class Ticket23605Tests(TestCase): def test_ticket_23605(self): # Test filtering on a complicated q-object from ticket's report. # The query structure is such that we have multiple nested subqueries. # The original problem was that the inner queries weren't relabeled # correctly. # See also #24090. 
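        # Setup: a1 gets a related Ticket23605B row that matches the filters
        # below; a2 gets none.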
a1 = Ticket23605A.objects.create() a2 = Ticket23605A.objects.create() c1 = Ticket23605C.objects.create(field_c0=10000.0) Ticket23605B.objects.create( field_b0=10000.0, field_b1=True, modelc_fk=c1, modela_fk=a1 ) complex_q = Q( pk__in=Ticket23605A.objects.filter( Q( # True for a1 as field_b0 = 10000, field_c0=10000 # False for a2 as no ticket23605b found ticket23605b__field_b0__gte=1000000 / F("ticket23605b__modelc_fk__field_c0") ) & # True for a1 (field_b1=True) Q(ticket23605b__field_b1=True) & ~Q( ticket23605b__pk__in=Ticket23605B.objects.filter( ~( # Same filters as above commented filters, but # double-negated (one for Q() above, one for # parentheses). So, again a1 match, a2 not. Q(field_b1=True) & Q(field_b0__gte=1000000 / F("modelc_fk__field_c0")) ) ) ) ).filter(ticket23605b__field_b1=True) ) qs1 = Ticket23605A.objects.filter(complex_q) self.assertSequenceEqual(qs1, [a1]) qs2 = Ticket23605A.objects.exclude(complex_q) self.assertSequenceEqual(qs2, [a2]) class TestTicket24279(TestCase): def test_ticket_24278(self): School.objects.create() qs = School.objects.filter(Q(pk__in=()) | Q()) self.assertSequenceEqual(qs, []) class TestInvalidValuesRelation(SimpleTestCase): def test_invalid_values(self): msg = "Field 'id' expected a number but got 'abc'." with self.assertRaisesMessage(ValueError, msg): Annotation.objects.filter(tag="abc") with self.assertRaisesMessage(ValueError, msg): Annotation.objects.filter(tag__in=[123, "abc"]) class TestTicket24605(TestCase): def test_ticket_24605(self): """ Subquery table names should be quoted. """ i1 = Individual.objects.create(alive=True) RelatedIndividual.objects.create(related=i1) i2 = Individual.objects.create(alive=False) RelatedIndividual.objects.create(related=i2) i3 = Individual.objects.create(alive=True) i4 = Individual.objects.create(alive=False) self.assertSequenceEqual( Individual.objects.filter( Q(alive=False), Q(related_individual__isnull=True) ), [i4], ) self.assertSequenceEqual( Individual.objects.exclude( Q(alive=False), Q(related_individual__isnull=True) ).order_by("pk"), [i1, i2, i3], ) class Ticket23622Tests(TestCase): @skipUnlessDBFeature("can_distinct_on_fields") def test_ticket_23622(self): """ Make sure __pk__in and __in work the same for related fields when using a distinct on subquery. 
""" a1 = Ticket23605A.objects.create() a2 = Ticket23605A.objects.create() c1 = Ticket23605C.objects.create(field_c0=0.0) Ticket23605B.objects.create( modela_fk=a1, field_b0=123, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=23, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=234, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=12, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=567, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=76, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=7, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=56, field_b1=True, modelc_fk=c1, ) qx = Q( ticket23605b__pk__in=Ticket23605B.objects.order_by( "modela_fk", "-field_b1" ).distinct("modela_fk") ) & Q(ticket23605b__field_b0__gte=300) qy = Q( ticket23605b__in=Ticket23605B.objects.order_by( "modela_fk", "-field_b1" ).distinct("modela_fk") ) & Q(ticket23605b__field_b0__gte=300) self.assertEqual( set(Ticket23605A.objects.filter(qx).values_list("pk", flat=True)), set(Ticket23605A.objects.filter(qy).values_list("pk", flat=True)), ) self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
32e9c9d680239f88a81a163035861aa4b6028977b43de05a324c665e7716f859
import os from datetime import date from django.contrib.sitemaps import Sitemap from django.contrib.sites.models import Site from django.core.exceptions import ImproperlyConfigured from django.test import ignore_warnings, modify_settings, override_settings from django.utils import translation from django.utils.deprecation import RemovedInDjango50Warning from django.utils.formats import localize from .base import SitemapTestsBase from .models import TestModel class HTTPSitemapTests(SitemapTestsBase): use_sitemap_err_msg = ( "To use sitemaps, either enable the sites framework or pass a " "Site/RequestSite object in your view." ) def test_simple_sitemap_index(self): "A simple sitemap index can be rendered" response = self.client.get("/simple/index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc><lastmod>%s</lastmod></sitemap> </sitemapindex> """ % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) def test_sitemap_not_callable(self): """A sitemap may not be callable.""" response = self.client.get("/simple-not-callable/index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc><lastmod>%s</lastmod></sitemap> </sitemapindex> """ % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) def test_paged_sitemap(self): """A sitemap may have multiple pages.""" response = self.client.get("/simple-paged/index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>{0}/simple/sitemap-simple.xml</loc><lastmod>{1}</lastmod></sitemap><sitemap><loc>{0}/simple/sitemap-simple.xml?p=2</loc><lastmod>{1}</lastmod></sitemap> </sitemapindex> """.format( self.base_url, date.today() ) self.assertXMLEqual(response.content.decode(), expected_content) @override_settings( TEMPLATES=[ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [os.path.join(os.path.dirname(__file__), "templates")], } ] ) def test_simple_sitemap_custom_lastmod_index(self): "A simple sitemap index can be rendered with a custom template" response = self.client.get("/simple/custom-lastmod-index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <!-- This is a customised template --> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc><lastmod>%s</lastmod></sitemap> </sitemapindex> """ % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) def test_simple_sitemap_section(self): "A simple sitemap section can be rendered" response = self.client.get("/simple/sitemap-simple.xml") expected_content = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' "<url><loc>%s/location/</loc><lastmod>%s</lastmod>" "<changefreq>never</changefreq><priority>0.5</priority></url>\n" "</urlset>" ) % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) def test_no_section(self): response = self.client.get("/simple/sitemap-simple2.xml") self.assertEqual( str(response.context["exception"]), "No sitemap available for section: 'simple2'", ) 
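        # A missing section is reported as a 404 rather than as an empty
        # sitemap.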
        self.assertEqual(response.status_code, 404)

    def test_empty_page(self):
        response = self.client.get("/simple/sitemap-simple.xml?p=0")
        self.assertEqual(str(response.context["exception"]), "Page 0 empty")
        self.assertEqual(response.status_code, 404)

    def test_page_not_int(self):
        response = self.client.get("/simple/sitemap-simple.xml?p=test")
        self.assertEqual(str(response.context["exception"]), "No page 'test'")
        self.assertEqual(response.status_code, 404)

    def test_simple_sitemap(self):
        "A simple sitemap can be rendered"
        response = self.client.get("/simple/sitemap.xml")
        expected_content = (
            '<?xml version="1.0" encoding="UTF-8"?>\n'
            '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" '
            'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n'
            "<url><loc>%s/location/</loc><lastmod>%s</lastmod>"
            "<changefreq>never</changefreq><priority>0.5</priority></url>\n"
            "</urlset>"
        ) % (
            self.base_url,
            date.today(),
        )
        self.assertXMLEqual(response.content.decode(), expected_content)

    @override_settings(
        TEMPLATES=[
            {
                "BACKEND": "django.template.backends.django.DjangoTemplates",
                "DIRS": [os.path.join(os.path.dirname(__file__), "templates")],
            }
        ]
    )
    def test_simple_custom_sitemap(self):
        "A simple sitemap can be rendered with a custom template"
        response = self.client.get("/simple/custom-sitemap.xml")
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (
            self.base_url,
            date.today(),
        )
        self.assertXMLEqual(response.content.decode(), expected_content)

    def test_sitemap_last_modified(self):
        "Last-Modified header is set correctly"
        response = self.client.get("/lastmod/sitemap.xml")
        self.assertEqual(
            response.headers["Last-Modified"], "Wed, 13 Mar 2013 10:00:00 GMT"
        )

    def test_sitemap_last_modified_date(self):
        """
        The Last-Modified header should support dates (without time).
        """
        response = self.client.get("/lastmod/date-sitemap.xml")
        self.assertEqual(
            response.headers["Last-Modified"], "Wed, 13 Mar 2013 00:00:00 GMT"
        )

    def test_sitemap_last_modified_tz(self):
        """
        The Last-Modified header should be converted from timezone aware dates
        to GMT.
        """
        response = self.client.get("/lastmod/tz-sitemap.xml")
        self.assertEqual(
            response.headers["Last-Modified"], "Wed, 13 Mar 2013 15:00:00 GMT"
        )

    def test_sitemap_last_modified_missing(self):
        "Last-Modified header is missing when sitemap has no lastmod"
        response = self.client.get("/generic/sitemap.xml")
        self.assertFalse(response.has_header("Last-Modified"))

    def test_sitemap_last_modified_mixed(self):
        "Last-Modified header is omitted when lastmod isn't set on all items"
        response = self.client.get("/lastmod-mixed/sitemap.xml")
        self.assertFalse(response.has_header("Last-Modified"))

    def test_sitemaps_lastmod_mixed_ascending_last_modified_missing(self):
        """
        The Last-Modified header is omitted when lastmod isn't found in all
        sitemaps. Test sitemaps are sorted by lastmod in ascending order.
        """
        response = self.client.get("/lastmod-sitemaps/mixed-ascending.xml")
        self.assertFalse(response.has_header("Last-Modified"))

    def test_sitemaps_lastmod_mixed_descending_last_modified_missing(self):
        """
        The Last-Modified header is omitted when lastmod isn't found in all
        sitemaps. Test sitemaps are sorted by lastmod in descending order.
""" response = self.client.get("/lastmod-sitemaps/mixed-descending.xml") self.assertFalse(response.has_header("Last-Modified")) def test_sitemaps_lastmod_ascending(self): """ The Last-Modified header is set to the most recent sitemap lastmod. Test sitemaps are sorted by lastmod in ascending order. """ response = self.client.get("/lastmod-sitemaps/ascending.xml") self.assertEqual( response.headers["Last-Modified"], "Sat, 20 Apr 2013 05:00:00 GMT" ) def test_sitemaps_lastmod_descending(self): """ The Last-Modified header is set to the most recent sitemap lastmod. Test sitemaps are sorted by lastmod in descending order. """ response = self.client.get("/lastmod-sitemaps/descending.xml") self.assertEqual( response.headers["Last-Modified"], "Sat, 20 Apr 2013 05:00:00 GMT" ) def test_sitemap_get_latest_lastmod_none(self): """ sitemapindex.lastmod is omitted when Sitemap.lastmod is callable and Sitemap.get_latest_lastmod is not implemented """ response = self.client.get("/lastmod/get-latest-lastmod-none-sitemap.xml") self.assertNotContains(response, "<lastmod>") def test_sitemap_get_latest_lastmod(self): """ sitemapindex.lastmod is included when Sitemap.lastmod is attribute and Sitemap.get_latest_lastmod is implemented """ response = self.client.get("/lastmod/get-latest-lastmod-sitemap.xml") self.assertContains(response, "<lastmod>2013-03-13T10:00:00</lastmod>") def test_sitemap_latest_lastmod_timezone(self): """ lastmod datestamp shows timezones if Sitemap.get_latest_lastmod returns an aware datetime. """ response = self.client.get("/lastmod/latest-lastmod-timezone-sitemap.xml") self.assertContains(response, "<lastmod>2013-03-13T10:00:00-05:00</lastmod>") def test_localized_priority(self): """The priority value should not be localized.""" with translation.override("fr"): self.assertEqual("0,3", localize(0.3)) # Priorities aren't rendered in localized format. response = self.client.get("/simple/sitemap.xml") self.assertContains(response, "<priority>0.5</priority>") self.assertContains(response, "<lastmod>%s</lastmod>" % date.today()) @modify_settings(INSTALLED_APPS={"remove": "django.contrib.sites"}) def test_requestsite_sitemap(self): # Hitting the flatpages sitemap without the sites framework installed # doesn't raise an exception. response = self.client.get("/simple/sitemap.xml") expected_content = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' "<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod>" "<changefreq>never</changefreq><priority>0.5</priority></url>\n" "</urlset>" ) % date.today() self.assertXMLEqual(response.content.decode(), expected_content) @ignore_warnings(category=RemovedInDjango50Warning) def test_sitemap_get_urls_no_site_1(self): """ Check we get ImproperlyConfigured if we don't pass a site object to Sitemap.get_urls and no Site objects exist """ Site.objects.all().delete() with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg): Sitemap().get_urls() @modify_settings(INSTALLED_APPS={"remove": "django.contrib.sites"}) @ignore_warnings(category=RemovedInDjango50Warning) def test_sitemap_get_urls_no_site_2(self): """ Check we get ImproperlyConfigured when we don't pass a site object to Sitemap.get_urls if Site objects exists, but the sites framework is not actually installed. 
""" with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg): Sitemap().get_urls() @ignore_warnings(category=RemovedInDjango50Warning) def test_sitemap_item(self): """ Check to make sure that the raw item is included with each Sitemap.get_url() url result. """ test_sitemap = Sitemap() test_sitemap.items = TestModel.objects.order_by("pk").all def is_testmodel(url): return isinstance(url["item"], TestModel) item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls())) self.assertTrue(item_in_url_info) def test_cached_sitemap_index(self): """ A cached sitemap index can be rendered (#2713). """ response = self.client.get("/cached/index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/cached/sitemap-simple.xml</loc><lastmod>%s</lastmod></sitemap> </sitemapindex> """ % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) def test_x_robots_sitemap(self): response = self.client.get("/simple/index.xml") self.assertEqual(response.headers["X-Robots-Tag"], "noindex, noodp, noarchive") response = self.client.get("/simple/sitemap.xml") self.assertEqual(response.headers["X-Robots-Tag"], "noindex, noodp, noarchive") def test_empty_sitemap(self): response = self.client.get("/empty/sitemap.xml") self.assertEqual(response.status_code, 200) @override_settings(LANGUAGES=(("en", "English"), ("pt", "Portuguese"))) def test_simple_i18n_sitemap_index(self): """ A simple i18n sitemap index can be rendered, without logging variable lookup errors. """ with self.assertNoLogs("django.template", "DEBUG"): response = self.client.get("/simple/i18n.xml") expected_content = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' "<url><loc>{0}/en/i18n/testmodel/{1}/</loc><changefreq>never</changefreq>" "<priority>0.5</priority></url><url><loc>{0}/pt/i18n/testmodel/{1}/</loc>" "<changefreq>never</changefreq><priority>0.5</priority></url>\n" "</urlset>" ).format(self.base_url, self.i18n_model.pk) self.assertXMLEqual(response.content.decode(), expected_content) @override_settings(LANGUAGES=(("en", "English"), ("pt", "Portuguese"))) def test_alternate_i18n_sitemap_index(self): """ A i18n sitemap with alternate/hreflang links can be rendered. 
""" response = self.client.get("/alternates/i18n.xml") url, pk = self.base_url, self.i18n_model.pk expected_urls = f""" <url><loc>{url}/en/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> </url> <url><loc>{url}/pt/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> </url> """.replace( "\n", "" ) expected_content = ( f'<?xml version="1.0" encoding="UTF-8"?>\n' f'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' f'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' f"{expected_urls}\n" f"</urlset>" ) self.assertXMLEqual(response.content.decode(), expected_content) @override_settings( LANGUAGES=(("en", "English"), ("pt", "Portuguese"), ("es", "Spanish")) ) def test_alternate_i18n_sitemap_limited(self): """ A i18n sitemap index with limited languages can be rendered. """ response = self.client.get("/limited/i18n.xml") url, pk = self.base_url, self.i18n_model.pk expected_urls = f""" <url><loc>{url}/en/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="es" href="{url}/es/i18n/testmodel/{pk}/"/> </url> <url><loc>{url}/es/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="es" href="{url}/es/i18n/testmodel/{pk}/"/> </url> """.replace( "\n", "" ) expected_content = ( f'<?xml version="1.0" encoding="UTF-8"?>\n' f'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' f'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' f"{expected_urls}\n" f"</urlset>" ) self.assertXMLEqual(response.content.decode(), expected_content) @override_settings(LANGUAGES=(("en", "English"), ("pt", "Portuguese"))) def test_alternate_i18n_sitemap_xdefault(self): """ A i18n sitemap index with x-default can be rendered. 
""" response = self.client.get("/x-default/i18n.xml") url, pk = self.base_url, self.i18n_model.pk expected_urls = f""" <url><loc>{url}/en/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="x-default" href="{url}/i18n/testmodel/{pk}/"/> </url> <url><loc>{url}/pt/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="x-default" href="{url}/i18n/testmodel/{pk}/"/> </url> """.replace( "\n", "" ) expected_content = ( f'<?xml version="1.0" encoding="UTF-8"?>\n' f'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' f'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' f"{expected_urls}\n" f"</urlset>" ) self.assertXMLEqual(response.content.decode(), expected_content) def test_sitemap_without_entries(self): response = self.client.get("/sitemap-without-entries/sitemap.xml") expected_content = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n\n' "</urlset>" ) self.assertXMLEqual(response.content.decode(), expected_content) def test_callable_sitemod_partial(self): """ Not all items have `lastmod`. Therefore the `Last-Modified` header is not set by the detail or index sitemap view. """ index_response = self.client.get("/callable-lastmod-partial/index.xml") sitemap_response = self.client.get("/callable-lastmod-partial/sitemap.xml") self.assertNotIn("Last-Modified", index_response) self.assertNotIn("Last-Modified", sitemap_response) expected_content_index = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>http://example.com/simple/sitemap-callable-lastmod.xml</loc></sitemap> </sitemapindex> """ expected_content_sitemap = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' "<url><loc>http://example.com/location/</loc>" "<lastmod>2013-03-13</lastmod></url><url>" "<loc>http://example.com/location/</loc></url>\n" "</urlset>" ) self.assertXMLEqual(index_response.content.decode(), expected_content_index) self.assertXMLEqual(sitemap_response.content.decode(), expected_content_sitemap) def test_callable_sitemod_full(self): """ All items in the sitemap have `lastmod`. The `Last-Modified` header is set for the detail and index sitemap view. 
""" index_response = self.client.get("/callable-lastmod-full/index.xml") sitemap_response = self.client.get("/callable-lastmod-full/sitemap.xml") self.assertEqual( index_response.headers["Last-Modified"], "Thu, 13 Mar 2014 10:00:00 GMT" ) self.assertEqual( sitemap_response.headers["Last-Modified"], "Thu, 13 Mar 2014 10:00:00 GMT" ) expected_content_index = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>http://example.com/simple/sitemap-callable-lastmod.xml</loc><lastmod>2014-03-13T10:00:00</lastmod></sitemap> </sitemapindex> """ expected_content_sitemap = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' "<url><loc>http://example.com/location/</loc>" "<lastmod>2013-03-13</lastmod></url>" "<url><loc>http://example.com/location/</loc>" "<lastmod>2014-03-13</lastmod></url>\n" "</urlset>" ) self.assertXMLEqual(index_response.content.decode(), expected_content_index) self.assertXMLEqual(sitemap_response.content.decode(), expected_content_sitemap) def test_callable_sitemod_no_items(self): index_response = self.client.get("/callable-lastmod-no-items/index.xml") self.assertNotIn("Last-Modified", index_response) expected_content_index = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>http://example.com/simple/sitemap-callable-lastmod.xml</loc></sitemap> </sitemapindex> """ self.assertXMLEqual(index_response.content.decode(), expected_content_index) # RemovedInDjango50Warning class DeprecatedTests(SitemapTestsBase): @override_settings( TEMPLATES=[ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [os.path.join(os.path.dirname(__file__), "templates")], } ] ) def test_simple_sitemap_custom_index_warning(self): msg = ( "Calling `__str__` on SitemapIndexItem is deprecated, use the `location` " "attribute instead." ) with self.assertRaisesMessage(RemovedInDjango50Warning, msg): self.client.get("/simple/custom-index.xml") @ignore_warnings(category=RemovedInDjango50Warning) @override_settings( TEMPLATES=[ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [os.path.join(os.path.dirname(__file__), "templates")], } ] ) def test_simple_sitemap_custom_index(self): "A simple sitemap index can be rendered with a custom template" response = self.client.get("/simple/custom-index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <!-- This is a customised template --> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % ( self.base_url ) self.assertXMLEqual(response.content.decode(), expected_content)
4ace64b5a29bbfd2d2adf3a466cb0c00880c65e4ce38931aa88a070e423fe9ad
from math import ceil from django.db import connection, models from django.db.models import ProtectedError, Q, RestrictedError from django.db.models.deletion import Collector from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature from .models import ( B1, B2, B3, MR, A, Avatar, B, Base, Child, DeleteBottom, DeleteTop, GenericB1, GenericB2, GenericDeleteBottom, HiddenUser, HiddenUserProfile, M, M2MFrom, M2MTo, MRNull, Origin, P, Parent, R, RChild, RChildChild, Referrer, S, T, User, create_a, get_default_r, ) class OnDeleteTests(TestCase): def setUp(self): self.DEFAULT = get_default_r() def test_auto(self): a = create_a("auto") a.auto.delete() self.assertFalse(A.objects.filter(name="auto").exists()) def test_non_callable(self): msg = "on_delete must be callable." with self.assertRaisesMessage(TypeError, msg): models.ForeignKey("self", on_delete=None) with self.assertRaisesMessage(TypeError, msg): models.OneToOneField("self", on_delete=None) def test_auto_nullable(self): a = create_a("auto_nullable") a.auto_nullable.delete() self.assertFalse(A.objects.filter(name="auto_nullable").exists()) def test_setvalue(self): a = create_a("setvalue") a.setvalue.delete() a = A.objects.get(pk=a.pk) self.assertEqual(self.DEFAULT, a.setvalue.pk) def test_setnull(self): a = create_a("setnull") a.setnull.delete() a = A.objects.get(pk=a.pk) self.assertIsNone(a.setnull) def test_setdefault(self): a = create_a("setdefault") a.setdefault.delete() a = A.objects.get(pk=a.pk) self.assertEqual(self.DEFAULT, a.setdefault.pk) def test_setdefault_none(self): a = create_a("setdefault_none") a.setdefault_none.delete() a = A.objects.get(pk=a.pk) self.assertIsNone(a.setdefault_none) def test_cascade(self): a = create_a("cascade") a.cascade.delete() self.assertFalse(A.objects.filter(name="cascade").exists()) def test_cascade_nullable(self): a = create_a("cascade_nullable") a.cascade_nullable.delete() self.assertFalse(A.objects.filter(name="cascade_nullable").exists()) def test_protect(self): a = create_a("protect") msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through protected foreign keys: 'A.protect'." ) with self.assertRaisesMessage(ProtectedError, msg) as cm: a.protect.delete() self.assertEqual(cm.exception.protected_objects, {a}) def test_protect_multiple(self): a = create_a("protect") b = B.objects.create(protect=a.protect) msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through protected foreign keys: 'A.protect', " "'B.protect'." ) with self.assertRaisesMessage(ProtectedError, msg) as cm: a.protect.delete() self.assertEqual(cm.exception.protected_objects, {a, b}) def test_protect_path(self): a = create_a("protect") a.protect.p = P.objects.create() a.protect.save() msg = ( "Cannot delete some instances of model 'P' because they are " "referenced through protected foreign keys: 'R.p'." ) with self.assertRaisesMessage(ProtectedError, msg) as cm: a.protect.p.delete() self.assertEqual(cm.exception.protected_objects, {a}) def test_do_nothing(self): # Testing DO_NOTHING is a bit harder: It would raise IntegrityError for # a normal model, so we connect to pre_delete and set the fk to a known # value. 
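        # The pre_delete receiver below repoints the DO_NOTHING FKs at
        # replacement_r before the referenced row disappears.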
replacement_r = R.objects.create() def check_do_nothing(sender, **kwargs): obj = kwargs["instance"] obj.donothing_set.update(donothing=replacement_r) models.signals.pre_delete.connect(check_do_nothing) a = create_a("do_nothing") a.donothing.delete() a = A.objects.get(pk=a.pk) self.assertEqual(replacement_r, a.donothing) models.signals.pre_delete.disconnect(check_do_nothing) def test_do_nothing_qscount(self): """ A models.DO_NOTHING relation doesn't trigger a query. """ b = Base.objects.create() with self.assertNumQueries(1): # RelToBase should not be queried. b.delete() self.assertEqual(Base.objects.count(), 0) def test_inheritance_cascade_up(self): child = RChild.objects.create() child.delete() self.assertFalse(R.objects.filter(pk=child.pk).exists()) def test_inheritance_cascade_down(self): child = RChild.objects.create() parent = child.r_ptr parent.delete() self.assertFalse(RChild.objects.filter(pk=child.pk).exists()) def test_cascade_from_child(self): a = create_a("child") a.child.delete() self.assertFalse(A.objects.filter(name="child").exists()) self.assertFalse(R.objects.filter(pk=a.child_id).exists()) def test_cascade_from_parent(self): a = create_a("child") R.objects.get(pk=a.child_id).delete() self.assertFalse(A.objects.filter(name="child").exists()) self.assertFalse(RChild.objects.filter(pk=a.child_id).exists()) def test_setnull_from_child(self): a = create_a("child_setnull") a.child_setnull.delete() self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists()) a = A.objects.get(pk=a.pk) self.assertIsNone(a.child_setnull) def test_setnull_from_parent(self): a = create_a("child_setnull") R.objects.get(pk=a.child_setnull_id).delete() self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists()) a = A.objects.get(pk=a.pk) self.assertIsNone(a.child_setnull) def test_o2o_setnull(self): a = create_a("o2o_setnull") a.o2o_setnull.delete() a = A.objects.get(pk=a.pk) self.assertIsNone(a.o2o_setnull) def test_restrict(self): a = create_a("restrict") msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through restricted foreign keys: 'A.restrict'." ) with self.assertRaisesMessage(RestrictedError, msg) as cm: a.restrict.delete() self.assertEqual(cm.exception.restricted_objects, {a}) def test_restrict_multiple(self): a = create_a("restrict") b3 = B3.objects.create(restrict=a.restrict) msg = ( "Cannot delete some instances of model 'R' because they are " "referenced through restricted foreign keys: 'A.restrict', " "'B3.restrict'." ) with self.assertRaisesMessage(RestrictedError, msg) as cm: a.restrict.delete() self.assertEqual(cm.exception.restricted_objects, {a, b3}) def test_restrict_path_cascade_indirect(self): a = create_a("restrict") a.restrict.p = P.objects.create() a.restrict.save() msg = ( "Cannot delete some instances of model 'P' because they are " "referenced through restricted foreign keys: 'A.restrict'." ) with self.assertRaisesMessage(RestrictedError, msg) as cm: a.restrict.p.delete() self.assertEqual(cm.exception.restricted_objects, {a}) # Object referenced also with CASCADE relationship can be deleted. 
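        # (RESTRICT only blocks the delete while the restricted object would
        # survive; once the same collection removes it via the CASCADE path,
        # the check is waived.)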
a.cascade.p = a.restrict.p a.cascade.save() a.restrict.p.delete() self.assertFalse(A.objects.filter(name="restrict").exists()) self.assertFalse(R.objects.filter(pk=a.restrict_id).exists()) def test_restrict_path_cascade_direct(self): a = create_a("restrict") a.restrict.p = P.objects.create() a.restrict.save() a.cascade_p = a.restrict.p a.save() a.restrict.p.delete() self.assertFalse(A.objects.filter(name="restrict").exists()) self.assertFalse(R.objects.filter(pk=a.restrict_id).exists()) def test_restrict_path_cascade_indirect_diamond(self): delete_top = DeleteTop.objects.create() b1 = B1.objects.create(delete_top=delete_top) b2 = B2.objects.create(delete_top=delete_top) delete_bottom = DeleteBottom.objects.create(b1=b1, b2=b2) msg = ( "Cannot delete some instances of model 'B1' because they are " "referenced through restricted foreign keys: 'DeleteBottom.b1'." ) with self.assertRaisesMessage(RestrictedError, msg) as cm: b1.delete() self.assertEqual(cm.exception.restricted_objects, {delete_bottom}) self.assertTrue(DeleteTop.objects.exists()) self.assertTrue(B1.objects.exists()) self.assertTrue(B2.objects.exists()) self.assertTrue(DeleteBottom.objects.exists()) # Object referenced also with CASCADE relationship can be deleted. delete_top.delete() self.assertFalse(DeleteTop.objects.exists()) self.assertFalse(B1.objects.exists()) self.assertFalse(B2.objects.exists()) self.assertFalse(DeleteBottom.objects.exists()) def test_restrict_gfk_no_fast_delete(self): delete_top = DeleteTop.objects.create() generic_b1 = GenericB1.objects.create(generic_delete_top=delete_top) generic_b2 = GenericB2.objects.create(generic_delete_top=delete_top) generic_delete_bottom = GenericDeleteBottom.objects.create( generic_b1=generic_b1, generic_b2=generic_b2, ) msg = ( "Cannot delete some instances of model 'GenericB1' because they " "are referenced through restricted foreign keys: " "'GenericDeleteBottom.generic_b1'." ) with self.assertRaisesMessage(RestrictedError, msg) as cm: generic_b1.delete() self.assertEqual(cm.exception.restricted_objects, {generic_delete_bottom}) self.assertTrue(DeleteTop.objects.exists()) self.assertTrue(GenericB1.objects.exists()) self.assertTrue(GenericB2.objects.exists()) self.assertTrue(GenericDeleteBottom.objects.exists()) # Object referenced also with CASCADE relationship can be deleted. delete_top.delete() self.assertFalse(DeleteTop.objects.exists()) self.assertFalse(GenericB1.objects.exists()) self.assertFalse(GenericB2.objects.exists()) self.assertFalse(GenericDeleteBottom.objects.exists()) class DeletionTests(TestCase): def test_sliced_queryset(self): msg = "Cannot use 'limit' or 'offset' with delete()." with self.assertRaisesMessage(TypeError, msg): M.objects.all()[0:5].delete() def test_pk_none(self): m = M() msg = "M object can't be deleted because its id attribute is set to None." 
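        # Deleting an unsaved instance must raise instead of silently doing
        # nothing.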
        with self.assertRaisesMessage(ValueError, msg):
            m.delete()

    def test_m2m(self):
        m = M.objects.create()
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        r.delete()
        self.assertFalse(MR.objects.exists())

        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        m.delete()
        self.assertFalse(MR.objects.exists())

        m = M.objects.create()
        r = R.objects.create()
        m.m2m.add(r)
        r.delete()
        through = M._meta.get_field("m2m").remote_field.through
        self.assertFalse(through.objects.exists())

        r = R.objects.create()
        m.m2m.add(r)
        m.delete()
        self.assertFalse(through.objects.exists())

        m = M.objects.create()
        r = R.objects.create()
        MRNull.objects.create(m=m, r=r)
        r.delete()
        self.assertTrue(MRNull.objects.exists())
        self.assertFalse(m.m2m_through_null.exists())

    def test_bulk(self):
        s = S.objects.create(r=R.objects.create())
        for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
            T.objects.create(s=s)
        #   1 (select related `T` instances)
        # + 1 (select related `U` instances)
        # + 2 (delete `T` instances in batches)
        # + 1 (delete `s`)
        self.assertNumQueries(5, s.delete)
        self.assertFalse(S.objects.exists())

    def test_instance_update(self):
        deleted = []
        related_setnull_sets = []

        def pre_delete(sender, **kwargs):
            obj = kwargs["instance"]
            deleted.append(obj)
            if isinstance(obj, R):
                related_setnull_sets.append([a.pk for a in obj.setnull_set.all()])

        models.signals.pre_delete.connect(pre_delete)
        a = create_a("update_setnull")
        a.setnull.delete()

        a = create_a("update_cascade")
        a.cascade.delete()

        for obj in deleted:
            self.assertIsNone(obj.pk)

        for pk_list in related_setnull_sets:
            for a in A.objects.filter(id__in=pk_list):
                self.assertIsNone(a.setnull)

        models.signals.pre_delete.disconnect(pre_delete)

    def test_deletion_order(self):
        pre_delete_order = []
        post_delete_order = []

        def log_pre_delete(sender, **kwargs):
            pre_delete_order.append((sender, kwargs["instance"].pk))

        def log_post_delete(sender, **kwargs):
            post_delete_order.append((sender, kwargs["instance"].pk))

        models.signals.post_delete.connect(log_post_delete)
        models.signals.pre_delete.connect(log_pre_delete)

        r = R.objects.create(pk=1)
        s1 = S.objects.create(pk=1, r=r)
        s2 = S.objects.create(pk=2, r=r)
        T.objects.create(pk=1, s=s1)
        T.objects.create(pk=2, s=s2)
        RChild.objects.create(r_ptr=r)
        r.delete()
        self.assertEqual(
            pre_delete_order, [(T, 1), (T, 2), (RChild, 1), (S, 1), (S, 2), (R, 1)]
        )
        self.assertEqual(
            post_delete_order, [(T, 2), (T, 1), (RChild, 1), (S, 2), (S, 1), (R, 1)]
        )

        models.signals.post_delete.disconnect(log_post_delete)
        models.signals.pre_delete.disconnect(log_pre_delete)

    def test_relational_post_delete_signals_happen_before_parent_object(self):
        deletions = []

        def log_post_delete(instance, **kwargs):
            self.assertTrue(R.objects.filter(pk=instance.r_id))
            self.assertIs(type(instance), S)
            deletions.append(instance.id)

        r = R.objects.create()
        s = S.objects.create(r=r)
        s_id = s.pk
        models.signals.post_delete.connect(log_post_delete, sender=S)

        try:
            r.delete()
        finally:
            models.signals.post_delete.disconnect(log_post_delete)

        self.assertEqual(len(deletions), 1)
        self.assertEqual(deletions[0], s_id)

    @skipUnlessDBFeature("can_defer_constraint_checks")
    def test_can_defer_constraint_checks(self):
        u = User.objects.create(avatar=Avatar.objects.create())
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to delete the avatar
        # The important thing is that when we can defer constraint checks there
        # is no need to do an UPDATE on User.avatar to null it out.

        # Attach a signal to make sure we will not do fast_deletes.
calls = [] def noop(*args, **kwargs): calls.append("") models.signals.post_delete.connect(noop, sender=User) self.assertNumQueries(3, a.delete) self.assertFalse(User.objects.exists()) self.assertFalse(Avatar.objects.exists()) self.assertEqual(len(calls), 1) models.signals.post_delete.disconnect(noop, sender=User) @skipIfDBFeature("can_defer_constraint_checks") def test_cannot_defer_constraint_checks(self): u = User.objects.create(avatar=Avatar.objects.create()) # Attach a signal to make sure we will not do fast_deletes. calls = [] def noop(*args, **kwargs): calls.append("") models.signals.post_delete.connect(noop, sender=User) a = Avatar.objects.get(pk=u.avatar_id) # The below doesn't make sense... Why do we need to null out # user.avatar if we are going to delete the user immediately after it, # and there are no more cascades. # 1 query to find the users for the avatar. # 1 query to delete the user # 1 query to null out user.avatar, because we can't defer the constraint # 1 query to delete the avatar self.assertNumQueries(4, a.delete) self.assertFalse(User.objects.exists()) self.assertFalse(Avatar.objects.exists()) self.assertEqual(len(calls), 1) models.signals.post_delete.disconnect(noop, sender=User) def test_hidden_related(self): r = R.objects.create() h = HiddenUser.objects.create(r=r) HiddenUserProfile.objects.create(user=h) r.delete() self.assertEqual(HiddenUserProfile.objects.count(), 0) def test_large_delete(self): TEST_SIZE = 2000 objs = [Avatar() for i in range(0, TEST_SIZE)] Avatar.objects.bulk_create(objs) # Calculate the number of queries needed. batch_size = connection.ops.bulk_batch_size(["pk"], objs) # The related fetches are done in batches. batches = ceil(len(objs) / batch_size) # One query for Avatar.objects.all() and then one related fast delete for # each batch. fetches_to_mem = 1 + batches # The Avatar objects are going to be deleted in batches of # GET_ITERATOR_CHUNK_SIZE. 
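        # For example, with TEST_SIZE = 2000 and GET_ITERATOR_CHUNK_SIZE =
        # 100, that's 20 DELETE queries on top of the fetches.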
queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE self.assertNumQueries(queries, Avatar.objects.all().delete) self.assertFalse(Avatar.objects.exists()) def test_large_delete_related(self): TEST_SIZE = 2000 s = S.objects.create(r=R.objects.create()) for i in range(TEST_SIZE): T.objects.create(s=s) batch_size = max(connection.ops.bulk_batch_size(["pk"], range(TEST_SIZE)), 1) # TEST_SIZE / batch_size (select related `T` instances) # + 1 (select related `U` instances) # + TEST_SIZE / GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches) # + 1 (delete `s`) expected_num_queries = ceil(TEST_SIZE / batch_size) expected_num_queries += ceil(TEST_SIZE / GET_ITERATOR_CHUNK_SIZE) + 2 self.assertNumQueries(expected_num_queries, s.delete) self.assertFalse(S.objects.exists()) self.assertFalse(T.objects.exists()) def test_delete_with_keeping_parents(self): child = RChild.objects.create() parent_id = child.r_ptr_id child.delete(keep_parents=True) self.assertFalse(RChild.objects.filter(id=child.id).exists()) self.assertTrue(R.objects.filter(id=parent_id).exists()) def test_delete_with_keeping_parents_relationships(self): child = RChild.objects.create() parent_id = child.r_ptr_id parent_referent_id = S.objects.create(r=child.r_ptr).pk child.delete(keep_parents=True) self.assertFalse(RChild.objects.filter(id=child.id).exists()) self.assertTrue(R.objects.filter(id=parent_id).exists()) self.assertTrue(S.objects.filter(pk=parent_referent_id).exists()) childchild = RChildChild.objects.create() parent_id = childchild.rchild_ptr.r_ptr_id child_id = childchild.rchild_ptr_id parent_referent_id = S.objects.create(r=childchild.rchild_ptr.r_ptr).pk childchild.delete(keep_parents=True) self.assertFalse(RChildChild.objects.filter(id=childchild.id).exists()) self.assertTrue(RChild.objects.filter(id=child_id).exists()) self.assertTrue(R.objects.filter(id=parent_id).exists()) self.assertTrue(S.objects.filter(pk=parent_referent_id).exists()) def test_queryset_delete_returns_num_rows(self): """ QuerySet.delete() should return the number of deleted rows and a dictionary with the number of deletions for each object type. """ Avatar.objects.bulk_create( [Avatar(desc="a"), Avatar(desc="b"), Avatar(desc="c")] ) avatars_count = Avatar.objects.count() deleted, rows_count = Avatar.objects.all().delete() self.assertEqual(deleted, avatars_count) # more complex example with multiple object types r = R.objects.create() h1 = HiddenUser.objects.create(r=r) HiddenUser.objects.create(r=r) HiddenUserProfile.objects.create(user=h1) existed_objs = { R._meta.label: R.objects.count(), HiddenUser._meta.label: HiddenUser.objects.count(), HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(), } deleted, deleted_objs = R.objects.all().delete() self.assertCountEqual(deleted_objs.keys(), existed_objs.keys()) for k, v in existed_objs.items(): self.assertEqual(deleted_objs[k], v) def test_model_delete_returns_num_rows(self): """ Model.delete() should return the number of deleted rows and a dictionary with the number of deletions for each object type. 
""" r = R.objects.create() h1 = HiddenUser.objects.create(r=r) h2 = HiddenUser.objects.create(r=r) HiddenUser.objects.create(r=r) HiddenUserProfile.objects.create(user=h1) HiddenUserProfile.objects.create(user=h2) m1 = M.objects.create() m2 = M.objects.create() MR.objects.create(r=r, m=m1) r.m_set.add(m1) r.m_set.add(m2) r.save() existed_objs = { R._meta.label: R.objects.count(), HiddenUser._meta.label: HiddenUser.objects.count(), MR._meta.label: MR.objects.count(), HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(), M.m2m.through._meta.label: M.m2m.through.objects.count(), } deleted, deleted_objs = r.delete() self.assertEqual(deleted, sum(existed_objs.values())) self.assertCountEqual(deleted_objs.keys(), existed_objs.keys()) for k, v in existed_objs.items(): self.assertEqual(deleted_objs[k], v) def test_proxied_model_duplicate_queries(self): """ #25685 - Deleting instances of a model with existing proxy classes should not issue multiple queries during cascade deletion of referring models. """ avatar = Avatar.objects.create() # One query for the Avatar table and a second for the User one. with self.assertNumQueries(2): avatar.delete() def test_only_referenced_fields_selected(self): """ Only referenced fields are selected during cascade deletion SELECT unless deletion signals are connected. """ origin = Origin.objects.create() expected_sql = str( Referrer.objects.only( # Both fields are referenced by SecondReferrer. "id", "unique_field", ) .filter(origin__in=[origin]) .query ) with self.assertNumQueries(2) as ctx: origin.delete() self.assertEqual(ctx.captured_queries[0]["sql"], expected_sql) def receiver(instance, **kwargs): pass # All fields are selected if deletion signals are connected. for signal_name in ("pre_delete", "post_delete"): with self.subTest(signal=signal_name): origin = Origin.objects.create() signal = getattr(models.signals, signal_name) signal.connect(receiver, sender=Referrer) with self.assertNumQueries(2) as ctx: origin.delete() self.assertIn( connection.ops.quote_name("large_field"), ctx.captured_queries[0]["sql"], ) signal.disconnect(receiver, sender=Referrer) class FastDeleteTests(TestCase): def test_fast_delete_all(self): with self.assertNumQueries(1) as ctx: User.objects.all().delete() sql = ctx.captured_queries[0]["sql"] # No subqueries is used when performing a full delete. self.assertNotIn("SELECT", sql) def test_fast_delete_fk(self): u = User.objects.create(avatar=Avatar.objects.create()) a = Avatar.objects.get(pk=u.avatar_id) # 1 query to fast-delete the user # 1 query to delete the avatar self.assertNumQueries(2, a.delete) self.assertFalse(User.objects.exists()) self.assertFalse(Avatar.objects.exists()) def test_fast_delete_m2m(self): t = M2MTo.objects.create() f = M2MFrom.objects.create() f.m2m.add(t) # 1 to delete f, 1 to fast-delete m2m for f self.assertNumQueries(2, f.delete) def test_fast_delete_revm2m(self): t = M2MTo.objects.create() f = M2MFrom.objects.create() f.m2m.add(t) # 1 to delete t, 1 to fast-delete t's m_set self.assertNumQueries(2, f.delete) def test_fast_delete_qs(self): u1 = User.objects.create() u2 = User.objects.create() self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete) self.assertEqual(User.objects.count(), 1) self.assertTrue(User.objects.filter(pk=u2.pk).exists()) def test_fast_delete_instance_set_pk_none(self): u = User.objects.create() # User can be fast-deleted. 
        collector = Collector(using="default")
        self.assertTrue(collector.can_fast_delete(u))
        u.delete()
        self.assertIsNone(u.pk)

    def test_fast_delete_joined_qs(self):
        a = Avatar.objects.create(desc="a")
        User.objects.create(avatar=a)
        u2 = User.objects.create()
        self.assertNumQueries(1, User.objects.filter(avatar__desc="a").delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())

    def test_fast_delete_inheritance(self):
        c = Child.objects.create()
        p = Parent.objects.create()
        # 1 for self, 1 for parent
        self.assertNumQueries(2, c.delete)
        self.assertFalse(Child.objects.exists())
        self.assertEqual(Parent.objects.count(), 1)
        self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        c = Child.objects.create()
        p = c.parent_ptr
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        self.assertFalse(Child.objects.exists())

    def test_fast_delete_large_batch(self):
        User.objects.bulk_create(User() for i in range(0, 2000))
        # No problems here - we aren't going to cascade, so we will fast
        # delete the objects in a single query.
        self.assertNumQueries(1, User.objects.all().delete)
        a = Avatar.objects.create(desc="a")
        User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
        # We don't hit parameter amount limits for a, so just one query for
        # that + fast delete of the related objs.
        self.assertNumQueries(2, a.delete)
        self.assertEqual(User.objects.count(), 0)

    def test_fast_delete_empty_no_update_can_self_select(self):
        """
        Fast deleting when DatabaseFeatures.update_can_self_select = False
        works even if the specified filter doesn't match any row (#25932).
        """
        with self.assertNumQueries(1):
            self.assertEqual(
                User.objects.filter(avatar__desc="missing").delete(),
                (0, {}),
            )

    def test_fast_delete_combined_relationships(self):
        # The cascading fast-delete of SecondReferrer should be combined
        # in a single DELETE WHERE referrer_id OR unique_field.
        origin = Origin.objects.create()
        referer = Referrer.objects.create(origin=origin, unique_field=42)
        with self.assertNumQueries(2):
            referer.delete()

    def test_fast_delete_aggregation(self):
        # Fast-deleting when filtering against an aggregation results in
        # a single query containing a subquery.
        Base.objects.create()
        with self.assertNumQueries(1):
            self.assertEqual(
                Base.objects.annotate(
                    rels_count=models.Count("rels"),
                )
                .filter(rels_count=0)
                .delete(),
                (1, {"delete.Base": 1}),
            )
        self.assertIs(Base.objects.exists(), False)

    def test_fast_delete_full_match(self):
        avatar = Avatar.objects.create(desc="bar")
        User.objects.create(avatar=avatar)
        with self.assertNumQueries(1):
            User.objects.filter(~Q(pk__in=[]) | Q(avatar__desc="foo")).delete()
        self.assertFalse(User.objects.exists())
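

# Editorial sketch, not part of Django's test suite: the fast-delete and
# return-value contracts exercised above, shown end to end. It reuses this
# suite's ``User`` model and the "delete" app label seen in the per-model
# counts elsewhere in this file.
def _demo_fast_delete_contract():
    from django.db.models.deletion import Collector

    u = User.objects.create()
    collector = Collector(using="default")
    # No deletion signals and nothing cascading back to User, so the
    # instance qualifies for the single-query path.
    assert collector.can_fast_delete(u)
    deleted, per_model = User.objects.filter(pk=u.pk).delete()
    # QuerySet.delete() returns a grand total plus per-model counts;
    # models with zero deletions are omitted from the mapping.
    assert deleted == 1
    assert per_model == {"delete.User": 1}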
421423fb38c1d24d2f54f6a7771047920cf8a40c38a7a75318cff3a3b3dceee6
from datetime import date, datetime from django.conf.urls.i18n import i18n_patterns from django.contrib.sitemaps import GenericSitemap, Sitemap, views from django.http import HttpResponse from django.urls import path from django.utils import timezone from django.views.decorators.cache import cache_page from ..models import I18nTestModel, TestModel class SimpleSitemap(Sitemap): changefreq = "never" priority = 0.5 location = "/location/" lastmod = date.today() def items(self): return [object()] class SimplePagedSitemap(Sitemap): lastmod = date.today() def items(self): return [object() for x in range(Sitemap.limit + 1)] class SimpleI18nSitemap(Sitemap): changefreq = "never" priority = 0.5 i18n = True def items(self): return I18nTestModel.objects.order_by("pk").all() class AlternatesI18nSitemap(SimpleI18nSitemap): alternates = True class LimitedI18nSitemap(AlternatesI18nSitemap): languages = ["en", "es"] class XDefaultI18nSitemap(AlternatesI18nSitemap): x_default = True class EmptySitemap(Sitemap): changefreq = "never" priority = 0.5 location = "/location/" class FixedLastmodSitemap(SimpleSitemap): lastmod = datetime(2013, 3, 13, 10, 0, 0) class FixedLastmodMixedSitemap(Sitemap): changefreq = "never" priority = 0.5 location = "/location/" loop = 0 def items(self): o1 = TestModel() o1.lastmod = datetime(2013, 3, 13, 10, 0, 0) o2 = TestModel() return [o1, o2] class FixedNewerLastmodSitemap(SimpleSitemap): lastmod = datetime(2013, 4, 20, 5, 0, 0) class DateSiteMap(SimpleSitemap): lastmod = date(2013, 3, 13) class TimezoneSiteMap(SimpleSitemap): lastmod = datetime(2013, 3, 13, 10, 0, 0, tzinfo=timezone.get_fixed_timezone(-300)) class CallableLastmodPartialSitemap(Sitemap): """Not all items have `lastmod`.""" location = "/location/" def items(self): o1 = TestModel() o1.lastmod = datetime(2013, 3, 13, 10, 0, 0) o2 = TestModel() return [o1, o2] def lastmod(self, obj): return obj.lastmod class CallableLastmodFullSitemap(Sitemap): """All items have `lastmod`.""" location = "/location/" def items(self): o1 = TestModel() o1.lastmod = datetime(2013, 3, 13, 10, 0, 0) o2 = TestModel() o2.lastmod = datetime(2014, 3, 13, 10, 0, 0) return [o1, o2] def lastmod(self, obj): return obj.lastmod class CallableLastmodNoItemsSitemap(Sitemap): location = "/location/" def items(self): return [] def lastmod(self, obj): return obj.lastmod class GetLatestLastmodNoneSiteMap(Sitemap): changefreq = "never" priority = 0.5 location = "/location/" def items(self): return [object()] def lastmod(self, obj): return datetime(2013, 3, 13, 10, 0, 0) def get_latest_lastmod(self): return None class GetLatestLastmodSiteMap(SimpleSitemap): def get_latest_lastmod(self): return datetime(2013, 3, 13, 10, 0, 0) def testmodelview(request, id): return HttpResponse() simple_sitemaps = { "simple": SimpleSitemap, } simple_i18n_sitemaps = { "i18n": SimpleI18nSitemap, } alternates_i18n_sitemaps = { "i18n-alternates": AlternatesI18nSitemap, } limited_i18n_sitemaps = { "i18n-limited": LimitedI18nSitemap, } xdefault_i18n_sitemaps = { "i18n-xdefault": XDefaultI18nSitemap, } simple_sitemaps_not_callable = { "simple": SimpleSitemap(), } simple_sitemaps_paged = { "simple": SimplePagedSitemap, } empty_sitemaps = { "empty": EmptySitemap, } fixed_lastmod_sitemaps = { "fixed-lastmod": FixedLastmodSitemap, } fixed_lastmod_mixed_sitemaps = { "fixed-lastmod-mixed": FixedLastmodMixedSitemap, } sitemaps_lastmod_mixed_ascending = { "no-lastmod": EmptySitemap, "lastmod": FixedLastmodSitemap, } sitemaps_lastmod_mixed_descending = { "lastmod": FixedLastmodSitemap, 
"no-lastmod": EmptySitemap, } sitemaps_lastmod_ascending = { "date": DateSiteMap, "datetime": FixedLastmodSitemap, "datetime-newer": FixedNewerLastmodSitemap, } sitemaps_lastmod_descending = { "datetime-newer": FixedNewerLastmodSitemap, "datetime": FixedLastmodSitemap, "date": DateSiteMap, } generic_sitemaps = { "generic": GenericSitemap({"queryset": TestModel.objects.order_by("pk").all()}), } get_latest_lastmod_none_sitemaps = { "get-latest-lastmod-none": GetLatestLastmodNoneSiteMap, } get_latest_lastmod_sitemaps = { "get-latest-lastmod": GetLatestLastmodSiteMap, } latest_lastmod_timezone_sitemaps = { "latest-lastmod-timezone": TimezoneSiteMap, } generic_sitemaps_lastmod = { "generic": GenericSitemap( { "queryset": TestModel.objects.order_by("pk").all(), "date_field": "lastmod", } ), } callable_lastmod_partial_sitemap = { "callable-lastmod": CallableLastmodPartialSitemap, } callable_lastmod_full_sitemap = { "callable-lastmod": CallableLastmodFullSitemap, } callable_lastmod_no_items_sitemap = { "callable-lastmod": CallableLastmodNoItemsSitemap, } urlpatterns = [ path("simple/index.xml", views.index, {"sitemaps": simple_sitemaps}), path("simple-paged/index.xml", views.index, {"sitemaps": simple_sitemaps_paged}), path( "simple-not-callable/index.xml", views.index, {"sitemaps": simple_sitemaps_not_callable}, ), path( "simple/custom-index.xml", views.index, {"sitemaps": simple_sitemaps, "template_name": "custom_sitemap_index.xml"}, ), path( "simple/custom-lastmod-index.xml", views.index, { "sitemaps": simple_sitemaps, "template_name": "custom_sitemap_lastmod_index.xml", }, ), path( "simple/sitemap-<section>.xml", views.sitemap, {"sitemaps": simple_sitemaps}, name="django.contrib.sitemaps.views.sitemap", ), path( "simple/sitemap.xml", views.sitemap, {"sitemaps": simple_sitemaps}, name="django.contrib.sitemaps.views.sitemap", ), path( "simple/i18n.xml", views.sitemap, {"sitemaps": simple_i18n_sitemaps}, name="django.contrib.sitemaps.views.sitemap", ), path( "alternates/i18n.xml", views.sitemap, {"sitemaps": alternates_i18n_sitemaps}, name="django.contrib.sitemaps.views.sitemap", ), path( "limited/i18n.xml", views.sitemap, {"sitemaps": limited_i18n_sitemaps}, name="django.contrib.sitemaps.views.sitemap", ), path( "x-default/i18n.xml", views.sitemap, {"sitemaps": xdefault_i18n_sitemaps}, name="django.contrib.sitemaps.views.sitemap", ), path( "simple/custom-sitemap.xml", views.sitemap, {"sitemaps": simple_sitemaps, "template_name": "custom_sitemap.xml"}, name="django.contrib.sitemaps.views.sitemap", ), path( "empty/sitemap.xml", views.sitemap, {"sitemaps": empty_sitemaps}, name="django.contrib.sitemaps.views.sitemap", ), path( "lastmod/sitemap.xml", views.sitemap, {"sitemaps": fixed_lastmod_sitemaps}, name="django.contrib.sitemaps.views.sitemap", ), path( "lastmod-mixed/sitemap.xml", views.sitemap, {"sitemaps": fixed_lastmod_mixed_sitemaps}, name="django.contrib.sitemaps.views.sitemap", ), path( "lastmod/date-sitemap.xml", views.sitemap, {"sitemaps": {"date-sitemap": DateSiteMap}}, name="django.contrib.sitemaps.views.sitemap", ), path( "lastmod/tz-sitemap.xml", views.sitemap, {"sitemaps": {"tz-sitemap": TimezoneSiteMap}}, name="django.contrib.sitemaps.views.sitemap", ), path( "lastmod-sitemaps/mixed-ascending.xml", views.sitemap, {"sitemaps": sitemaps_lastmod_mixed_ascending}, name="django.contrib.sitemaps.views.sitemap", ), path( "lastmod-sitemaps/mixed-descending.xml", views.sitemap, {"sitemaps": sitemaps_lastmod_mixed_descending}, name="django.contrib.sitemaps.views.sitemap", ), path( 
"lastmod-sitemaps/ascending.xml", views.sitemap, {"sitemaps": sitemaps_lastmod_ascending}, name="django.contrib.sitemaps.views.sitemap", ), path( "lastmod-sitemaps/descending.xml", views.sitemap, {"sitemaps": sitemaps_lastmod_descending}, name="django.contrib.sitemaps.views.sitemap", ), path( "lastmod/get-latest-lastmod-none-sitemap.xml", views.index, {"sitemaps": get_latest_lastmod_none_sitemaps}, name="django.contrib.sitemaps.views.index", ), path( "lastmod/get-latest-lastmod-sitemap.xml", views.index, {"sitemaps": get_latest_lastmod_sitemaps}, name="django.contrib.sitemaps.views.index", ), path( "lastmod/latest-lastmod-timezone-sitemap.xml", views.index, {"sitemaps": latest_lastmod_timezone_sitemaps}, name="django.contrib.sitemaps.views.index", ), path( "generic/sitemap.xml", views.sitemap, {"sitemaps": generic_sitemaps}, name="django.contrib.sitemaps.views.sitemap", ), path( "generic-lastmod/sitemap.xml", views.sitemap, {"sitemaps": generic_sitemaps_lastmod}, name="django.contrib.sitemaps.views.sitemap", ), path( "cached/index.xml", cache_page(1)(views.index), {"sitemaps": simple_sitemaps, "sitemap_url_name": "cached_sitemap"}, ), path( "cached/sitemap-<section>.xml", cache_page(1)(views.sitemap), {"sitemaps": simple_sitemaps}, name="cached_sitemap", ), path( "sitemap-without-entries/sitemap.xml", views.sitemap, {"sitemaps": {}}, name="django.contrib.sitemaps.views.sitemap", ), path( "callable-lastmod-partial/index.xml", views.index, {"sitemaps": callable_lastmod_partial_sitemap}, ), path( "callable-lastmod-partial/sitemap.xml", views.sitemap, {"sitemaps": callable_lastmod_partial_sitemap}, ), path( "callable-lastmod-full/index.xml", views.index, {"sitemaps": callable_lastmod_full_sitemap}, ), path( "callable-lastmod-full/sitemap.xml", views.sitemap, {"sitemaps": callable_lastmod_full_sitemap}, ), path( "callable-lastmod-no-items/index.xml", views.index, {"sitemaps": callable_lastmod_no_items_sitemap}, ), path( "generic-lastmod/index.xml", views.index, {"sitemaps": generic_sitemaps_lastmod}, name="django.contrib.sitemaps.views.index", ), ] urlpatterns += i18n_patterns( path("i18n/testmodel/<int:id>/", testmodelview, name="i18n_testmodel"), )
1d0802e456d98d4e19d446a066011d89de759eb03f038eaefed8a9bfb52d328b
import json import mimetypes import os import sys from copy import copy from functools import partial from http import HTTPStatus from importlib import import_module from io import BytesIO from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit from asgiref.sync import sync_to_async from django.conf import settings from django.core.handlers.asgi import ASGIRequest from django.core.handlers.base import BaseHandler from django.core.handlers.wsgi import LimitedStream, WSGIRequest from django.core.serializers.json import DjangoJSONEncoder from django.core.signals import got_request_exception, request_finished, request_started from django.db import close_old_connections from django.http import HttpRequest, QueryDict, SimpleCookie from django.test import signals from django.test.utils import ContextList from django.urls import resolve from django.utils.encoding import force_bytes from django.utils.functional import SimpleLazyObject from django.utils.http import urlencode from django.utils.itercompat import is_iterable from django.utils.regex_helper import _lazy_re_compile __all__ = ( "AsyncClient", "AsyncRequestFactory", "Client", "RedirectCycleError", "RequestFactory", "encode_file", "encode_multipart", ) BOUNDARY = "BoUnDaRyStRiNg" MULTIPART_CONTENT = "multipart/form-data; boundary=%s" % BOUNDARY CONTENT_TYPE_RE = _lazy_re_compile(r".*; charset=([\w-]+);?") # Structured suffix spec: https://tools.ietf.org/html/rfc6838#section-4.2.8 JSON_CONTENT_TYPE_RE = _lazy_re_compile(r"^application\/(.+\+)?json") class RedirectCycleError(Exception): """The test client has been asked to follow a redirect loop.""" def __init__(self, message, last_response): super().__init__(message) self.last_response = last_response self.redirect_chain = last_response.redirect_chain class FakePayload: """ A wrapper around BytesIO that restricts what can be read since data from the network can't be sought and cannot be read outside of its content length. This makes sure that views can't do anything under the test client that wouldn't work in real life. """ def __init__(self, content=None): self.__content = BytesIO() self.__len = 0 self.read_started = False if content is not None: self.write(content) def __len__(self): return self.__len def read(self, num_bytes=None): if not self.read_started: self.__content.seek(0) self.read_started = True if num_bytes is None: num_bytes = self.__len or 0 assert ( self.__len >= num_bytes ), "Cannot read more than the available bytes from the HTTP incoming data." content = self.__content.read(num_bytes) self.__len -= num_bytes return content def write(self, content): if self.read_started: raise ValueError("Unable to write a payload after it's been read") content = force_bytes(content) self.__content.write(content) self.__len += len(content) def close(self): pass def closing_iterator_wrapper(iterable, close): try: yield from iterable finally: request_finished.disconnect(close_old_connections) close() # will fire request_finished request_finished.connect(close_old_connections) def conditional_content_removal(request, response): """ Simulate the behavior of most web servers by removing the content of responses for HEAD requests, 1xx, 204, and 304 responses. Ensure compliance with RFC 7230, section 3.3.3. 
""" if 100 <= response.status_code < 200 or response.status_code in (204, 304): if response.streaming: response.streaming_content = [] else: response.content = b"" if request.method == "HEAD": if response.streaming: response.streaming_content = [] else: response.content = b"" return response class ClientHandler(BaseHandler): """ An HTTP Handler that can be used for testing purposes. Use the WSGI interface to compose requests, but return the raw HttpResponse object with the originating WSGIRequest attached to its ``wsgi_request`` attribute. """ def __init__(self, enforce_csrf_checks=True, *args, **kwargs): self.enforce_csrf_checks = enforce_csrf_checks super().__init__(*args, **kwargs) def __call__(self, environ): # Set up middleware if needed. We couldn't do this earlier, because # settings weren't available. if self._middleware_chain is None: self.load_middleware() request_started.disconnect(close_old_connections) request_started.send(sender=self.__class__, environ=environ) request_started.connect(close_old_connections) request = WSGIRequest(environ) # sneaky little hack so that we can easily get round # CsrfViewMiddleware. This makes life easier, and is probably # required for backwards compatibility with external tests against # admin views. request._dont_enforce_csrf_checks = not self.enforce_csrf_checks # Request goes through middleware. response = self.get_response(request) # Simulate behaviors of most web servers. conditional_content_removal(request, response) # Attach the originating request to the response so that it could be # later retrieved. response.wsgi_request = request # Emulate a WSGI server by calling the close method on completion. if response.streaming: response.streaming_content = closing_iterator_wrapper( response.streaming_content, response.close ) else: request_finished.disconnect(close_old_connections) response.close() # will fire request_finished request_finished.connect(close_old_connections) return response class AsyncClientHandler(BaseHandler): """An async version of ClientHandler.""" def __init__(self, enforce_csrf_checks=True, *args, **kwargs): self.enforce_csrf_checks = enforce_csrf_checks super().__init__(*args, **kwargs) async def __call__(self, scope): # Set up middleware if needed. We couldn't do this earlier, because # settings weren't available. if self._middleware_chain is None: self.load_middleware(is_async=True) # Extract body file from the scope, if provided. if "_body_file" in scope: body_file = scope.pop("_body_file") else: body_file = FakePayload("") request_started.disconnect(close_old_connections) await sync_to_async(request_started.send, thread_sensitive=False)( sender=self.__class__, scope=scope ) request_started.connect(close_old_connections) # Wrap FakePayload body_file to allow large read() in test environment. request = ASGIRequest(scope, LimitedStream(body_file, len(body_file))) # Sneaky little hack so that we can easily get round # CsrfViewMiddleware. This makes life easier, and is probably required # for backwards compatibility with external tests against admin views. request._dont_enforce_csrf_checks = not self.enforce_csrf_checks # Request goes through middleware. response = await self.get_response_async(request) # Simulate behaviors of most web servers. conditional_content_removal(request, response) # Attach the originating ASGI request to the response so that it could # be later retrieved. response.asgi_request = request # Emulate a server by calling the close method on completion. 
if response.streaming: response.streaming_content = await sync_to_async( closing_iterator_wrapper, thread_sensitive=False )( response.streaming_content, response.close, ) else: request_finished.disconnect(close_old_connections) # Will fire request_finished. await sync_to_async(response.close, thread_sensitive=False)() request_finished.connect(close_old_connections) return response def store_rendered_templates(store, signal, sender, template, context, **kwargs): """ Store templates and contexts that are rendered. The context is copied so that it is an accurate representation at the time of rendering. """ store.setdefault("templates", []).append(template) if "context" not in store: store["context"] = ContextList() store["context"].append(copy(context)) def encode_multipart(boundary, data): """ Encode multipart POST data from a dictionary of form values. The key will be used as the form data name; the value will be transmitted as content. If the value is a file, the contents of the file will be sent as an application/octet-stream; otherwise, str(value) will be sent. """ lines = [] def to_bytes(s): return force_bytes(s, settings.DEFAULT_CHARSET) # Not by any means perfect, but good enough for our purposes. def is_file(thing): return hasattr(thing, "read") and callable(thing.read) # Each bit of the multipart form data could be either a form value or a # file, or a *list* of form values and/or files. Remember that HTTP field # names can be duplicated! for (key, value) in data.items(): if value is None: raise TypeError( "Cannot encode None for key '%s' as POST data. Did you mean " "to pass an empty string or omit the value?" % key ) elif is_file(value): lines.extend(encode_file(boundary, key, value)) elif not isinstance(value, str) and is_iterable(value): for item in value: if is_file(item): lines.extend(encode_file(boundary, key, item)) else: lines.extend( to_bytes(val) for val in [ "--%s" % boundary, 'Content-Disposition: form-data; name="%s"' % key, "", item, ] ) else: lines.extend( to_bytes(val) for val in [ "--%s" % boundary, 'Content-Disposition: form-data; name="%s"' % key, "", value, ] ) lines.extend( [ to_bytes("--%s--" % boundary), b"", ] ) return b"\r\n".join(lines) def encode_file(boundary, key, file): def to_bytes(s): return force_bytes(s, settings.DEFAULT_CHARSET) # file.name might not be a string. For example, it's an int for # tempfile.TemporaryFile(). file_has_string_name = hasattr(file, "name") and isinstance(file.name, str) filename = os.path.basename(file.name) if file_has_string_name else "" if hasattr(file, "content_type"): content_type = file.content_type elif filename: content_type = mimetypes.guess_type(filename)[0] else: content_type = None if content_type is None: content_type = "application/octet-stream" filename = filename or key return [ to_bytes("--%s" % boundary), to_bytes( 'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename) ), to_bytes("Content-Type: %s" % content_type), b"", to_bytes(file.read()), ] class RequestFactory: """ Class that lets you create mock Request objects for use in testing. Usage: rf = RequestFactory() get_request = rf.get('/hello/') post_request = rf.post('/submit/', {'foo': 'bar'}) Once you have a request object you can pass it to any view function, just as if that view had been hooked up using a URLconf. 
""" def __init__(self, *, json_encoder=DjangoJSONEncoder, **defaults): self.json_encoder = json_encoder self.defaults = defaults self.cookies = SimpleCookie() self.errors = BytesIO() def _base_environ(self, **request): """ The base environment for a request. """ # This is a minimal valid WSGI environ dictionary, plus: # - HTTP_COOKIE: for cookie support, # - REMOTE_ADDR: often useful, see #8551. # See https://www.python.org/dev/peps/pep-3333/#environ-variables return { "HTTP_COOKIE": "; ".join( sorted( "%s=%s" % (morsel.key, morsel.coded_value) for morsel in self.cookies.values() ) ), "PATH_INFO": "/", "REMOTE_ADDR": "127.0.0.1", "REQUEST_METHOD": "GET", "SCRIPT_NAME": "", "SERVER_NAME": "testserver", "SERVER_PORT": "80", "SERVER_PROTOCOL": "HTTP/1.1", "wsgi.version": (1, 0), "wsgi.url_scheme": "http", "wsgi.input": FakePayload(b""), "wsgi.errors": self.errors, "wsgi.multiprocess": True, "wsgi.multithread": False, "wsgi.run_once": False, **self.defaults, **request, } def request(self, **request): "Construct a generic request object." return WSGIRequest(self._base_environ(**request)) def _encode_data(self, data, content_type): if content_type is MULTIPART_CONTENT: return encode_multipart(BOUNDARY, data) else: # Encode the content so that the byte representation is correct. match = CONTENT_TYPE_RE.match(content_type) if match: charset = match[1] else: charset = settings.DEFAULT_CHARSET return force_bytes(data, encoding=charset) def _encode_json(self, data, content_type): """ Return encoded JSON if data is a dict, list, or tuple and content_type is application/json. """ should_encode = JSON_CONTENT_TYPE_RE.match(content_type) and isinstance( data, (dict, list, tuple) ) return json.dumps(data, cls=self.json_encoder) if should_encode else data def _get_path(self, parsed): path = parsed.path # If there are parameters, add them if parsed.params: path += ";" + parsed.params path = unquote_to_bytes(path) # Replace the behavior where non-ASCII values in the WSGI environ are # arbitrarily decoded with ISO-8859-1. # Refs comment in `get_bytes_from_wsgi()`. return path.decode("iso-8859-1") def get(self, path, data=None, secure=False, **extra): """Construct a GET request.""" data = {} if data is None else data return self.generic( "GET", path, secure=secure, **{ "QUERY_STRING": urlencode(data, doseq=True), **extra, }, ) def post( self, path, data=None, content_type=MULTIPART_CONTENT, secure=False, **extra ): """Construct a POST request.""" data = self._encode_json({} if data is None else data, content_type) post_data = self._encode_data(data, content_type) return self.generic( "POST", path, post_data, content_type, secure=secure, **extra ) def head(self, path, data=None, secure=False, **extra): """Construct a HEAD request.""" data = {} if data is None else data return self.generic( "HEAD", path, secure=secure, **{ "QUERY_STRING": urlencode(data, doseq=True), **extra, }, ) def trace(self, path, secure=False, **extra): """Construct a TRACE request.""" return self.generic("TRACE", path, secure=secure, **extra) def options( self, path, data="", content_type="application/octet-stream", secure=False, **extra, ): "Construct an OPTIONS request." 
return self.generic("OPTIONS", path, data, content_type, secure=secure, **extra) def put( self, path, data="", content_type="application/octet-stream", secure=False, **extra, ): """Construct a PUT request.""" data = self._encode_json(data, content_type) return self.generic("PUT", path, data, content_type, secure=secure, **extra) def patch( self, path, data="", content_type="application/octet-stream", secure=False, **extra, ): """Construct a PATCH request.""" data = self._encode_json(data, content_type) return self.generic("PATCH", path, data, content_type, secure=secure, **extra) def delete( self, path, data="", content_type="application/octet-stream", secure=False, **extra, ): """Construct a DELETE request.""" data = self._encode_json(data, content_type) return self.generic("DELETE", path, data, content_type, secure=secure, **extra) def generic( self, method, path, data="", content_type="application/octet-stream", secure=False, **extra, ): """Construct an arbitrary HTTP request.""" parsed = urlparse(str(path)) # path can be lazy data = force_bytes(data, settings.DEFAULT_CHARSET) r = { "PATH_INFO": self._get_path(parsed), "REQUEST_METHOD": method, "SERVER_PORT": "443" if secure else "80", "wsgi.url_scheme": "https" if secure else "http", } if data: r.update( { "CONTENT_LENGTH": str(len(data)), "CONTENT_TYPE": content_type, "wsgi.input": FakePayload(data), } ) r.update(extra) # If QUERY_STRING is absent or empty, we want to extract it from the URL. if not r.get("QUERY_STRING"): # WSGI requires latin-1 encoded strings. See get_path_info(). query_string = parsed[4].encode().decode("iso-8859-1") r["QUERY_STRING"] = query_string return self.request(**r) class AsyncRequestFactory(RequestFactory): """ Class that lets you create mock ASGI-like Request objects for use in testing. Usage: rf = AsyncRequestFactory() get_request = await rf.get('/hello/') post_request = await rf.post('/submit/', {'foo': 'bar'}) Once you have a request object you can pass it to any view function, including synchronous ones. The reason we have a separate class here is: a) this makes ASGIRequest subclasses, and b) AsyncTestClient can subclass it. """ def _base_scope(self, **request): """The base scope for a request.""" # This is a minimal valid ASGI scope, plus: # - headers['cookie'] for cookie support, # - 'client' often useful, see #8551. scope = { "asgi": {"version": "3.0"}, "type": "http", "http_version": "1.1", "client": ["127.0.0.1", 0], "server": ("testserver", "80"), "scheme": "http", "method": "GET", "headers": [], **self.defaults, **request, } scope["headers"].append( ( b"cookie", b"; ".join( sorted( ("%s=%s" % (morsel.key, morsel.coded_value)).encode("ascii") for morsel in self.cookies.values() ) ), ) ) return scope def request(self, **request): """Construct a generic request object.""" # This is synchronous, which means all methods on this class are. # AsyncClient, however, has an async request function, which makes all # its methods async. if "_body_file" in request: body_file = request.pop("_body_file") else: body_file = FakePayload("") # Wrap FakePayload body_file to allow large read() in test environment. return ASGIRequest( self._base_scope(**request), LimitedStream(body_file, len(body_file)) ) def generic( self, method, path, data="", content_type="application/octet-stream", secure=False, **extra, ): """Construct an arbitrary HTTP request.""" parsed = urlparse(str(path)) # path can be lazy. 
data = force_bytes(data, settings.DEFAULT_CHARSET) s = { "method": method, "path": self._get_path(parsed), "server": ("127.0.0.1", "443" if secure else "80"), "scheme": "https" if secure else "http", "headers": [(b"host", b"testserver")], } if data: s["headers"].extend( [ (b"content-length", str(len(data)).encode("ascii")), (b"content-type", content_type.encode("ascii")), ] ) s["_body_file"] = FakePayload(data) follow = extra.pop("follow", None) if follow is not None: s["follow"] = follow if query_string := extra.pop("QUERY_STRING", None): s["query_string"] = query_string s["headers"] += [ (key.lower().encode("ascii"), value.encode("latin1")) for key, value in extra.items() ] # If QUERY_STRING is absent or empty, we want to extract it from the # URL. if not s.get("query_string"): s["query_string"] = parsed[4] return self.request(**s) class ClientMixin: """ Mixin with common methods between Client and AsyncClient. """ def store_exc_info(self, **kwargs): """Store exceptions when they are generated by a view.""" self.exc_info = sys.exc_info() def check_exception(self, response): """ Look for a signaled exception, clear the current context exception data, re-raise the signaled exception, and clear the signaled exception from the local cache. """ response.exc_info = self.exc_info if self.exc_info: _, exc_value, _ = self.exc_info self.exc_info = None if self.raise_request_exception: raise exc_value @property def session(self): """Return the current session variables.""" engine = import_module(settings.SESSION_ENGINE) cookie = self.cookies.get(settings.SESSION_COOKIE_NAME) if cookie: return engine.SessionStore(cookie.value) session = engine.SessionStore() session.save() self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key return session def login(self, **credentials): """ Set the Factory to appear as if it has successfully logged into a site. Return True if login is possible or False if the provided credentials are incorrect. """ from django.contrib.auth import authenticate user = authenticate(**credentials) if user: self._login(user) return True return False def force_login(self, user, backend=None): def get_backend(): from django.contrib.auth import load_backend for backend_path in settings.AUTHENTICATION_BACKENDS: backend = load_backend(backend_path) if hasattr(backend, "get_user"): return backend_path if backend is None: backend = get_backend() user.backend = backend self._login(user, backend) def _login(self, user, backend=None): from django.contrib.auth import login # Create a fake request to store login details. request = HttpRequest() if self.session: request.session = self.session else: engine = import_module(settings.SESSION_ENGINE) request.session = engine.SessionStore() login(request, user, backend) # Save the session values. request.session.save() # Set the cookie to represent the session. 
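        # (Editorial note) This cookie is what keeps the test client
        # "logged in": after force_login()/login(), every subsequent
        # request made through the client sends the session cookie back.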
session_cookie = settings.SESSION_COOKIE_NAME self.cookies[session_cookie] = request.session.session_key cookie_data = { "max-age": None, "path": "/", "domain": settings.SESSION_COOKIE_DOMAIN, "secure": settings.SESSION_COOKIE_SECURE or None, "expires": None, } self.cookies[session_cookie].update(cookie_data) def logout(self): """Log out the user by removing the cookies and session object.""" from django.contrib.auth import get_user, logout request = HttpRequest() if self.session: request.session = self.session request.user = get_user(request) else: engine = import_module(settings.SESSION_ENGINE) request.session = engine.SessionStore() logout(request) self.cookies = SimpleCookie() def _parse_json(self, response, **extra): if not hasattr(response, "_json"): if not JSON_CONTENT_TYPE_RE.match(response.get("Content-Type")): raise ValueError( 'Content-Type header is "%s", not "application/json"' % response.get("Content-Type") ) response._json = json.loads( response.content.decode(response.charset), **extra ) return response._json class Client(ClientMixin, RequestFactory): """ A class that can act as a client for testing purposes. It allows the user to compose GET and POST requests, and obtain the response that the server gave to those requests. The server Response objects are annotated with the details of the contexts and templates that were rendered during the process of serving the request. Client objects are stateful - they will retain cookie (and thus session) details for the lifetime of the Client instance. This is not intended as a replacement for Twill/Selenium or the like - it is here to allow testing against the contexts and templates produced by a view, rather than the HTML rendered to the end-user. """ def __init__( self, enforce_csrf_checks=False, raise_request_exception=True, **defaults ): super().__init__(**defaults) self.handler = ClientHandler(enforce_csrf_checks) self.raise_request_exception = raise_request_exception self.exc_info = None self.extra = None def request(self, **request): """ Make a generic request. Compose the environment dictionary and pass to the handler, return the result of the handler. Assume defaults for the query environment, which can be overridden using the arguments to the request. """ environ = self._base_environ(**request) # Curry a data dictionary into an instance of the template renderer # callback function. data = {} on_template_render = partial(store_rendered_templates, data) signal_uid = "template-render-%s" % id(request) signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid) # Capture exceptions created by the handler. exception_uid = "request-exception-%s" % id(request) got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid) try: response = self.handler(environ) finally: signals.template_rendered.disconnect(dispatch_uid=signal_uid) got_request_exception.disconnect(dispatch_uid=exception_uid) # Check for signaled exceptions. self.check_exception(response) # Save the client and request that stimulated the response. response.client = self response.request = request # Add any rendered template detail to the response. response.templates = data.get("templates", []) response.context = data.get("context") response.json = partial(self._parse_json, response) # Attach the ResolverMatch instance to the response. urlconf = getattr(response.wsgi_request, "urlconf", None) response.resolver_match = SimpleLazyObject( lambda: resolve(request["PATH_INFO"], urlconf=urlconf), ) # Flatten a single context. 
Not really necessary anymore thanks to the # __getattr__ flattening in ContextList, but has some edge case # backwards compatibility implications. if response.context and len(response.context) == 1: response.context = response.context[0] # Update persistent cookie data. if response.cookies: self.cookies.update(response.cookies) return response def get(self, path, data=None, follow=False, secure=False, **extra): """Request a response from the server using GET.""" self.extra = extra response = super().get(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def post( self, path, data=None, content_type=MULTIPART_CONTENT, follow=False, secure=False, **extra, ): """Request a response from the server using POST.""" self.extra = extra response = super().post( path, data=data, content_type=content_type, secure=secure, **extra ) if follow: response = self._handle_redirects( response, data=data, content_type=content_type, **extra ) return response def head(self, path, data=None, follow=False, secure=False, **extra): """Request a response from the server using HEAD.""" self.extra = extra response = super().head(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def options( self, path, data="", content_type="application/octet-stream", follow=False, secure=False, **extra, ): """Request a response from the server using OPTIONS.""" self.extra = extra response = super().options( path, data=data, content_type=content_type, secure=secure, **extra ) if follow: response = self._handle_redirects( response, data=data, content_type=content_type, **extra ) return response def put( self, path, data="", content_type="application/octet-stream", follow=False, secure=False, **extra, ): """Send a resource to the server using PUT.""" self.extra = extra response = super().put( path, data=data, content_type=content_type, secure=secure, **extra ) if follow: response = self._handle_redirects( response, data=data, content_type=content_type, **extra ) return response def patch( self, path, data="", content_type="application/octet-stream", follow=False, secure=False, **extra, ): """Send a resource to the server using PATCH.""" self.extra = extra response = super().patch( path, data=data, content_type=content_type, secure=secure, **extra ) if follow: response = self._handle_redirects( response, data=data, content_type=content_type, **extra ) return response def delete( self, path, data="", content_type="application/octet-stream", follow=False, secure=False, **extra, ): """Send a DELETE request to the server.""" self.extra = extra response = super().delete( path, data=data, content_type=content_type, secure=secure, **extra ) if follow: response = self._handle_redirects( response, data=data, content_type=content_type, **extra ) return response def trace(self, path, data="", follow=False, secure=False, **extra): """Send a TRACE request to the server.""" self.extra = extra response = super().trace(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def _handle_redirects(self, response, data="", content_type="", **extra): """ Follow any redirects by requesting responses from the server using GET. 
""" response.redirect_chain = [] redirect_status_codes = ( HTTPStatus.MOVED_PERMANENTLY, HTTPStatus.FOUND, HTTPStatus.SEE_OTHER, HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT, ) while response.status_code in redirect_status_codes: response_url = response.url redirect_chain = response.redirect_chain redirect_chain.append((response_url, response.status_code)) url = urlsplit(response_url) if url.scheme: extra["wsgi.url_scheme"] = url.scheme if url.hostname: extra["SERVER_NAME"] = url.hostname if url.port: extra["SERVER_PORT"] = str(url.port) path = url.path # RFC 2616: bare domains without path are treated as the root. if not path and url.netloc: path = "/" # Prepend the request path to handle relative path redirects if not path.startswith("/"): path = urljoin(response.request["PATH_INFO"], path) if response.status_code in ( HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT, ): # Preserve request method and query string (if needed) # post-redirect for 307/308 responses. request_method = response.request["REQUEST_METHOD"].lower() if request_method not in ("get", "head"): extra["QUERY_STRING"] = url.query request_method = getattr(self, request_method) else: request_method = self.get data = QueryDict(url.query) content_type = None response = request_method( path, data=data, content_type=content_type, follow=False, **extra ) response.redirect_chain = redirect_chain if redirect_chain[-1] in redirect_chain[:-1]: # Check that we're not redirecting to somewhere we've already # been to, to prevent loops. raise RedirectCycleError( "Redirect loop detected.", last_response=response ) if len(redirect_chain) > 20: # Such a lengthy chain likely also means a loop, but one with # a growing path, changing view, or changing query argument; # 20 is the value of "network.http.redirection-limit" from Firefox. raise RedirectCycleError("Too many redirects.", last_response=response) return response class AsyncClient(ClientMixin, AsyncRequestFactory): """ An async version of Client that creates ASGIRequests and calls through an async request path. Does not currently support "follow" on its methods. """ def __init__( self, enforce_csrf_checks=False, raise_request_exception=True, **defaults ): super().__init__(**defaults) self.handler = AsyncClientHandler(enforce_csrf_checks) self.raise_request_exception = raise_request_exception self.exc_info = None self.extra = None async def request(self, **request): """ Make a generic request. Compose the scope dictionary and pass to the handler, return the result of the handler. Assume defaults for the query environment, which can be overridden using the arguments to the request. """ if "follow" in request: raise NotImplementedError( "AsyncClient request methods do not accept the follow parameter." ) scope = self._base_scope(**request) # Curry a data dictionary into an instance of the template renderer # callback function. data = {} on_template_render = partial(store_rendered_templates, data) signal_uid = "template-render-%s" % id(request) signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid) # Capture exceptions created by the handler. exception_uid = "request-exception-%s" % id(request) got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid) try: response = await self.handler(scope) finally: signals.template_rendered.disconnect(dispatch_uid=signal_uid) got_request_exception.disconnect(dispatch_uid=exception_uid) # Check for signaled exceptions. 
self.check_exception(response) # Save the client and request that stimulated the response. response.client = self response.request = request # Add any rendered template detail to the response. response.templates = data.get("templates", []) response.context = data.get("context") response.json = partial(self._parse_json, response) # Attach the ResolverMatch instance to the response. urlconf = getattr(response.asgi_request, "urlconf", None) response.resolver_match = SimpleLazyObject( lambda: resolve(request["path"], urlconf=urlconf), ) # Flatten a single context. Not really necessary anymore thanks to the # __getattr__ flattening in ContextList, but has some edge case # backwards compatibility implications. if response.context and len(response.context) == 1: response.context = response.context[0] # Update persistent cookie data. if response.cookies: self.cookies.update(response.cookies) return response
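

# Editorial usage sketch, not part of this module. It assumes a URLconf
# serving a hypothetical "/hello/" route and shows the sync and async
# clients side by side; AsyncClient rejects the follow parameter.
def _demo_client_usage():
    from asgiref.sync import async_to_sync

    client = Client()
    # follow=True walks redirects; redirect_chain holds (url, status) pairs.
    response = client.get("/hello/", follow=True)
    chain = response.redirect_chain

    async_client = AsyncClient()
    response = async_to_sync(async_client.get)("/hello/")
    return chain, response.status_code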
f10f9e5803cc9ded8ce2dc3ba04c22568739e52ad8bd71cdf00bd40e4a491ba3
""" Accessors for related objects. When a field defines a relation between two models, each model class provides an attribute to access related instances of the other model class (unless the reverse accessor has been disabled with related_name='+'). Accessors are implemented as descriptors in order to customize access and assignment. This module defines the descriptor classes. Forward accessors follow foreign keys. Reverse accessors trace them back. For example, with the following models:: class Parent(Model): pass class Child(Model): parent = ForeignKey(Parent, related_name='children') ``child.parent`` is a forward many-to-one relation. ``parent.children`` is a reverse many-to-one relation. There are three types of relations (many-to-one, one-to-one, and many-to-many) and two directions (forward and reverse) for a total of six combinations. 1. Related instance on the forward side of a many-to-one relation: ``ForwardManyToOneDescriptor``. Uniqueness of foreign key values is irrelevant to accessing the related instance, making the many-to-one and one-to-one cases identical as far as the descriptor is concerned. The constraint is checked upstream (unicity validation in forms) or downstream (unique indexes in the database). 2. Related instance on the forward side of a one-to-one relation: ``ForwardOneToOneDescriptor``. It avoids querying the database when accessing the parent link field in a multi-table inheritance scenario. 3. Related instance on the reverse side of a one-to-one relation: ``ReverseOneToOneDescriptor``. One-to-one relations are asymmetrical, despite the apparent symmetry of the name, because they're implemented in the database with a foreign key from one table to another. As a consequence ``ReverseOneToOneDescriptor`` is slightly different from ``ForwardManyToOneDescriptor``. 4. Related objects manager for related instances on the reverse side of a many-to-one relation: ``ReverseManyToOneDescriptor``. Unlike the previous two classes, this one provides access to a collection of objects. It returns a manager rather than an instance. 5. Related objects manager for related instances on the forward or reverse sides of a many-to-many relation: ``ManyToManyDescriptor``. Many-to-many relations are symmetrical. The syntax of Django models requires declaring them on one side but that's an implementation detail. They could be declared on the other side without any change in behavior. Therefore the forward and reverse descriptors can be the same. If you're looking for ``ForwardManyToManyDescriptor`` or ``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead. 
""" from asgiref.sync import sync_to_async from django.core.exceptions import FieldError from django.db import ( DEFAULT_DB_ALIAS, NotSupportedError, connections, router, transaction, ) from django.db.models import Q, Window, signals from django.db.models.functions import RowNumber from django.db.models.lookups import GreaterThan, LessThanOrEqual from django.db.models.query import QuerySet from django.db.models.query_utils import DeferredAttribute from django.db.models.utils import AltersData, resolve_callables from django.utils.functional import cached_property class ForeignKeyDeferredAttribute(DeferredAttribute): def __set__(self, instance, value): if instance.__dict__.get(self.field.attname) != value and self.field.is_cached( instance ): self.field.delete_cached_value(instance) instance.__dict__[self.field.attname] = value def _filter_prefetch_queryset(queryset, field_name, instances): predicate = Q(**{f"{field_name}__in": instances}) db = queryset._db or DEFAULT_DB_ALIAS if queryset.query.is_sliced: if not connections[db].features.supports_over_clause: raise NotSupportedError( "Prefetching from a limited queryset is only supported on backends " "that support window functions." ) low_mark, high_mark = queryset.query.low_mark, queryset.query.high_mark order_by = [ expr for expr, _ in queryset.query.get_compiler(using=db).get_order_by() ] window = Window(RowNumber(), partition_by=field_name, order_by=order_by) predicate &= GreaterThan(window, low_mark) if high_mark is not None: predicate &= LessThanOrEqual(window, high_mark) queryset.query.clear_limits() return queryset.filter(predicate) class ForwardManyToOneDescriptor: """ Accessor to the related object on the forward side of a many-to-one or one-to-one (via ForwardOneToOneDescriptor subclass) relation. In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') ``Child.parent`` is a ``ForwardManyToOneDescriptor`` instance. """ def __init__(self, field_with_rel): self.field = field_with_rel @cached_property def RelatedObjectDoesNotExist(self): # The exception can't be created at initialization time since the # related model might not be resolved yet; `self.field.model` might # still be a string model reference. return type( "RelatedObjectDoesNotExist", (self.field.remote_field.model.DoesNotExist, AttributeError), { "__module__": self.field.model.__module__, "__qualname__": "%s.%s.RelatedObjectDoesNotExist" % ( self.field.model.__qualname__, self.field.name, ), }, ) def is_cached(self, instance): return self.field.is_cached(instance) def get_queryset(self, **hints): return self.field.remote_field.model._base_manager.db_manager(hints=hints).all() def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = self.get_queryset() queryset._add_hints(instance=instances[0]) rel_obj_attr = self.field.get_foreign_related_value instance_attr = self.field.get_local_related_value instances_dict = {instance_attr(inst): inst for inst in instances} related_field = self.field.foreign_related_fields[0] remote_field = self.field.remote_field # FIXME: This will need to be revisited when we introduce support for # composite fields. In the meantime we take this practical approach to # solve a regression on 1.6 when the reverse manager in hidden # (related_name ends with a '+'). Refs #21410. # The check for len(...) == 1 is a special case that allows the query # to be join-less and smaller. Refs #21760. 
if remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1: query = { "%s__in" % related_field.name: {instance_attr(inst)[0] for inst in instances} } else: query = {"%s__in" % self.field.related_query_name(): instances} queryset = queryset.filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. if not remote_field.multiple: for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] remote_field.set_cached_value(rel_obj, instance) return ( queryset, rel_obj_attr, instance_attr, True, self.field.get_cache_name(), False, ) def get_object(self, instance): qs = self.get_queryset(instance=instance) # Assuming the database enforces foreign keys, this won't fail. return qs.get(self.field.get_reverse_related_filter(instance)) def __get__(self, instance, cls=None): """ Get the related instance through the forward relation. With the example above, when getting ``child.parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``cls`` is the ``Child`` class (we don't need it) """ if instance is None: return self # The related instance is loaded from the database and then cached # by the field on the model instance state. It can also be pre-cached # by the reverse accessor (ReverseOneToOneDescriptor). try: rel_obj = self.field.get_cached_value(instance) except KeyError: has_value = None not in self.field.get_local_related_value(instance) ancestor_link = ( instance._meta.get_ancestor_link(self.field.model) if has_value else None ) if ancestor_link and ancestor_link.is_cached(instance): # An ancestor link will exist if this field is defined on a # multi-table inheritance parent of the instance's class. ancestor = ancestor_link.get_cached_value(instance) # The value might be cached on an ancestor if the instance # originated from walking down the inheritance chain. rel_obj = self.field.get_cached_value(ancestor, default=None) else: rel_obj = None if rel_obj is None and has_value: rel_obj = self.get_object(instance) remote_field = self.field.remote_field # If this is a one-to-one relation, set the reverse accessor # cache on the related object to the current instance to avoid # an extra SQL query if it's accessed later on. if not remote_field.multiple: remote_field.set_cached_value(rel_obj, instance) self.field.set_cached_value(instance, rel_obj) if rel_obj is None and not self.field.null: raise self.RelatedObjectDoesNotExist( "%s has no %s." % (self.field.model.__name__, self.field.name) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the forward relation. With the example above, when setting ``child.parent = parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``value`` is the ``parent`` instance on the right of the equal sign """ # An object must be an instance of the related class. if value is not None and not isinstance( value, self.field.remote_field.model._meta.concrete_model ): raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' 
% ( value, instance._meta.object_name, self.field.name, self.field.remote_field.model._meta.object_name, ) ) elif value is not None: if instance._state.db is None: instance._state.db = router.db_for_write( instance.__class__, instance=value ) if value._state.db is None: value._state.db = router.db_for_write( value.__class__, instance=instance ) if not router.allow_relation(value, instance): raise ValueError( 'Cannot assign "%r": the current database router prevents this ' "relation." % value ) remote_field = self.field.remote_field # If we're setting the value of a OneToOneField to None, we need to clear # out the cache on any old related object. Otherwise, deleting the # previously-related object will also cause this object to be deleted, # which is wrong. if value is None: # Look up the previously-related object, which may still be available # since we've not yet cleared out the related field. # Use the cache directly, instead of the accessor; if we haven't # populated the cache, then we don't care - we're only accessing # the object to invalidate the accessor cache, so there's no # need to populate the cache just to expire it again. related = self.field.get_cached_value(instance, default=None) # If we've got an old related object, we need to clear out its # cache. This cache also might not exist if the related object # hasn't been accessed yet. if related is not None: remote_field.set_cached_value(related, None) for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, None) # Set the values of the related field. else: for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, getattr(value, rh_field.attname)) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. self.field.set_cached_value(instance, value) # If this is a one-to-one relation, set the reverse accessor cache on # the related object to the current instance to avoid an extra SQL # query if it's accessed later on. if value is not None and not remote_field.multiple: remote_field.set_cached_value(value, instance) def __reduce__(self): """ Pickling should return the instance attached by self.field on the model, not a new copy of that descriptor. Use getattr() to retrieve the instance directly from the model. """ return getattr, (self.field.model, self.field.name) class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor): """ Accessor to the related object on the forward side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``Restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance. """ def get_object(self, instance): if self.field.remote_field.parent_link: deferred = instance.get_deferred_fields() # Because it's a parent link, all the data is available in the # instance, so populate the parent model with this data. rel_model = self.field.remote_field.model fields = [field.attname for field in rel_model._meta.concrete_fields] # If any of the related model's fields are deferred, fallback to # fetching all fields from the related model. This avoids a query # on the related model for every deferred field. 
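        # (Editorial illustration, hypothetical MTI models) Given
        #     class Place(Model): ...
        #     class Restaurant(Place): ...
        # accessing ``restaurant.place_ptr`` with nothing deferred builds
        # the Place from attributes already on ``restaurant`` and runs no
        # query at all.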
if not any(field in fields for field in deferred): kwargs = {field: getattr(instance, field) for field in fields} obj = rel_model(**kwargs) obj._state.adding = instance._state.adding obj._state.db = instance._state.db return obj return super().get_object(instance) def __set__(self, instance, value): super().__set__(instance, value) # If the primary key is a link to a parent model and a parent instance # is being set, update the value of the inherited pk(s). if self.field.primary_key and self.field.remote_field.parent_link: opts = instance._meta # Inherited primary key fields from this object's base classes. inherited_pk_fields = [ field for field in opts.concrete_fields if field.primary_key and field.remote_field ] for field in inherited_pk_fields: rel_model_pk_name = field.remote_field.model._meta.pk.attname raw_value = ( getattr(value, rel_model_pk_name) if value is not None else None ) setattr(instance, rel_model_pk_name, raw_value) class ReverseOneToOneDescriptor: """ Accessor to the related object on the reverse side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``Place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance. """ def __init__(self, related): # Following the example above, `related` is an instance of OneToOneRel # which represents the reverse restaurant field (place.restaurant). self.related = related @cached_property def RelatedObjectDoesNotExist(self): # The exception isn't created at initialization time for the sake of # consistency with `ForwardManyToOneDescriptor`. return type( "RelatedObjectDoesNotExist", (self.related.related_model.DoesNotExist, AttributeError), { "__module__": self.related.model.__module__, "__qualname__": "%s.%s.RelatedObjectDoesNotExist" % ( self.related.model.__qualname__, self.related.name, ), }, ) def is_cached(self, instance): return self.related.is_cached(instance) def get_queryset(self, **hints): return self.related.related_model._base_manager.db_manager(hints=hints).all() def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = self.get_queryset() queryset._add_hints(instance=instances[0]) rel_obj_attr = self.related.field.get_local_related_value instance_attr = self.related.field.get_foreign_related_value instances_dict = {instance_attr(inst): inst for inst in instances} query = {"%s__in" % self.related.field.name: instances} queryset = queryset.filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] self.related.field.set_cached_value(rel_obj, instance) return ( queryset, rel_obj_attr, instance_attr, True, self.related.get_cache_name(), False, ) def __get__(self, instance, cls=None): """ Get the related instance through the reverse relation. With the example above, when getting ``place.restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``cls`` is the ``Place`` class (unused) Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ if instance is None: return self # The related instance is loaded from the database and then cached # by the field on the model instance state. It can also be pre-cached # by the forward accessor (ForwardManyToOneDescriptor). 
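        # (Editorial illustration) With the Restaurant/Place example:
        #     place.restaurant         # one query; result cached on place
        #     place.restaurant         # cache hit; no query
        #     place.restaurant.place   # pre-cached below; no query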
try: rel_obj = self.related.get_cached_value(instance) except KeyError: related_pk = instance.pk if related_pk is None: rel_obj = None else: filter_args = self.related.field.get_forward_related_filter(instance) try: rel_obj = self.get_queryset(instance=instance).get(**filter_args) except self.related.related_model.DoesNotExist: rel_obj = None else: # Set the forward accessor cache on the related object to # the current instance to avoid an extra SQL query if it's # accessed later on. self.related.field.set_cached_value(rel_obj, instance) self.related.set_cached_value(instance, rel_obj) if rel_obj is None: raise self.RelatedObjectDoesNotExist( "%s has no %s." % (instance.__class__.__name__, self.related.get_accessor_name()) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the reverse relation. With the example above, when setting ``place.restaurant = restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``value`` is the ``restaurant`` instance on the right of the equal sign Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ # The similarity of the code below to the code in # ForwardManyToOneDescriptor is annoying, but there's a bunch # of small differences that would make a common base class convoluted. if value is None: # Update the cached related instance (if any) & clear the cache. # Following the example above, this would be the cached # ``restaurant`` instance (if any). rel_obj = self.related.get_cached_value(instance, default=None) if rel_obj is not None: # Remove the ``restaurant`` instance from the ``place`` # instance cache. self.related.delete_cached_value(instance) # Set the ``place`` field on the ``restaurant`` # instance to None. setattr(rel_obj, self.related.field.name, None) elif not isinstance(value, self.related.related_model): # An object must be an instance of the related class. raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % ( value, instance._meta.object_name, self.related.get_accessor_name(), self.related.related_model._meta.object_name, ) ) else: if instance._state.db is None: instance._state.db = router.db_for_write( instance.__class__, instance=value ) if value._state.db is None: value._state.db = router.db_for_write( value.__class__, instance=instance ) if not router.allow_relation(value, instance): raise ValueError( 'Cannot assign "%r": the current database router prevents this ' "relation." % value ) related_pk = tuple( getattr(instance, field.attname) for field in self.related.field.foreign_related_fields ) # Set the value of the related field to the value of the related # object's related field. for index, field in enumerate(self.related.field.local_related_fields): setattr(value, field.attname, related_pk[index]) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. self.related.set_cached_value(instance, value) # Set the forward accessor cache on the related object to the current # instance to avoid an extra SQL query if it's accessed later on. self.related.field.set_cached_value(value, instance) def __reduce__(self): # Same purpose as ForwardManyToOneDescriptor.__reduce__(). return getattr, (self.related.model, self.related.name) class ReverseManyToOneDescriptor: """ Accessor to the related objects manager on the reverse side of a many-to-one relation. 
In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') ``Parent.children`` is a ``ReverseManyToOneDescriptor`` instance. Most of the implementation is delegated to a dynamically defined manager class built by ``create_forward_many_to_many_manager()`` defined below. """ def __init__(self, rel): self.rel = rel self.field = rel.field @cached_property def related_manager_cls(self): related_model = self.rel.related_model return create_reverse_many_to_one_manager( related_model._default_manager.__class__, self.rel, ) def __get__(self, instance, cls=None): """ Get the related objects through the reverse relation. With the example above, when getting ``parent.children``: - ``self`` is the descriptor managing the ``children`` attribute - ``instance`` is the ``parent`` instance - ``cls`` is the ``Parent`` class (unused) """ if instance is None: return self return self.related_manager_cls(instance) def _get_set_deprecation_msg_params(self): return ( "reverse side of a related set", self.rel.get_accessor_name(), ) def __set__(self, instance, value): raise TypeError( "Direct assignment to the %s is prohibited. Use %s.set() instead." % self._get_set_deprecation_msg_params(), ) def create_reverse_many_to_one_manager(superclass, rel): """ Create a manager for the reverse side of a many-to-one relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-one relations. """ class RelatedManager(superclass, AltersData): def __init__(self, instance): super().__init__() self.instance = instance self.model = rel.related_model self.field = rel.field self.core_filters = {self.field.name: instance} def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_reverse_many_to_one_manager(manager.__class__, rel) return manager_class(self.instance) do_not_call_in_templates = True def _check_fk_val(self): for field in self.field.foreign_related_fields: if getattr(self.instance, field.attname) is None: raise ValueError( f'"{self.instance!r}" needs to have a value for field ' f'"{field.attname}" before this relationship can be used.' ) def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. """ db = self._db or router.db_for_read(self.model, instance=self.instance) empty_strings_as_null = connections[ db ].features.interprets_empty_strings_as_nulls queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset._defer_next_filter = True queryset = queryset.filter(**self.core_filters) for field in self.field.foreign_related_fields: val = getattr(self.instance, field.attname) if val is None or (val == "" and empty_strings_as_null): return queryset.none() if self.field.many_to_one: # Guard against field-like objects such as GenericRelation # that abuse create_reverse_many_to_one_manager() with reverse # one-to-many relationships instead and break known related # objects assignment. try: target_field = self.field.target_field except FieldError: # The relationship has multiple target fields. Use a tuple # for related object id. 
rel_obj_id = tuple( [ getattr(self.instance, target_field.attname) for target_field in self.field.path_infos[-1].target_fields ] ) else: rel_obj_id = getattr(self.instance, target_field.attname) queryset._known_related_objects = { self.field: {rel_obj_id: self.instance} } return queryset def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop( self.field.remote_field.get_cache_name() ) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if self.instance.pk is None: raise ValueError( f"{self.instance.__class__.__name__!r} instance needs to have a " f"primary key value before this relationship can be used." ) try: return self.instance._prefetched_objects_cache[ self.field.remote_field.get_cache_name() ] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) rel_obj_attr = self.field.get_local_related_value instance_attr = self.field.get_foreign_related_value instances_dict = {instance_attr(inst): inst for inst in instances} queryset = _filter_prefetch_queryset(queryset, self.field.name, instances) # Since we just bypassed this class' get_queryset(), we must manage # the reverse relation manually. for rel_obj in queryset: if not self.field.is_cached(rel_obj): instance = instances_dict[rel_obj_attr(rel_obj)] setattr(rel_obj, self.field.name, instance) cache_name = self.field.remote_field.get_cache_name() return queryset, rel_obj_attr, instance_attr, False, cache_name, False def add(self, *objs, bulk=True): self._check_fk_val() self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) def check_and_update_obj(obj): if not isinstance(obj, self.model): raise TypeError( "'%s' instance expected, got %r" % ( self.model._meta.object_name, obj, ) ) setattr(obj, self.field.name, self.instance) if bulk: pks = [] for obj in objs: check_and_update_obj(obj) if obj._state.adding or obj._state.db != db: raise ValueError( "%r instance isn't saved. Use bulk=False or save " "the object first." 
% obj ) pks.append(obj.pk) self.model._base_manager.using(db).filter(pk__in=pks).update( **{ self.field.name: self.instance, } ) else: with transaction.atomic(using=db, savepoint=False): for obj in objs: check_and_update_obj(obj) obj.save() add.alters_data = True async def aadd(self, *objs, bulk=True): return await sync_to_async(self.add)(*objs, bulk=bulk) aadd.alters_data = True def create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).create(**kwargs) create.alters_data = True async def acreate(self, **kwargs): return await sync_to_async(self.create)(**kwargs) acreate.alters_data = True def get_or_create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs) get_or_create.alters_data = True async def aget_or_create(self, **kwargs): return await sync_to_async(self.get_or_create)(**kwargs) aget_or_create.alters_data = True def update_or_create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs) update_or_create.alters_data = True async def aupdate_or_create(self, **kwargs): return await sync_to_async(self.update_or_create)(**kwargs) aupdate_or_create.alters_data = True # remove() and clear() are only provided if the ForeignKey can have a # value of null. if rel.field.null: def remove(self, *objs, bulk=True): if not objs: return self._check_fk_val() val = self.field.get_foreign_related_value(self.instance) old_ids = set() for obj in objs: if not isinstance(obj, self.model): raise TypeError( "'%s' instance expected, got %r" % ( self.model._meta.object_name, obj, ) ) # Is obj actually part of this descriptor set? if self.field.get_local_related_value(obj) == val: old_ids.add(obj.pk) else: raise self.field.remote_field.model.DoesNotExist( "%r is not related to %r." % (obj, self.instance) ) self._clear(self.filter(pk__in=old_ids), bulk) remove.alters_data = True async def aremove(self, *objs, bulk=True): return await sync_to_async(self.remove)(*objs, bulk=bulk) aremove.alters_data = True def clear(self, *, bulk=True): self._check_fk_val() self._clear(self, bulk) clear.alters_data = True async def aclear(self, *, bulk=True): return await sync_to_async(self.clear)(bulk=bulk) aclear.alters_data = True def _clear(self, queryset, bulk): self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) queryset = queryset.using(db) if bulk: # `QuerySet.update()` is intrinsically atomic. queryset.update(**{self.field.name: None}) else: with transaction.atomic(using=db, savepoint=False): for obj in queryset: setattr(obj, self.field.name, None) obj.save(update_fields=[self.field.name]) _clear.alters_data = True def set(self, objs, *, bulk=True, clear=False): self._check_fk_val() # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. 
objs = tuple(objs) if self.field.null: db = router.db_for_write(self.model, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear(bulk=bulk) self.add(*objs, bulk=bulk) else: old_objs = set(self.using(db).all()) new_objs = [] for obj in objs: if obj in old_objs: old_objs.remove(obj) else: new_objs.append(obj) self.remove(*old_objs, bulk=bulk) self.add(*new_objs, bulk=bulk) else: self.add(*objs, bulk=bulk) set.alters_data = True async def aset(self, objs, *, bulk=True, clear=False): return await sync_to_async(self.set)(objs=objs, bulk=bulk, clear=clear) aset.alters_data = True return RelatedManager class ManyToManyDescriptor(ReverseManyToOneDescriptor): """ Accessor to the related objects manager on the forward and reverse sides of a many-to-many relation. In the example:: class Pizza(Model): toppings = ManyToManyField(Topping, related_name='pizzas') ``Pizza.toppings`` and ``Topping.pizzas`` are ``ManyToManyDescriptor`` instances. Most of the implementation is delegated to a dynamically defined manager class built by ``create_forward_many_to_many_manager()`` defined below. """ def __init__(self, rel, reverse=False): super().__init__(rel) self.reverse = reverse @property def through(self): # through is provided so that you have easy access to the through # model (Book.authors.through) for inlines, etc. This is done as # a property to ensure that the fully resolved value is returned. return self.rel.through @cached_property def related_manager_cls(self): related_model = self.rel.related_model if self.reverse else self.rel.model return create_forward_many_to_many_manager( related_model._default_manager.__class__, self.rel, reverse=self.reverse, ) def _get_set_deprecation_msg_params(self): return ( "%s side of a many-to-many set" % ("reverse" if self.reverse else "forward"), self.rel.get_accessor_name() if self.reverse else self.field.name, ) def create_forward_many_to_many_manager(superclass, rel, reverse): """ Create a manager for the either side of a many-to-many relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-many relations. 
""" class ManyRelatedManager(superclass, AltersData): def __init__(self, instance=None): super().__init__() self.instance = instance if not reverse: self.model = rel.model self.query_field_name = rel.field.related_query_name() self.prefetch_cache_name = rel.field.name self.source_field_name = rel.field.m2m_field_name() self.target_field_name = rel.field.m2m_reverse_field_name() self.symmetrical = rel.symmetrical else: self.model = rel.related_model self.query_field_name = rel.field.name self.prefetch_cache_name = rel.field.related_query_name() self.source_field_name = rel.field.m2m_reverse_field_name() self.target_field_name = rel.field.m2m_field_name() self.symmetrical = False self.through = rel.through self.reverse = reverse self.source_field = self.through._meta.get_field(self.source_field_name) self.target_field = self.through._meta.get_field(self.target_field_name) self.core_filters = {} self.pk_field_names = {} for lh_field, rh_field in self.source_field.related_fields: core_filter_key = "%s__%s" % (self.query_field_name, rh_field.name) self.core_filters[core_filter_key] = getattr(instance, rh_field.attname) self.pk_field_names[lh_field.name] = rh_field.name self.related_val = self.source_field.get_foreign_related_value(instance) if None in self.related_val: raise ValueError( '"%r" needs to have a value for field "%s" before ' "this many-to-many relationship can be used." % (instance, self.pk_field_names[self.source_field_name]) ) # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if instance.pk is None: raise ValueError( "%r instance needs to have a primary key value before " "a many-to-many relationship can be used." % instance.__class__.__name__ ) def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_forward_many_to_many_manager( manager.__class__, rel, reverse ) return manager_class(instance=self.instance) do_not_call_in_templates = True def _build_remove_filters(self, removed_vals): filters = Q.create([(self.source_field_name, self.related_val)]) # No need to add a subquery condition if removed_vals is a QuerySet without # filters. removed_vals_filters = ( not isinstance(removed_vals, QuerySet) or removed_vals._has_filters() ) if removed_vals_filters: filters &= Q.create([(f"{self.target_field_name}__in", removed_vals)]) if self.symmetrical: symmetrical_filters = Q.create( [(self.target_field_name, self.related_val)] ) if removed_vals_filters: symmetrical_filters &= Q.create( [(f"{self.source_field_name}__in", removed_vals)] ) filters |= symmetrical_filters return filters def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. 
""" queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset._defer_next_filter = True return queryset._next_is_sticky().filter(**self.core_filters) def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) queryset = _filter_prefetch_queryset( queryset._next_is_sticky(), self.query_field_name, instances ) # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. We know that # there will already be a join on the join table, so we can just add # the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. fk = self.through._meta.get_field(self.source_field_name) join_table = fk.model._meta.db_table connection = connections[queryset.db] qn = connection.ops.quote_name queryset = queryset.extra( select={ "_prefetch_related_val_%s" % f.attname: "%s.%s" % (qn(join_table), qn(f.column)) for f in fk.local_related_fields } ) return ( queryset, lambda result: tuple( getattr(result, "_prefetch_related_val_%s" % f.attname) for f in fk.local_related_fields ), lambda inst: tuple( f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields ), False, self.prefetch_cache_name, False, ) def add(self, *objs, through_defaults=None): self._remove_prefetched_objects() db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): self._add_items( self.source_field_name, self.target_field_name, *objs, through_defaults=through_defaults, ) # If this is a symmetrical m2m relation to self, add the mirror # entry in the m2m table. 
if self.symmetrical: self._add_items( self.target_field_name, self.source_field_name, *objs, through_defaults=through_defaults, ) add.alters_data = True async def aadd(self, *objs, through_defaults=None): return await sync_to_async(self.add)( *objs, through_defaults=through_defaults ) aadd.alters_data = True def remove(self, *objs): self._remove_prefetched_objects() self._remove_items(self.source_field_name, self.target_field_name, *objs) remove.alters_data = True async def aremove(self, *objs): return await sync_to_async(self.remove)(*objs) aremove.alters_data = True def clear(self): db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): signals.m2m_changed.send( sender=self.through, action="pre_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) self._remove_prefetched_objects() filters = self._build_remove_filters(super().get_queryset().using(db)) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) clear.alters_data = True async def aclear(self): return await sync_to_async(self.clear)() aclear.alters_data = True def set(self, objs, *, clear=False, through_defaults=None): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, through_defaults=through_defaults) else: old_ids = set( self.using(db).values_list( self.target_field.target_field.attname, flat=True ) ) new_objs = [] for obj in objs: fk_val = ( self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else self.target_field.get_prep_value(obj) ) if fk_val in old_ids: old_ids.remove(fk_val) else: new_objs.append(obj) self.remove(*old_ids) self.add(*new_objs, through_defaults=through_defaults) set.alters_data = True async def aset(self, objs, *, clear=False, through_defaults=None): return await sync_to_async(self.set)( objs=objs, clear=clear, through_defaults=through_defaults ) aset.alters_data = True def create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs) self.add(new_obj, through_defaults=through_defaults) return new_obj create.alters_data = True async def acreate(self, *, through_defaults=None, **kwargs): return await sync_to_async(self.create)( through_defaults=through_defaults, **kwargs ) acreate.alters_data = True def get_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create( **kwargs ) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. 
if created: self.add(obj, through_defaults=through_defaults) return obj, created get_or_create.alters_data = True async def aget_or_create(self, *, through_defaults=None, **kwargs): return await sync_to_async(self.get_or_create)( through_defaults=through_defaults, **kwargs ) aget_or_create.alters_data = True def update_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super( ManyRelatedManager, self.db_manager(db) ).update_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created update_or_create.alters_data = True async def aupdate_or_create(self, *, through_defaults=None, **kwargs): return await sync_to_async(self.update_or_create)( through_defaults=through_defaults, **kwargs ) aupdate_or_create.alters_data = True def _get_target_ids(self, target_field_name, objs): """ Return the set of ids of `objs` that the target field references. """ from django.db.models import Model target_ids = set() target_field = self.through._meta.get_field(target_field_name) for obj in objs: if isinstance(obj, self.model): if not router.allow_relation(obj, self.instance): raise ValueError( 'Cannot add "%r": instance is on database "%s", ' 'value is on database "%s"' % (obj, self.instance._state.db, obj._state.db) ) target_id = target_field.get_foreign_related_value(obj)[0] if target_id is None: raise ValueError( 'Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name) ) target_ids.add(target_id) elif isinstance(obj, Model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) else: target_ids.add(target_field.get_prep_value(obj)) return target_ids def _get_missing_target_ids( self, source_field_name, target_field_name, db, target_ids ): """ Return the subset of ids of `objs` that aren't already assigned to this relationship. """ vals = ( self.through._default_manager.using(db) .values_list(target_field_name, flat=True) .filter( **{ source_field_name: self.related_val[0], "%s__in" % target_field_name: target_ids, } ) ) return target_ids.difference(vals) def _get_add_plan(self, db, source_field_name): """ Return a boolean triple of the way the add should be performed. The first element is whether or not bulk_create(ignore_conflicts) can be used, the second whether or not signals must be sent, and the third element is whether or not the immediate bulk insertion with conflicts ignored can be performed. """ # Conflicts can be ignored when the intermediary model is # auto-created as the only possible collision is on the # (source_id, target_id) tuple. The same assertion doesn't hold for # user-defined intermediary models as they could have other fields # causing conflicts which must be surfaced. can_ignore_conflicts = ( self.through._meta.auto_created is not False and connections[db].features.supports_ignore_conflicts ) # Don't send the signal when inserting duplicate data row # for symmetrical reverse entries. must_send_signals = ( self.reverse or source_field_name == self.source_field_name ) and (signals.m2m_changed.has_listeners(self.through)) # Fast addition through bulk insertion can only be performed # if no m2m_changed listeners are connected for self.through # as they require the added set of ids to be provided via # pk_set. 
return ( can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and not must_send_signals), ) def _add_items( self, source_field_name, target_field_name, *objs, through_defaults=None ): # source_field_name: the PK fieldname in join table for the source object # target_field_name: the PK fieldname in join table for the target object # *objs - objects to add. Either object instances, or primary keys # of object instances. if not objs: return through_defaults = dict(resolve_callables(through_defaults or {})) target_ids = self._get_target_ids(target_field_name, objs) db = router.db_for_write(self.through, instance=self.instance) can_ignore_conflicts, must_send_signals, can_fast_add = self._get_add_plan( db, source_field_name ) if can_fast_add: self.through._default_manager.using(db).bulk_create( [ self.through( **{ "%s_id" % source_field_name: self.related_val[0], "%s_id" % target_field_name: target_id, } ) for target_id in target_ids ], ignore_conflicts=True, ) return missing_target_ids = self._get_missing_target_ids( source_field_name, target_field_name, db, target_ids ) with transaction.atomic(using=db, savepoint=False): if must_send_signals: signals.m2m_changed.send( sender=self.through, action="pre_add", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) # Add the ones that aren't there already. self.through._default_manager.using(db).bulk_create( [ self.through( **through_defaults, **{ "%s_id" % source_field_name: self.related_val[0], "%s_id" % target_field_name: target_id, }, ) for target_id in missing_target_ids ], ignore_conflicts=can_ignore_conflicts, ) if must_send_signals: signals.m2m_changed.send( sender=self.through, action="post_add", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) def _remove_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK colname in join table for the source object # target_field_name: the PK colname in join table for the target object # *objs - objects to remove. Either object instances, or primary # keys of object instances. if not objs: return # Check that all the objects are of the right type old_ids = set() for obj in objs: if isinstance(obj, self.model): fk_val = self.target_field.get_foreign_related_value(obj)[0] old_ids.add(fk_val) else: old_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): # Send a signal to the other end if need be. signals.m2m_changed.send( sender=self.through, action="pre_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) target_model_qs = super().get_queryset() if target_model_qs._has_filters(): old_vals = target_model_qs.using(db).filter( **{"%s__in" % self.target_field.target_field.attname: old_ids} ) else: old_vals = old_ids filters = self._build_remove_filters(old_vals) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) return ManyRelatedManager
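# The descriptors above avoid repeat SQL queries by stashing the related
# object in a per-instance cache and, for one-to-one relations, mirroring
# that cache on both ends of the relation. Below is a minimal,
# framework-free sketch of the same caching pattern; the names here are
# illustrative only and are not Django API.

class CachedRelationDescriptor:
    """Cache a lazily loaded related value on each instance."""

    def __init__(self, name, loader):
        self.cache_name = "_cache_%s" % name
        self.loader = loader  # callable(instance) -> related value

    def __get__(self, instance, cls=None):
        if instance is None:
            return self
        try:
            # Cache hit: no loader call, mirroring get_cached_value().
            return getattr(instance, self.cache_name)
        except AttributeError:
            # Cache miss: load once (the SQL query, in Django's case)
            # and remember the result for later accesses.
            value = self.loader(instance)
            setattr(instance, self.cache_name, value)
            return value

    def __set__(self, instance, value):
        # Assignment fills the cache directly, so a later read never
        # triggers the loader, mirroring set_cached_value().
        setattr(instance, self.cache_name, value)


class Place:
    restaurant = CachedRelationDescriptor("restaurant", lambda inst: "from-db")


place = Place()
assert place.restaurant == "from-db"   # first access runs the loader
place.restaurant = "assigned"          # assignment primes the cache
assert place.restaurant == "assigned"  # no loader call on this read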
import operator from django.db import transaction from django.db.backends.base.features import BaseDatabaseFeatures from django.db.utils import OperationalError from django.utils.functional import cached_property from .base import Database class DatabaseFeatures(BaseDatabaseFeatures): minimum_database_version = (3, 9) test_db_allows_multiple_connections = False supports_unspecified_pk = True supports_timezones = False max_query_params = 999 supports_transactions = True atomic_transactions = False can_rollback_ddl = True can_create_inline_fk = False requires_literal_defaults = True can_clone_databases = True supports_temporal_subtraction = True ignores_table_name_case = True supports_cast_with_precision = False time_cast_precision = 3 can_release_savepoints = True has_case_insensitive_like = True # Is "ALTER TABLE ... RENAME COLUMN" supported? can_alter_table_rename_column = Database.sqlite_version_info >= (3, 25, 0) # Is "ALTER TABLE ... DROP COLUMN" supported? can_alter_table_drop_column = Database.sqlite_version_info >= (3, 35, 5) supports_parentheses_in_compound = False # Deferred constraint checks can be emulated on SQLite < 3.20 but not in a # reasonably performant way. supports_pragma_foreign_key_check = Database.sqlite_version_info >= (3, 20, 0) can_defer_constraint_checks = supports_pragma_foreign_key_check supports_functions_in_partial_indexes = Database.sqlite_version_info >= (3, 15, 0) supports_over_clause = Database.sqlite_version_info >= (3, 25, 0) supports_frame_range_fixed_distance = Database.sqlite_version_info >= (3, 28, 0) supports_aggregate_filter_clause = Database.sqlite_version_info >= (3, 30, 1) supports_order_by_nulls_modifier = Database.sqlite_version_info >= (3, 30, 0) # NULLS LAST/FIRST emulation on < 3.30 requires subquery wrapping. requires_compound_order_by_subquery = Database.sqlite_version_info < (3, 30) order_by_nulls_first = True supports_json_field_contains = False supports_update_conflicts = Database.sqlite_version_info >= (3, 24, 0) supports_update_conflicts_with_target = supports_update_conflicts test_collations = { "ci": "nocase", "cs": "binary", "non_default": "nocase", } django_test_expected_failures = { # The django_format_dtdelta() function doesn't properly handle mixed # Date/DateTime fields and timedeltas. "expressions.tests.FTimeDeltaTests.test_mixed_comparisons1", } create_test_table_with_composite_primary_key = """ CREATE TABLE test_table_composite_pk ( column_1 INTEGER NOT NULL, column_2 INTEGER NOT NULL, PRIMARY KEY(column_1, column_2) ) """ @cached_property def django_test_skips(self): skips = { "SQLite stores values rounded to 15 significant digits.": { "model_fields.test_decimalfield.DecimalFieldTests." "test_fetch_from_db_without_float_rounding", }, "SQLite naively remakes the table on field alteration.": { "schema.tests.SchemaTests.test_unique_no_unnecessary_fk_drops", "schema.tests.SchemaTests.test_unique_and_reverse_m2m", "schema.tests.SchemaTests." "test_alter_field_default_doesnt_perform_queries", "schema.tests.SchemaTests." "test_rename_column_renames_deferred_sql_references", }, "SQLite doesn't support negative precision for ROUND().": { "db_functions.math.test_round.RoundTests." "test_null_with_negative_precision", "db_functions.math.test_round.RoundTests." "test_decimal_with_negative_precision", "db_functions.math.test_round.RoundTests." "test_float_with_negative_precision", "db_functions.math.test_round.RoundTests." 
"test_integer_with_negative_precision", }, } if Database.sqlite_version_info < (3, 27): skips.update( { "Nondeterministic failure on SQLite < 3.27.": { "expressions_window.tests.WindowFunctionTests." "test_subquery_row_range_rank", }, } ) if self.connection.is_in_memory_db(): skips.update( { "the sqlite backend's close() method is a no-op when using an " "in-memory database": { "servers.test_liveserverthread.LiveServerThreadTest." "test_closes_connections", "servers.tests.LiveServerTestCloseConnectionTest." "test_closes_connections", }, } ) return skips @cached_property def supports_atomic_references_rename(self): return Database.sqlite_version_info >= (3, 26, 0) @cached_property def introspected_field_types(self): return { **super().introspected_field_types, "BigAutoField": "AutoField", "DurationField": "BigIntegerField", "GenericIPAddressField": "CharField", "SmallAutoField": "AutoField", } @cached_property def supports_json_field(self): with self.connection.cursor() as cursor: try: with transaction.atomic(self.connection.alias): cursor.execute('SELECT JSON(\'{"a": "b"}\')') except OperationalError: return False return True can_introspect_json_field = property(operator.attrgetter("supports_json_field")) has_json_object_function = property(operator.attrgetter("supports_json_field")) @cached_property def can_return_columns_from_insert(self): return Database.sqlite_version_info >= (3, 35) can_return_rows_from_bulk_insert = property( operator.attrgetter("can_return_columns_from_insert") )
""" SQLite backend for the sqlite3 module in the standard library. """ import datetime import decimal import warnings from collections.abc import Mapping from itertools import chain, tee from sqlite3 import dbapi2 as Database from django.core.exceptions import ImproperlyConfigured from django.db import IntegrityError from django.db.backends.base.base import BaseDatabaseWrapper from django.utils.asyncio import async_unsafe from django.utils.dateparse import parse_date, parse_datetime, parse_time from django.utils.regex_helper import _lazy_re_compile from ._functions import register as register_functions from .client import DatabaseClient from .creation import DatabaseCreation from .features import DatabaseFeatures from .introspection import DatabaseIntrospection from .operations import DatabaseOperations from .schema import DatabaseSchemaEditor def decoder(conv_func): """ Convert bytestrings from Python's sqlite3 interface to a regular string. """ return lambda s: conv_func(s.decode()) def adapt_date(val): return val.isoformat() def adapt_datetime(val): return val.isoformat(" ") Database.register_converter("bool", b"1".__eq__) Database.register_converter("date", decoder(parse_date)) Database.register_converter("time", decoder(parse_time)) Database.register_converter("datetime", decoder(parse_datetime)) Database.register_converter("timestamp", decoder(parse_datetime)) Database.register_adapter(decimal.Decimal, str) Database.register_adapter(datetime.date, adapt_date) Database.register_adapter(datetime.datetime, adapt_datetime) class DatabaseWrapper(BaseDatabaseWrapper): vendor = "sqlite" display_name = "SQLite" # SQLite doesn't actually support most of these types, but it "does the right # thing" given more verbose field definitions, so leave them as is so that # schema inspection is more useful. data_types = { "AutoField": "integer", "BigAutoField": "integer", "BinaryField": "BLOB", "BooleanField": "bool", "CharField": "varchar(%(max_length)s)", "DateField": "date", "DateTimeField": "datetime", "DecimalField": "decimal", "DurationField": "bigint", "FileField": "varchar(%(max_length)s)", "FilePathField": "varchar(%(max_length)s)", "FloatField": "real", "IntegerField": "integer", "BigIntegerField": "bigint", "IPAddressField": "char(15)", "GenericIPAddressField": "char(39)", "JSONField": "text", "OneToOneField": "integer", "PositiveBigIntegerField": "bigint unsigned", "PositiveIntegerField": "integer unsigned", "PositiveSmallIntegerField": "smallint unsigned", "SlugField": "varchar(%(max_length)s)", "SmallAutoField": "integer", "SmallIntegerField": "smallint", "TextField": "text", "TimeField": "time", "UUIDField": "char(32)", } data_type_check_constraints = { "PositiveBigIntegerField": '"%(column)s" >= 0', "JSONField": '(JSON_VALID("%(column)s") OR "%(column)s" IS NULL)', "PositiveIntegerField": '"%(column)s" >= 0', "PositiveSmallIntegerField": '"%(column)s" >= 0', } data_types_suffix = { "AutoField": "AUTOINCREMENT", "BigAutoField": "AUTOINCREMENT", "SmallAutoField": "AUTOINCREMENT", } # SQLite requires LIKE statements to include an ESCAPE clause if the value # being escaped has a percent or underscore in it. # See https://www.sqlite.org/lang_expr.html for an explanation. 
operators = { "exact": "= %s", "iexact": "LIKE %s ESCAPE '\\'", "contains": "LIKE %s ESCAPE '\\'", "icontains": "LIKE %s ESCAPE '\\'", "regex": "REGEXP %s", "iregex": "REGEXP '(?i)' || %s", "gt": "> %s", "gte": ">= %s", "lt": "< %s", "lte": "<= %s", "startswith": "LIKE %s ESCAPE '\\'", "endswith": "LIKE %s ESCAPE '\\'", "istartswith": "LIKE %s ESCAPE '\\'", "iendswith": "LIKE %s ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { "contains": r"LIKE '%%' || {} || '%%' ESCAPE '\'", "icontains": r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'", "startswith": r"LIKE {} || '%%' ESCAPE '\'", "istartswith": r"LIKE UPPER({}) || '%%' ESCAPE '\'", "endswith": r"LIKE '%%' || {} ESCAPE '\'", "iendswith": r"LIKE '%%' || UPPER({}) ESCAPE '\'", } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations def get_connection_params(self): settings_dict = self.settings_dict if not settings_dict["NAME"]: raise ImproperlyConfigured( "settings.DATABASES is improperly configured. " "Please supply the NAME value." ) kwargs = { "database": settings_dict["NAME"], "detect_types": Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES, **settings_dict["OPTIONS"], } # Always allow the underlying SQLite connection to be shareable # between multiple threads. The safe-guarding will be handled at a # higher level by the `BaseDatabaseWrapper.allow_thread_sharing` # property. This is necessary as the shareability is disabled by # default in sqlite3 and it cannot be changed once a connection is # opened. if "check_same_thread" in kwargs and kwargs["check_same_thread"]: warnings.warn( "The `check_same_thread` option was provided and set to " "True. It will be overridden with False. Use the " "`DatabaseWrapper.allow_thread_sharing` property instead " "for controlling thread shareability.", RuntimeWarning, ) kwargs.update({"check_same_thread": False, "uri": True}) return kwargs def get_database_version(self): return self.Database.sqlite_version_info @async_unsafe def get_new_connection(self, conn_params): conn = Database.connect(**conn_params) register_functions(conn) conn.execute("PRAGMA foreign_keys = ON") # The macOS bundled SQLite defaults legacy_alter_table ON, which # prevents atomic table renames (feature supports_atomic_references_rename) conn.execute("PRAGMA legacy_alter_table = OFF") return conn def create_cursor(self, name=None): return self.connection.cursor(factory=SQLiteCursorWrapper) @async_unsafe def close(self): self.validate_thread_sharing() # If database is in memory, closing the connection destroys the # database. To prevent accidental data loss, ignore close requests on # an in-memory db. if not self.is_in_memory_db(): BaseDatabaseWrapper.close(self) def _savepoint_allowed(self): # When 'isolation_level' is not None, sqlite3 commits before each # savepoint; it's a bug. 
When it is None, savepoints don't make sense # because autocommit is enabled. The only exception is inside 'atomic' # blocks. To work around that bug, on SQLite, 'atomic' starts a # transaction explicitly rather than simply disable autocommit. return self.in_atomic_block def _set_autocommit(self, autocommit): if autocommit: level = None else: # sqlite3's internal default is ''. It's different from None. # See Modules/_sqlite/connection.c. level = "" # 'isolation_level' is a misleading API. # SQLite always runs at the SERIALIZABLE isolation level. with self.wrap_database_errors: self.connection.isolation_level = level def disable_constraint_checking(self): with self.cursor() as cursor: cursor.execute("PRAGMA foreign_keys = OFF") # Foreign key constraints cannot be turned off while in a multi- # statement transaction. Fetch the current state of the pragma # to determine if constraints are effectively disabled. enabled = cursor.execute("PRAGMA foreign_keys").fetchone()[0] return not bool(enabled) def enable_constraint_checking(self): with self.cursor() as cursor: cursor.execute("PRAGMA foreign_keys = ON") def check_constraints(self, table_names=None): """ Check each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. """ if self.features.supports_pragma_foreign_key_check: with self.cursor() as cursor: if table_names is None: violations = cursor.execute("PRAGMA foreign_key_check").fetchall() else: violations = chain.from_iterable( cursor.execute( "PRAGMA foreign_key_check(%s)" % self.ops.quote_name(table_name) ).fetchall() for table_name in table_names ) # See https://www.sqlite.org/pragma.html#pragma_foreign_key_check for ( table_name, rowid, referenced_table_name, foreign_key_index, ) in violations: foreign_key = cursor.execute( "PRAGMA foreign_key_list(%s)" % self.ops.quote_name(table_name) ).fetchall()[foreign_key_index] column_name, referenced_column_name = foreign_key[3:5] primary_key_column_name = self.introspection.get_primary_key_column( cursor, table_name ) primary_key_value, bad_value = cursor.execute( "SELECT %s, %s FROM %s WHERE rowid = %%s" % ( self.ops.quote_name(primary_key_column_name), self.ops.quote_name(column_name), self.ops.quote_name(table_name), ), (rowid,), ).fetchone() raise IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." 
% ( table_name, primary_key_value, table_name, column_name, bad_value, referenced_table_name, referenced_column_name, ) ) else: with self.cursor() as cursor: if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column( cursor, table_name ) if not primary_key_column_name: continue relations = self.introspection.get_relations(cursor, table_name) for column_name, ( referenced_column_name, referenced_table_name, ) in relations.items(): cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." % ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): return True def _start_transaction_under_autocommit(self): """ Start a transaction explicitly in autocommit mode. Staying in autocommit mode works around a bug of sqlite3 that breaks savepoints when autocommit is disabled. """ self.cursor().execute("BEGIN") def is_in_memory_db(self): return self.creation.is_in_memory_db(self.settings_dict["NAME"]) FORMAT_QMARK_REGEX = _lazy_re_compile(r"(?<!%)%s") class SQLiteCursorWrapper(Database.Cursor): """ Django uses the "format" and "pyformat" styles, but Python's sqlite3 module supports neither of these styles. This wrapper performs the following conversions: - "format" style to "qmark" style - "pyformat" style to "named" style In both cases, if you want to use a literal "%s", you'll need to use "%%s". """ def execute(self, query, params=None): if params is None: return super().execute(query) # Extract names if params is a mapping, i.e. "pyformat" style is used. param_names = list(params) if isinstance(params, Mapping) else None query = self.convert_query(query, param_names=param_names) return super().execute(query, params) def executemany(self, query, param_list): # Extract names if params is a mapping, i.e. "pyformat" style is used. # Peek carefully as a generator can be passed instead of a list/tuple. peekable, param_list = tee(iter(param_list)) if (params := next(peekable, None)) and isinstance(params, Mapping): param_names = list(params) else: param_names = None query = self.convert_query(query, param_names=param_names) return super().executemany(query, param_list) def convert_query(self, query, *, param_names=None): if param_names is None: # Convert from "format" style to "qmark" style. return FORMAT_QMARK_REGEX.sub("?", query).replace("%%", "%") else: # Convert from "pyformat" style to "named" style. return query % {name: f":{name}" for name in param_names}
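# A quick, dependency-free check of the two conversions convert_query()
# performs: "format" (%s) to "qmark" (?) and "pyformat" (%(name)s) to
# "named" (:name). The regex mirrors FORMAT_QMARK_REGEX above; the helper
# names are illustrative only.

import re

FORMAT_QMARK = re.compile(r"(?<!%)%s")


def to_qmark(query):
    # Unescaped %s becomes ?, then the %% escape collapses back to %.
    return FORMAT_QMARK.sub("?", query).replace("%%", "%")


def to_named(query, param_names):
    # Each %(name)s placeholder becomes :name via %-interpolation.
    return query % {name: f":{name}" for name in param_names}


assert to_qmark("SELECT %s WHERE x = %s") == "SELECT ? WHERE x = ?"
assert to_qmark("x LIKE '100%%' AND y = %s") == "x LIKE '100%' AND y = ?"
assert to_named("SELECT %(a)s, %(b)s", ["a", "b"]) == "SELECT :a, :b"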
import copy import json import re from functools import partial, update_wrapper from urllib.parse import quote as urlquote from django import forms from django.conf import settings from django.contrib import messages from django.contrib.admin import helpers, widgets from django.contrib.admin.checks import ( BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks, ) from django.contrib.admin.decorators import display from django.contrib.admin.exceptions import DisallowedModelAdminToField from django.contrib.admin.templatetags.admin_urls import add_preserved_filters from django.contrib.admin.utils import ( NestedObjects, construct_change_message, flatten_fieldsets, get_deleted_objects, lookup_spawns_duplicates, model_format_dict, model_ngettext, quote, unquote, ) from django.contrib.admin.widgets import AutocompleteSelect, AutocompleteSelectMultiple from django.contrib.auth import get_permission_codename from django.core.exceptions import ( FieldDoesNotExist, FieldError, PermissionDenied, ValidationError, ) from django.core.paginator import Paginator from django.db import models, router, transaction from django.db.models.constants import LOOKUP_SEP from django.forms.formsets import DELETION_FIELD_NAME, all_valid from django.forms.models import ( BaseInlineFormSet, inlineformset_factory, modelform_defines_fields, modelform_factory, modelformset_factory, ) from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple from django.http import HttpResponseRedirect from django.http.response import HttpResponseBase from django.template.response import SimpleTemplateResponse, TemplateResponse from django.urls import reverse from django.utils.decorators import method_decorator from django.utils.html import format_html from django.utils.http import urlencode from django.utils.safestring import mark_safe from django.utils.text import ( capfirst, format_lazy, get_text_list, smart_split, unescape_string_literal, ) from django.utils.translation import gettext as _ from django.utils.translation import ngettext from django.views.decorators.csrf import csrf_protect from django.views.generic import RedirectView IS_POPUP_VAR = "_popup" TO_FIELD_VAR = "_to_field" HORIZONTAL, VERTICAL = 1, 2 def get_content_type_for_model(obj): # Since this module gets imported in the application's root package, # it cannot import models from other applications at the module level. from django.contrib.contenttypes.models import ContentType return ContentType.objects.get_for_model(obj, for_concrete_model=False) def get_ul_class(radio_style): return "radiolist" if radio_style == VERTICAL else "radiolist inline" class IncorrectLookupParameters(Exception): pass # Defaults for formfield_overrides. ModelAdmin subclasses can change this # by adding to ModelAdmin.formfield_overrides. 
FORMFIELD_FOR_DBFIELD_DEFAULTS = { models.DateTimeField: { "form_class": forms.SplitDateTimeField, "widget": widgets.AdminSplitDateTime, }, models.DateField: {"widget": widgets.AdminDateWidget}, models.TimeField: {"widget": widgets.AdminTimeWidget}, models.TextField: {"widget": widgets.AdminTextareaWidget}, models.URLField: {"widget": widgets.AdminURLFieldWidget}, models.IntegerField: {"widget": widgets.AdminIntegerFieldWidget}, models.BigIntegerField: {"widget": widgets.AdminBigIntegerFieldWidget}, models.CharField: {"widget": widgets.AdminTextInputWidget}, models.ImageField: {"widget": widgets.AdminFileWidget}, models.FileField: {"widget": widgets.AdminFileWidget}, models.EmailField: {"widget": widgets.AdminEmailInputWidget}, models.UUIDField: {"widget": widgets.AdminUUIDInputWidget}, } csrf_protect_m = method_decorator(csrf_protect) class BaseModelAdmin(metaclass=forms.MediaDefiningClass): """Functionality common to both ModelAdmin and InlineAdmin.""" autocomplete_fields = () raw_id_fields = () fields = None exclude = None fieldsets = None form = forms.ModelForm filter_vertical = () filter_horizontal = () radio_fields = {} prepopulated_fields = {} formfield_overrides = {} readonly_fields = () ordering = None sortable_by = None view_on_site = True show_full_result_count = True checks_class = BaseModelAdminChecks def check(self, **kwargs): return self.checks_class().check(self, **kwargs) def __init__(self): # Merge FORMFIELD_FOR_DBFIELD_DEFAULTS with the formfield_overrides # rather than simply overwriting. overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS) for k, v in self.formfield_overrides.items(): overrides.setdefault(k, {}).update(v) self.formfield_overrides = overrides def formfield_for_dbfield(self, db_field, request, **kwargs): """ Hook for specifying the form Field instance for a given database Field instance. If kwargs are given, they're passed to the form Field's constructor. """ # If the field specifies choices, we don't need to look for special # admin widgets - we just need to use a select widget of some kind. if db_field.choices: return self.formfield_for_choice_field(db_field, request, **kwargs) # ForeignKey or ManyToManyFields if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)): # Combine the field kwargs with any options for formfield_overrides. # Make sure the passed in **kwargs override anything in # formfield_overrides because **kwargs is more specific, and should # always win. if db_field.__class__ in self.formfield_overrides: kwargs = {**self.formfield_overrides[db_field.__class__], **kwargs} # Get the correct formfield. if isinstance(db_field, models.ForeignKey): formfield = self.formfield_for_foreignkey(db_field, request, **kwargs) elif isinstance(db_field, models.ManyToManyField): formfield = self.formfield_for_manytomany(db_field, request, **kwargs) # For non-raw_id fields, wrap the widget with a wrapper that adds # extra HTML -- the "add other" interface -- to the end of the # rendered output. formfield can be None if it came from a # OneToOneField with parent_link=True or a M2M intermediary. 
if formfield and db_field.name not in self.raw_id_fields: related_modeladmin = self.admin_site._registry.get( db_field.remote_field.model ) wrapper_kwargs = {} if related_modeladmin: wrapper_kwargs.update( can_add_related=related_modeladmin.has_add_permission(request), can_change_related=related_modeladmin.has_change_permission( request ), can_delete_related=related_modeladmin.has_delete_permission( request ), can_view_related=related_modeladmin.has_view_permission( request ), ) formfield.widget = widgets.RelatedFieldWidgetWrapper( formfield.widget, db_field.remote_field, self.admin_site, **wrapper_kwargs, ) return formfield # If we've got overrides for the formfield defined, use 'em. **kwargs # passed to formfield_for_dbfield override the defaults. for klass in db_field.__class__.mro(): if klass in self.formfield_overrides: kwargs = {**copy.deepcopy(self.formfield_overrides[klass]), **kwargs} return db_field.formfield(**kwargs) # For any other type of field, just call its formfield() method. return db_field.formfield(**kwargs) def formfield_for_choice_field(self, db_field, request, **kwargs): """ Get a form Field for a database Field that has declared choices. """ # If the field is named as a radio_field, use a RadioSelect if db_field.name in self.radio_fields: # Avoid stomping on custom widget/choices arguments. if "widget" not in kwargs: kwargs["widget"] = widgets.AdminRadioSelect( attrs={ "class": get_ul_class(self.radio_fields[db_field.name]), } ) if "choices" not in kwargs: kwargs["choices"] = db_field.get_choices( include_blank=db_field.blank, blank_choice=[("", _("None"))] ) return db_field.formfield(**kwargs) def get_field_queryset(self, db, db_field, request): """ If the ModelAdmin specifies ordering, the queryset should respect that ordering. Otherwise don't specify the queryset, let the field decide (return None in that case). """ related_admin = self.admin_site._registry.get(db_field.remote_field.model) if related_admin is not None: ordering = related_admin.get_ordering(request) if ordering is not None and ordering != (): return db_field.remote_field.model._default_manager.using(db).order_by( *ordering ) return None def formfield_for_foreignkey(self, db_field, request, **kwargs): """ Get a form Field for a ForeignKey. """ db = kwargs.get("using") if "widget" not in kwargs: if db_field.name in self.get_autocomplete_fields(request): kwargs["widget"] = AutocompleteSelect( db_field, self.admin_site, using=db ) elif db_field.name in self.raw_id_fields: kwargs["widget"] = widgets.ForeignKeyRawIdWidget( db_field.remote_field, self.admin_site, using=db ) elif db_field.name in self.radio_fields: kwargs["widget"] = widgets.AdminRadioSelect( attrs={ "class": get_ul_class(self.radio_fields[db_field.name]), } ) kwargs["empty_label"] = ( kwargs.get("empty_label", _("None")) if db_field.blank else None ) if "queryset" not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs["queryset"] = queryset return db_field.formfield(**kwargs) def formfield_for_manytomany(self, db_field, request, **kwargs): """ Get a form Field for a ManyToManyField. """ # If it uses an intermediary model that isn't auto created, don't show # a field in admin. 
if not db_field.remote_field.through._meta.auto_created: return None db = kwargs.get("using") if "widget" not in kwargs: autocomplete_fields = self.get_autocomplete_fields(request) if db_field.name in autocomplete_fields: kwargs["widget"] = AutocompleteSelectMultiple( db_field, self.admin_site, using=db, ) elif db_field.name in self.raw_id_fields: kwargs["widget"] = widgets.ManyToManyRawIdWidget( db_field.remote_field, self.admin_site, using=db, ) elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]: kwargs["widget"] = widgets.FilteredSelectMultiple( db_field.verbose_name, db_field.name in self.filter_vertical ) if "queryset" not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs["queryset"] = queryset form_field = db_field.formfield(**kwargs) if ( isinstance(form_field.widget, SelectMultiple) and form_field.widget.allow_multiple_selected and not isinstance( form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple) ) ): msg = _( "Hold down “Control”, or “Command” on a Mac, to select more than one." ) help_text = form_field.help_text form_field.help_text = ( format_lazy("{} {}", help_text, msg) if help_text else msg ) return form_field def get_autocomplete_fields(self, request): """ Return a list of ForeignKey and/or ManyToMany fields which should use an autocomplete widget. """ return self.autocomplete_fields def get_view_on_site_url(self, obj=None): if obj is None or not self.view_on_site: return None if callable(self.view_on_site): return self.view_on_site(obj) elif hasattr(obj, "get_absolute_url"): # use the ContentType lookup if view_on_site is True return reverse( "admin:view_on_site", kwargs={ "content_type_id": get_content_type_for_model(obj).pk, "object_id": obj.pk, }, current_app=self.admin_site.name, ) def get_empty_value_display(self): """ Return the empty_value_display set on ModelAdmin or AdminSite. """ try: return mark_safe(self.empty_value_display) except AttributeError: return mark_safe(self.admin_site.empty_value_display) def get_exclude(self, request, obj=None): """ Hook for specifying exclude. """ return self.exclude def get_fields(self, request, obj=None): """ Hook for specifying fields. """ if self.fields: return self.fields # _get_form_for_get_fields() is implemented in subclasses. form = self._get_form_for_get_fields(request, obj) return [*form.base_fields, *self.get_readonly_fields(request, obj)] def get_fieldsets(self, request, obj=None): """ Hook for specifying fieldsets. """ if self.fieldsets: return self.fieldsets return [(None, {"fields": self.get_fields(request, obj)})] def get_inlines(self, request, obj): """Hook for specifying custom inlines.""" return self.inlines def get_ordering(self, request): """ Hook for specifying field ordering. """ return self.ordering or () # otherwise we might try to *None, which is bad ;) def get_readonly_fields(self, request, obj=None): """ Hook for specifying custom readonly fields. """ return self.readonly_fields def get_prepopulated_fields(self, request, obj=None): """ Hook for specifying custom prepopulated fields. """ return self.prepopulated_fields def get_queryset(self, request): """ Return a QuerySet of all model instances that can be edited by the admin site. This is used by changelist_view. """ qs = self.model._default_manager.get_queryset() # TODO: this should be handled by some parameter to the ChangeList. 
        ordering = self.get_ordering(request)
        if ordering:
            qs = qs.order_by(*ordering)
        return qs

    def get_sortable_by(self, request):
        """Hook for specifying which fields can be sorted in the changelist."""
        return (
            self.sortable_by
            if self.sortable_by is not None
            else self.get_list_display(request)
        )

    def lookup_allowed(self, lookup, value):
        from django.contrib.admin.filters import SimpleListFilter

        model = self.model
        # Check FKey lookups that are allowed, so that popups produced by
        # ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
        # are allowed to work.
        for fk_lookup in model._meta.related_fkey_lookups:
            # As ``limit_choices_to`` can be a callable, invoke it here.
            if callable(fk_lookup):
                fk_lookup = fk_lookup()
            if (lookup, value) in widgets.url_params_from_lookup_dict(
                fk_lookup
            ).items():
                return True

        relation_parts = []
        prev_field = None
        for part in lookup.split(LOOKUP_SEP):
            try:
                field = model._meta.get_field(part)
            except FieldDoesNotExist:
                # Lookups on nonexistent fields are OK, since they're ignored
                # later.
                break
            # It is allowed to filter on values that would be found from the
            # local model anyway. For example, if you filter on
            # employee__department__id, the id value is already available from
            # employee__department_id.
            if not prev_field or (
                prev_field.is_relation
                and field not in prev_field.path_infos[-1].target_fields
            ):
                relation_parts.append(part)
            if not getattr(field, "path_infos", None):
                # This is not a relational field, so further parts
                # must be transforms.
                break
            prev_field = field
            model = field.path_infos[-1].to_opts.model

        if len(relation_parts) <= 1:
            # Either a local field filter, or no fields at all.
            return True
        valid_lookups = {self.date_hierarchy}
        for filter_item in self.list_filter:
            if isinstance(filter_item, type) and issubclass(
                filter_item, SimpleListFilter
            ):
                valid_lookups.add(filter_item.parameter_name)
            elif isinstance(filter_item, (list, tuple)):
                valid_lookups.add(filter_item[0])
            else:
                valid_lookups.add(filter_item)

        # Is it a valid relational lookup?
        return not {
            LOOKUP_SEP.join(relation_parts),
            LOOKUP_SEP.join(relation_parts + [part]),
        }.isdisjoint(valid_lookups)

    def to_field_allowed(self, request, to_field):
        """
        Return True if the model associated with this admin should be
        allowed to be referenced by the specified field.
        """
        try:
            field = self.opts.get_field(to_field)
        except FieldDoesNotExist:
            return False

        # Always allow referencing the primary key since it's already possible
        # to get this information from the change view URL.
        if field.primary_key:
            return True

        # Allow reverse relationships to models defining m2m fields if they
        # target the specified field.
        for many_to_many in self.opts.many_to_many:
            if many_to_many.m2m_target_field_name() == to_field:
                return True

        # Make sure at least one of the models registered for this site
        # references this field through a FK or a M2M relationship.
        registered_models = set()
        for model, admin in self.admin_site._registry.items():
            registered_models.add(model)
            for inline in admin.inlines:
                registered_models.add(inline.model)

        related_objects = (
            f
            for f in self.opts.get_fields(include_hidden=True)
            if (f.auto_created and not f.concrete)
        )
        for related_object in related_objects:
            related_model = related_object.related_model
            remote_field = related_object.field.remote_field
            if (
                any(issubclass(model, related_model) for model in registered_models)
                and hasattr(remote_field, "get_related_field")
                and remote_field.get_related_field() == field
            ):
                return True

        return False

    def has_add_permission(self, request):
        """
        Return True if the given request has permission to add an object.
        Can be overridden by the user in subclasses.
        """
        opts = self.opts
        codename = get_permission_codename("add", opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))

    def has_change_permission(self, request, obj=None):
        """
        Return True if the given request has permission to change the given
        Django model instance. The default implementation doesn't examine the
        `obj` parameter.

        Can be overridden by the user in subclasses. In such case it should
        return True if the given request has permission to change the `obj`
        model instance. If `obj` is None, this should return True if the given
        request has permission to change *any* object of the given type.
        """
        opts = self.opts
        codename = get_permission_codename("change", opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))

    def has_delete_permission(self, request, obj=None):
        """
        Return True if the given request has permission to delete the given
        Django model instance. The default implementation doesn't examine the
        `obj` parameter.

        Can be overridden by the user in subclasses. In such case it should
        return True if the given request has permission to delete the `obj`
        model instance. If `obj` is None, this should return True if the given
        request has permission to delete *any* object of the given type.
        """
        opts = self.opts
        codename = get_permission_codename("delete", opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))

    def has_view_permission(self, request, obj=None):
        """
        Return True if the given request has permission to view the given
        Django model instance. The default implementation doesn't examine the
        `obj` parameter.

        If overridden by the user in subclasses, it should return True if the
        given request has permission to view the `obj` model instance. If `obj`
        is None, it should return True if the request has permission to view
        any object of the given type.
        """
        opts = self.opts
        codename_view = get_permission_codename("view", opts)
        codename_change = get_permission_codename("change", opts)
        return request.user.has_perm(
            "%s.%s" % (opts.app_label, codename_view)
        ) or request.user.has_perm("%s.%s" % (opts.app_label, codename_change))

    def has_view_or_change_permission(self, request, obj=None):
        return self.has_view_permission(request, obj) or self.has_change_permission(
            request, obj
        )

    def has_module_permission(self, request):
        """
        Return True if the given request has any permission in the given
        app label.

        Can be overridden by the user in subclasses. In such case it should
        return True if the given request has permission to view the module on
        the admin index page and access the module's index page. Overriding it
        does not restrict access to the add, change or delete views. Use
        `ModelAdmin.has_(add|change|delete)_permission` for that.
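
        A sketch of a possible override (illustrative only, not part of this
        module) that hides the whole module from non-superusers::

            def has_module_permission(self, request):
                return request.user.is_superuser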
""" return request.user.has_module_perms(self.opts.app_label) class ModelAdmin(BaseModelAdmin): """Encapsulate all admin options and functionality for a given model.""" list_display = ("__str__",) list_display_links = () list_filter = () list_select_related = False list_per_page = 100 list_max_show_all = 200 list_editable = () search_fields = () search_help_text = None date_hierarchy = None save_as = False save_as_continue = True save_on_top = False paginator = Paginator preserve_filters = True inlines = () # Custom templates (designed to be over-ridden in subclasses) add_form_template = None change_form_template = None change_list_template = None delete_confirmation_template = None delete_selected_confirmation_template = None object_history_template = None popup_response_template = None # Actions actions = () action_form = helpers.ActionForm actions_on_top = True actions_on_bottom = False actions_selection_counter = True checks_class = ModelAdminChecks def __init__(self, model, admin_site): self.model = model self.opts = model._meta self.admin_site = admin_site super().__init__() def __str__(self): return "%s.%s" % (self.opts.app_label, self.__class__.__name__) def __repr__(self): return ( f"<{self.__class__.__qualname__}: model={self.model.__qualname__} " f"site={self.admin_site!r}>" ) def get_inline_instances(self, request, obj=None): inline_instances = [] for inline_class in self.get_inlines(request, obj): inline = inline_class(self.model, self.admin_site) if request: if not ( inline.has_view_or_change_permission(request, obj) or inline.has_add_permission(request, obj) or inline.has_delete_permission(request, obj) ): continue if not inline.has_add_permission(request, obj): inline.max_num = 0 inline_instances.append(inline) return inline_instances def get_urls(self): from django.urls import path def wrap(view): def wrapper(*args, **kwargs): return self.admin_site.admin_view(view)(*args, **kwargs) wrapper.model_admin = self return update_wrapper(wrapper, view) info = self.opts.app_label, self.opts.model_name return [ path("", wrap(self.changelist_view), name="%s_%s_changelist" % info), path("add/", wrap(self.add_view), name="%s_%s_add" % info), path( "<path:object_id>/history/", wrap(self.history_view), name="%s_%s_history" % info, ), path( "<path:object_id>/delete/", wrap(self.delete_view), name="%s_%s_delete" % info, ), path( "<path:object_id>/change/", wrap(self.change_view), name="%s_%s_change" % info, ), # For backwards compatibility (was the change url before 1.9) path( "<path:object_id>/", wrap( RedirectView.as_view( pattern_name="%s:%s_%s_change" % ((self.admin_site.name,) + info) ) ), ), ] @property def urls(self): return self.get_urls() @property def media(self): extra = "" if settings.DEBUG else ".min" js = [ "vendor/jquery/jquery%s.js" % extra, "jquery.init.js", "core.js", "admin/RelatedObjectLookups.js", "actions.js", "urlify.js", "prepopulate.js", "vendor/xregexp/xregexp%s.js" % extra, ] return forms.Media(js=["admin/js/%s" % url for url in js]) def get_model_perms(self, request): """ Return a dict of all perms for this model. This dict has the keys ``add``, ``change``, ``delete``, and ``view`` mapping to the True/False for each of those actions. 
""" return { "add": self.has_add_permission(request), "change": self.has_change_permission(request), "delete": self.has_delete_permission(request), "view": self.has_view_permission(request), } def _get_form_for_get_fields(self, request, obj): return self.get_form(request, obj, fields=None) def get_form(self, request, obj=None, change=False, **kwargs): """ Return a Form class for use in the admin add view. This is used by add_view and change_view. """ if "fields" in kwargs: fields = kwargs.pop("fields") else: fields = flatten_fieldsets(self.get_fieldsets(request, obj)) excluded = self.get_exclude(request, obj) exclude = [] if excluded is None else list(excluded) readonly_fields = self.get_readonly_fields(request, obj) exclude.extend(readonly_fields) # Exclude all fields if it's a change form and the user doesn't have # the change permission. if ( change and hasattr(request, "user") and not self.has_change_permission(request, obj) ): exclude.extend(fields) if excluded is None and hasattr(self.form, "_meta") and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # ModelAdmin doesn't define its own. exclude.extend(self.form._meta.exclude) # if exclude is an empty list we pass None to be consistent with the # default on modelform_factory exclude = exclude or None # Remove declared form fields which are in readonly_fields. new_attrs = dict.fromkeys( f for f in readonly_fields if f in self.form.declared_fields ) form = type(self.form.__name__, (self.form,), new_attrs) defaults = { "form": form, "fields": fields, "exclude": exclude, "formfield_callback": partial(self.formfield_for_dbfield, request=request), **kwargs, } if defaults["fields"] is None and not modelform_defines_fields( defaults["form"] ): defaults["fields"] = forms.ALL_FIELDS try: return modelform_factory(self.model, **defaults) except FieldError as e: raise FieldError( "%s. Check fields/fieldsets/exclude attributes of class %s." % (e, self.__class__.__name__) ) def get_changelist(self, request, **kwargs): """ Return the ChangeList class for use on the changelist page. """ from django.contrib.admin.views.main import ChangeList return ChangeList def get_changelist_instance(self, request): """ Return a `ChangeList` instance based on `request`. May raise `IncorrectLookupParameters`. """ list_display = self.get_list_display(request) list_display_links = self.get_list_display_links(request, list_display) # Add the action checkboxes if any actions are available. if self.get_actions(request): list_display = ["action_checkbox", *list_display] sortable_by = self.get_sortable_by(request) ChangeList = self.get_changelist(request) return ChangeList( request, self.model, list_display, list_display_links, self.get_list_filter(request), self.date_hierarchy, self.get_search_fields(request), self.get_list_select_related(request), self.list_per_page, self.list_max_show_all, self.list_editable, self, sortable_by, self.search_help_text, ) def get_object(self, request, object_id, from_field=None): """ Return an instance matching the field and value provided, the primary key is used if no field is provided. Return ``None`` if no match is found or the object_id fails validation. 
""" queryset = self.get_queryset(request) model = queryset.model field = ( model._meta.pk if from_field is None else model._meta.get_field(from_field) ) try: object_id = field.to_python(object_id) return queryset.get(**{field.name: object_id}) except (model.DoesNotExist, ValidationError, ValueError): return None def get_changelist_form(self, request, **kwargs): """ Return a Form class for use in the Formset on the changelist page. """ defaults = { "formfield_callback": partial(self.formfield_for_dbfield, request=request), **kwargs, } if defaults.get("fields") is None and not modelform_defines_fields( defaults.get("form") ): defaults["fields"] = forms.ALL_FIELDS return modelform_factory(self.model, **defaults) def get_changelist_formset(self, request, **kwargs): """ Return a FormSet class for use on the changelist page if list_editable is used. """ defaults = { "formfield_callback": partial(self.formfield_for_dbfield, request=request), **kwargs, } return modelformset_factory( self.model, self.get_changelist_form(request), extra=0, fields=self.list_editable, **defaults, ) def get_formsets_with_inlines(self, request, obj=None): """ Yield formsets and the corresponding inlines. """ for inline in self.get_inline_instances(request, obj): yield inline.get_formset(request, obj), inline def get_paginator( self, request, queryset, per_page, orphans=0, allow_empty_first_page=True ): return self.paginator(queryset, per_page, orphans, allow_empty_first_page) def log_addition(self, request, obj, message): """ Log that an object has been successfully added. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import ADDITION, LogEntry return LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(obj).pk, object_id=obj.pk, object_repr=str(obj), action_flag=ADDITION, change_message=message, ) def log_change(self, request, obj, message): """ Log that an object has been successfully changed. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import CHANGE, LogEntry return LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(obj).pk, object_id=obj.pk, object_repr=str(obj), action_flag=CHANGE, change_message=message, ) def log_deletion(self, request, obj, object_repr): """ Log that an object will be deleted. Note that this method must be called before the deletion. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import DELETION, LogEntry return LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(obj).pk, object_id=obj.pk, object_repr=object_repr, action_flag=DELETION, ) @display(description=mark_safe('<input type="checkbox" id="action-toggle">')) def action_checkbox(self, obj): """ A list_display column containing a checkbox widget. """ return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, str(obj.pk)) @staticmethod def _get_action_description(func, name): return getattr(func, "short_description", capfirst(name.replace("_", " "))) def _get_base_actions(self): """Return the list of actions, prior to any request-based filtering.""" actions = [] base_actions = (self.get_action(action) for action in self.actions or []) # get_action might have returned None, so filter any of those out. 
base_actions = [action for action in base_actions if action] base_action_names = {name for _, name, _ in base_actions} # Gather actions from the admin site first for (name, func) in self.admin_site.actions: if name in base_action_names: continue description = self._get_action_description(func, name) actions.append((func, name, description)) # Add actions from this ModelAdmin. actions.extend(base_actions) return actions def _filter_actions_by_permissions(self, request, actions): """Filter out any actions that the user doesn't have access to.""" filtered_actions = [] for action in actions: callable = action[0] if not hasattr(callable, "allowed_permissions"): filtered_actions.append(action) continue permission_checks = ( getattr(self, "has_%s_permission" % permission) for permission in callable.allowed_permissions ) if any(has_permission(request) for has_permission in permission_checks): filtered_actions.append(action) return filtered_actions def get_actions(self, request): """ Return a dictionary mapping the names of all actions for this ModelAdmin to a tuple of (callable, name, description) for each action. """ # If self.actions is set to None that means actions are disabled on # this page. if self.actions is None or IS_POPUP_VAR in request.GET: return {} actions = self._filter_actions_by_permissions(request, self._get_base_actions()) return {name: (func, name, desc) for func, name, desc in actions} def get_action_choices(self, request, default_choices=models.BLANK_CHOICE_DASH): """ Return a list of choices for use in a form object. Each choice is a tuple (name, description). """ choices = [] + default_choices for func, name, description in self.get_actions(request).values(): choice = (name, description % model_format_dict(self.opts)) choices.append(choice) return choices def get_action(self, action): """ Return a given action from a parameter, which can either be a callable, or the name of a method on the ModelAdmin. Return is a tuple of (callable, name, description). """ # If the action is a callable, just use it. if callable(action): func = action action = action.__name__ # Next, look for a method. Grab it off self.__class__ to get an unbound # method instead of a bound one; this ensures that the calling # conventions are the same for functions and methods. elif hasattr(self.__class__, action): func = getattr(self.__class__, action) # Finally, look for a named method on the admin site else: try: func = self.admin_site.get_action(action) except KeyError: return None description = self._get_action_description(func, action) return func, action, description def get_list_display(self, request): """ Return a sequence containing the fields to be displayed on the changelist. """ return self.list_display def get_list_display_links(self, request, list_display): """ Return a sequence containing the fields to be displayed as links on the changelist. The list_display parameter is the list of fields returned by get_list_display(). """ if ( self.list_display_links or self.list_display_links is None or not list_display ): return self.list_display_links else: # Use only the first item in list_display as link return list(list_display)[:1] def get_list_filter(self, request): """ Return a sequence containing the fields to be displayed as filters in the right sidebar of the changelist page. """ return self.list_filter def get_list_select_related(self, request): """ Return a list of fields to add to the select_related() part of the changelist items query. 
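
        A sketch of an override (``author`` and ``publisher`` are hypothetical
        related field names)::

            def get_list_select_related(self, request):
                return ["author", "publisher"]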
""" return self.list_select_related def get_search_fields(self, request): """ Return a sequence containing the fields to be searched whenever somebody submits a search query. """ return self.search_fields def get_search_results(self, request, queryset, search_term): """ Return a tuple containing a queryset to implement the search and a boolean indicating if the results may contain duplicates. """ # Apply keyword searches. def construct_search(field_name): if field_name.startswith("^"): return "%s__istartswith" % field_name[1:] elif field_name.startswith("="): return "%s__iexact" % field_name[1:] elif field_name.startswith("@"): return "%s__search" % field_name[1:] # Use field_name if it includes a lookup. opts = queryset.model._meta lookup_fields = field_name.split(LOOKUP_SEP) # Go through the fields, following all relations. prev_field = None for path_part in lookup_fields: if path_part == "pk": path_part = opts.pk.name try: field = opts.get_field(path_part) except FieldDoesNotExist: # Use valid query lookups. if prev_field and prev_field.get_lookup(path_part): return field_name else: prev_field = field if hasattr(field, "path_infos"): # Update opts to follow the relation. opts = field.path_infos[-1].to_opts # Otherwise, use the field with icontains. return "%s__icontains" % field_name may_have_duplicates = False search_fields = self.get_search_fields(request) if search_fields and search_term: orm_lookups = [ construct_search(str(search_field)) for search_field in search_fields ] term_queries = [] for bit in smart_split(search_term): if bit.startswith(('"', "'")) and bit[0] == bit[-1]: bit = unescape_string_literal(bit) or_queries = models.Q.create( [(orm_lookup, bit) for orm_lookup in orm_lookups], connector=models.Q.OR, ) term_queries.append(or_queries) queryset = queryset.filter(models.Q.create(term_queries)) may_have_duplicates |= any( lookup_spawns_duplicates(self.opts, search_spec) for search_spec in orm_lookups ) return queryset, may_have_duplicates def get_preserved_filters(self, request): """ Return the preserved filters querystring. """ match = request.resolver_match if self.preserve_filters and match: current_url = "%s:%s" % (match.app_name, match.url_name) changelist_url = "admin:%s_%s_changelist" % ( self.opts.app_label, self.opts.model_name, ) if current_url == changelist_url: preserved_filters = request.GET.urlencode() else: preserved_filters = request.GET.get("_changelist_filters") if preserved_filters: return urlencode({"_changelist_filters": preserved_filters}) return "" def construct_change_message(self, request, form, formsets, add=False): """ Construct a JSON structure describing changes from a changed object. """ return construct_change_message(form, formsets, add) def message_user( self, request, message, level=messages.INFO, extra_tags="", fail_silently=False ): """ Send a message to the user. The default implementation posts a message using the django.contrib.messages backend. Exposes almost the same API as messages.add_message(), but accepts the positional arguments in a different order to maintain backwards compatibility. For convenience, it accepts the `level` argument as a string rather than the usual level number. """ if not isinstance(level, int): # attempt to get the level if passed a string try: level = getattr(messages.constants, level.upper()) except AttributeError: levels = messages.constants.DEFAULT_TAGS.values() levels_repr = ", ".join("`%s`" % level for level in levels) raise ValueError( "Bad message level string: `%s`. 
Possible values are: %s" % (level, levels_repr) ) messages.add_message( request, level, message, extra_tags=extra_tags, fail_silently=fail_silently ) def save_form(self, request, form, change): """ Given a ModelForm return an unsaved instance. ``change`` is True if the object is being changed, and False if it's being added. """ return form.save(commit=False) def save_model(self, request, obj, form, change): """ Given a model instance save it to the database. """ obj.save() def delete_model(self, request, obj): """ Given a model instance delete it from the database. """ obj.delete() def delete_queryset(self, request, queryset): """Given a queryset, delete it from the database.""" queryset.delete() def save_formset(self, request, form, formset, change): """ Given an inline formset save it to the database. """ formset.save() def save_related(self, request, form, formsets, change): """ Given the ``HttpRequest``, the parent ``ModelForm`` instance, the list of inline formsets and a boolean value based on whether the parent is being added or changed, save the related objects to the database. Note that at this point save_form() and save_model() have already been called. """ form.save_m2m() for formset in formsets: self.save_formset(request, form, formset, change=change) def render_change_form( self, request, context, add=False, change=False, form_url="", obj=None ): app_label = self.opts.app_label preserved_filters = self.get_preserved_filters(request) form_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": self.opts}, form_url ) view_on_site_url = self.get_view_on_site_url(obj) has_editable_inline_admin_formsets = False for inline in context["inline_admin_formsets"]: if ( inline.has_add_permission or inline.has_change_permission or inline.has_delete_permission ): has_editable_inline_admin_formsets = True break context.update( { "add": add, "change": change, "has_view_permission": self.has_view_permission(request, obj), "has_add_permission": self.has_add_permission(request), "has_change_permission": self.has_change_permission(request, obj), "has_delete_permission": self.has_delete_permission(request, obj), "has_editable_inline_admin_formsets": ( has_editable_inline_admin_formsets ), "has_file_field": context["adminform"].form.is_multipart() or any( admin_formset.formset.is_multipart() for admin_formset in context["inline_admin_formsets"] ), "has_absolute_url": view_on_site_url is not None, "absolute_url": view_on_site_url, "form_url": form_url, "opts": self.opts, "content_type_id": get_content_type_for_model(self.model).pk, "save_as": self.save_as, "save_on_top": self.save_on_top, "to_field_var": TO_FIELD_VAR, "is_popup_var": IS_POPUP_VAR, "app_label": app_label, } ) if add and self.add_form_template is not None: form_template = self.add_form_template else: form_template = self.change_form_template request.current_app = self.admin_site.name return TemplateResponse( request, form_template or [ "admin/%s/%s/change_form.html" % (app_label, self.opts.model_name), "admin/%s/change_form.html" % app_label, "admin/change_form.html", ], context, ) def response_add(self, request, obj, post_url_continue=None): """ Determine the HttpResponse for the add_view stage. """ opts = obj._meta preserved_filters = self.get_preserved_filters(request) obj_url = reverse( "admin:%s_%s_change" % (opts.app_label, opts.model_name), args=(quote(obj.pk),), current_app=self.admin_site.name, ) # Add a link to the object's change form if the user can edit the obj. 
if self.has_change_permission(request, obj): obj_repr = format_html('<a href="{}">{}</a>', urlquote(obj_url), obj) else: obj_repr = str(obj) msg_dict = { "name": opts.verbose_name, "obj": obj_repr, } # Here, we distinguish between different save types by checking for # the presence of keys in request.POST. if IS_POPUP_VAR in request.POST: to_field = request.POST.get(TO_FIELD_VAR) if to_field: attr = str(to_field) else: attr = obj._meta.pk.attname value = obj.serializable_value(attr) popup_response_data = json.dumps( { "value": str(value), "obj": str(obj), } ) return TemplateResponse( request, self.popup_response_template or [ "admin/%s/%s/popup_response.html" % (opts.app_label, opts.model_name), "admin/%s/popup_response.html" % opts.app_label, "admin/popup_response.html", ], { "popup_response_data": popup_response_data, }, ) elif "_continue" in request.POST or ( # Redirecting after "Save as new". "_saveasnew" in request.POST and self.save_as_continue and self.has_change_permission(request, obj) ): msg = _("The {name} “{obj}” was added successfully.") if self.has_change_permission(request, obj): msg += " " + _("You may edit it again below.") self.message_user(request, format_html(msg, **msg_dict), messages.SUCCESS) if post_url_continue is None: post_url_continue = obj_url post_url_continue = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, post_url_continue, ) return HttpResponseRedirect(post_url_continue) elif "_addanother" in request.POST: msg = format_html( _( "The {name} “{obj}” was added successfully. You may add another " "{name} below." ), **msg_dict, ) self.message_user(request, msg, messages.SUCCESS) redirect_url = request.path redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, redirect_url ) return HttpResponseRedirect(redirect_url) else: msg = format_html( _("The {name} “{obj}” was added successfully."), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) return self.response_post_save_add(request, obj) def response_change(self, request, obj): """ Determine the HttpResponse for the change_view stage. """ if IS_POPUP_VAR in request.POST: opts = obj._meta to_field = request.POST.get(TO_FIELD_VAR) attr = str(to_field) if to_field else opts.pk.attname value = request.resolver_match.kwargs["object_id"] new_value = obj.serializable_value(attr) popup_response_data = json.dumps( { "action": "change", "value": str(value), "obj": str(obj), "new_value": str(new_value), } ) return TemplateResponse( request, self.popup_response_template or [ "admin/%s/%s/popup_response.html" % (opts.app_label, opts.model_name), "admin/%s/popup_response.html" % opts.app_label, "admin/popup_response.html", ], { "popup_response_data": popup_response_data, }, ) opts = self.opts preserved_filters = self.get_preserved_filters(request) msg_dict = { "name": opts.verbose_name, "obj": format_html('<a href="{}">{}</a>', urlquote(request.path), obj), } if "_continue" in request.POST: msg = format_html( _( "The {name} “{obj}” was changed successfully. You may edit it " "again below." ), **msg_dict, ) self.message_user(request, msg, messages.SUCCESS) redirect_url = request.path redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, redirect_url ) return HttpResponseRedirect(redirect_url) elif "_saveasnew" in request.POST: msg = format_html( _( "The {name} “{obj}” was added successfully. You may edit it again " "below." 
), **msg_dict, ) self.message_user(request, msg, messages.SUCCESS) redirect_url = reverse( "admin:%s_%s_change" % (opts.app_label, opts.model_name), args=(obj.pk,), current_app=self.admin_site.name, ) redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, redirect_url ) return HttpResponseRedirect(redirect_url) elif "_addanother" in request.POST: msg = format_html( _( "The {name} “{obj}” was changed successfully. You may add another " "{name} below." ), **msg_dict, ) self.message_user(request, msg, messages.SUCCESS) redirect_url = reverse( "admin:%s_%s_add" % (opts.app_label, opts.model_name), current_app=self.admin_site.name, ) redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, redirect_url ) return HttpResponseRedirect(redirect_url) else: msg = format_html( _("The {name} “{obj}” was changed successfully."), **msg_dict ) self.message_user(request, msg, messages.SUCCESS) return self.response_post_save_change(request, obj) def _response_post_save(self, request, obj): if self.has_view_or_change_permission(request): post_url = reverse( "admin:%s_%s_changelist" % (self.opts.app_label, self.opts.model_name), current_app=self.admin_site.name, ) preserved_filters = self.get_preserved_filters(request) post_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": self.opts}, post_url ) else: post_url = reverse("admin:index", current_app=self.admin_site.name) return HttpResponseRedirect(post_url) def response_post_save_add(self, request, obj): """ Figure out where to redirect after the 'Save' button has been pressed when adding a new object. """ return self._response_post_save(request, obj) def response_post_save_change(self, request, obj): """ Figure out where to redirect after the 'Save' button has been pressed when editing an existing object. """ return self._response_post_save(request, obj) def response_action(self, request, queryset): """ Handle an admin action. This is called if a request is POSTed to the changelist; it returns an HttpResponse if the action was handled, and None otherwise. """ # There can be multiple action forms on the page (at the top # and bottom of the change list, for example). Get the action # whose button was pushed. try: action_index = int(request.POST.get("index", 0)) except ValueError: action_index = 0 # Construct the action form. data = request.POST.copy() data.pop(helpers.ACTION_CHECKBOX_NAME, None) data.pop("index", None) # Use the action whose button was pushed try: data.update({"action": data.getlist("action")[action_index]}) except IndexError: # If we didn't get an action from the chosen form that's invalid # POST data, so by deleting action it'll fail the validation check # below. So no need to do anything here pass action_form = self.action_form(data, auto_id=None) action_form.fields["action"].choices = self.get_action_choices(request) # If the form's valid we can handle the action. if action_form.is_valid(): action = action_form.cleaned_data["action"] select_across = action_form.cleaned_data["select_across"] func = self.get_actions(request)[action][0] # Get the list of selected PKs. If nothing's selected, we can't # perform an action on it, so bail. Except we want to perform # the action explicitly on all objects. selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) if not selected and not select_across: # Reminder that something needs to be selected or nothing will happen msg = _( "Items must be selected in order to perform " "actions on them. 
No items have been changed." ) self.message_user(request, msg, messages.WARNING) return None if not select_across: # Perform the action only on the selected objects queryset = queryset.filter(pk__in=selected) response = func(self, request, queryset) # Actions may return an HttpResponse-like object, which will be # used as the response from the POST. If not, we'll be a good # little HTTP citizen and redirect back to the changelist page. if isinstance(response, HttpResponseBase): return response else: return HttpResponseRedirect(request.get_full_path()) else: msg = _("No action selected.") self.message_user(request, msg, messages.WARNING) return None def response_delete(self, request, obj_display, obj_id): """ Determine the HttpResponse for the delete_view stage. """ if IS_POPUP_VAR in request.POST: popup_response_data = json.dumps( { "action": "delete", "value": str(obj_id), } ) return TemplateResponse( request, self.popup_response_template or [ "admin/%s/%s/popup_response.html" % (self.opts.app_label, self.opts.model_name), "admin/%s/popup_response.html" % self.opts.app_label, "admin/popup_response.html", ], { "popup_response_data": popup_response_data, }, ) self.message_user( request, _("The %(name)s “%(obj)s” was deleted successfully.") % { "name": self.opts.verbose_name, "obj": obj_display, }, messages.SUCCESS, ) if self.has_change_permission(request, None): post_url = reverse( "admin:%s_%s_changelist" % (self.opts.app_label, self.opts.model_name), current_app=self.admin_site.name, ) preserved_filters = self.get_preserved_filters(request) post_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": self.opts}, post_url ) else: post_url = reverse("admin:index", current_app=self.admin_site.name) return HttpResponseRedirect(post_url) def render_delete_form(self, request, context): app_label = self.opts.app_label request.current_app = self.admin_site.name context.update( to_field_var=TO_FIELD_VAR, is_popup_var=IS_POPUP_VAR, media=self.media, ) return TemplateResponse( request, self.delete_confirmation_template or [ "admin/{}/{}/delete_confirmation.html".format( app_label, self.opts.model_name ), "admin/{}/delete_confirmation.html".format(app_label), "admin/delete_confirmation.html", ], context, ) def get_inline_formsets(self, request, formsets, inline_instances, obj=None): # Edit permissions on parent model are required for editable inlines. can_edit_parent = ( self.has_change_permission(request, obj) if obj else self.has_add_permission(request) ) inline_admin_formsets = [] for inline, formset in zip(inline_instances, formsets): fieldsets = list(inline.get_fieldsets(request, obj)) readonly = list(inline.get_readonly_fields(request, obj)) if can_edit_parent: has_add_permission = inline.has_add_permission(request, obj) has_change_permission = inline.has_change_permission(request, obj) has_delete_permission = inline.has_delete_permission(request, obj) else: # Disable all edit-permissions, and override formset settings. 
has_add_permission = ( has_change_permission ) = has_delete_permission = False formset.extra = formset.max_num = 0 has_view_permission = inline.has_view_permission(request, obj) prepopulated = dict(inline.get_prepopulated_fields(request, obj)) inline_admin_formset = helpers.InlineAdminFormSet( inline, formset, fieldsets, prepopulated, readonly, model_admin=self, has_add_permission=has_add_permission, has_change_permission=has_change_permission, has_delete_permission=has_delete_permission, has_view_permission=has_view_permission, ) inline_admin_formsets.append(inline_admin_formset) return inline_admin_formsets def get_changeform_initial_data(self, request): """ Get the initial form data from the request's GET params. """ initial = dict(request.GET.items()) for k in initial: try: f = self.opts.get_field(k) except FieldDoesNotExist: continue # We have to special-case M2Ms as a list of comma-separated PKs. if isinstance(f, models.ManyToManyField): initial[k] = initial[k].split(",") return initial def _get_obj_does_not_exist_redirect(self, request, opts, object_id): """ Create a message informing the user that the object doesn't exist and return a redirect to the admin index page. """ msg = _("%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?") % { "name": opts.verbose_name, "key": unquote(object_id), } self.message_user(request, msg, messages.WARNING) url = reverse("admin:index", current_app=self.admin_site.name) return HttpResponseRedirect(url) @csrf_protect_m def changeform_view(self, request, object_id=None, form_url="", extra_context=None): with transaction.atomic(using=router.db_for_write(self.model)): return self._changeform_view(request, object_id, form_url, extra_context) def _changeform_view(self, request, object_id, form_url, extra_context): to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR)) if to_field and not self.to_field_allowed(request, to_field): raise DisallowedModelAdminToField( "The field %s cannot be referenced." 
% to_field ) if request.method == "POST" and "_saveasnew" in request.POST: object_id = None add = object_id is None if add: if not self.has_add_permission(request): raise PermissionDenied obj = None else: obj = self.get_object(request, unquote(object_id), to_field) if request.method == "POST": if not self.has_change_permission(request, obj): raise PermissionDenied else: if not self.has_view_or_change_permission(request, obj): raise PermissionDenied if obj is None: return self._get_obj_does_not_exist_redirect( request, self.opts, object_id ) fieldsets = self.get_fieldsets(request, obj) ModelForm = self.get_form( request, obj, change=not add, fields=flatten_fieldsets(fieldsets) ) if request.method == "POST": form = ModelForm(request.POST, request.FILES, instance=obj) formsets, inline_instances = self._create_formsets( request, form.instance, change=not add, ) form_validated = form.is_valid() if form_validated: new_object = self.save_form(request, form, change=not add) else: new_object = form.instance if all_valid(formsets) and form_validated: self.save_model(request, new_object, form, not add) self.save_related(request, form, formsets, not add) change_message = self.construct_change_message( request, form, formsets, add ) if add: self.log_addition(request, new_object, change_message) return self.response_add(request, new_object) else: self.log_change(request, new_object, change_message) return self.response_change(request, new_object) else: form_validated = False else: if add: initial = self.get_changeform_initial_data(request) form = ModelForm(initial=initial) formsets, inline_instances = self._create_formsets( request, form.instance, change=False ) else: form = ModelForm(instance=obj) formsets, inline_instances = self._create_formsets( request, obj, change=True ) if not add and not self.has_change_permission(request, obj): readonly_fields = flatten_fieldsets(fieldsets) else: readonly_fields = self.get_readonly_fields(request, obj) admin_form = helpers.AdminForm( form, list(fieldsets), # Clear prepopulated fields on a view-only form to avoid a crash. self.get_prepopulated_fields(request, obj) if add or self.has_change_permission(request, obj) else {}, readonly_fields, model_admin=self, ) media = self.media + admin_form.media inline_formsets = self.get_inline_formsets( request, formsets, inline_instances, obj ) for inline_formset in inline_formsets: media += inline_formset.media if add: title = _("Add %s") elif self.has_change_permission(request, obj): title = _("Change %s") else: title = _("View %s") context = { **self.admin_site.each_context(request), "title": title % self.opts.verbose_name, "subtitle": str(obj) if obj else None, "adminform": admin_form, "object_id": object_id, "original": obj, "is_popup": IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET, "to_field": to_field, "media": media, "inline_admin_formsets": inline_formsets, "errors": helpers.AdminErrorList(form, formsets), "preserved_filters": self.get_preserved_filters(request), } # Hide the "Save" and "Save and continue" buttons if "Save as New" was # previously chosen to prevent the interface from getting confusing. if ( request.method == "POST" and not form_validated and "_saveasnew" in request.POST ): context["show_save"] = False context["show_save_and_continue"] = False # Use the change template instead of the add template. 
add = False context.update(extra_context or {}) return self.render_change_form( request, context, add=add, change=not add, obj=obj, form_url=form_url ) def add_view(self, request, form_url="", extra_context=None): return self.changeform_view(request, None, form_url, extra_context) def change_view(self, request, object_id, form_url="", extra_context=None): return self.changeform_view(request, object_id, form_url, extra_context) def _get_edited_object_pks(self, request, prefix): """Return POST data values of list_editable primary keys.""" pk_pattern = re.compile( r"{}-\d+-{}$".format(re.escape(prefix), self.opts.pk.name) ) return [value for key, value in request.POST.items() if pk_pattern.match(key)] def _get_list_editable_queryset(self, request, prefix): """ Based on POST data, return a queryset of the objects that were edited via list_editable. """ object_pks = self._get_edited_object_pks(request, prefix) queryset = self.get_queryset(request) validate = queryset.model._meta.pk.to_python try: for pk in object_pks: validate(pk) except ValidationError: # Disable the optimization if the POST data was tampered with. return queryset return queryset.filter(pk__in=object_pks) @csrf_protect_m def changelist_view(self, request, extra_context=None): """ The 'change list' admin view for this model. """ from django.contrib.admin.views.main import ERROR_FLAG app_label = self.opts.app_label if not self.has_view_or_change_permission(request): raise PermissionDenied try: cl = self.get_changelist_instance(request) except IncorrectLookupParameters: # Wacky lookup parameters were given, so redirect to the main # changelist page, without parameters, and pass an 'invalid=1' # parameter via the query string. If wacky parameters were given # and the 'invalid=1' parameter was already in the query string, # something is screwed up with the database, so display an error # page. if ERROR_FLAG in request.GET: return SimpleTemplateResponse( "admin/invalid_setup.html", { "title": _("Database error"), }, ) return HttpResponseRedirect(request.path + "?" + ERROR_FLAG + "=1") # If the request was POSTed, this might be a bulk action or a bulk # edit. Try to look up an action or confirmation first, but if this # isn't an action the POST will fall through to the bulk edit check, # below. action_failed = False selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME) actions = self.get_actions(request) # Actions with no confirmation if ( actions and request.method == "POST" and "index" in request.POST and "_save" not in request.POST ): if selected: response = self.response_action( request, queryset=cl.get_queryset(request) ) if response: return response else: action_failed = True else: msg = _( "Items must be selected in order to perform " "actions on them. No items have been changed." ) self.message_user(request, msg, messages.WARNING) action_failed = True # Actions with confirmation if ( actions and request.method == "POST" and helpers.ACTION_CHECKBOX_NAME in request.POST and "index" not in request.POST and "_save" not in request.POST ): if selected: response = self.response_action( request, queryset=cl.get_queryset(request) ) if response: return response else: action_failed = True if action_failed: # Redirect back to the changelist page to avoid resubmitting the # form if the user refreshes the browser or uses the "No, take # me back" button on the action confirmation page. 
return HttpResponseRedirect(request.get_full_path()) # If we're allowing changelist editing, we need to construct a formset # for the changelist given all the fields to be edited. Then we'll # use the formset to validate/process POSTed data. formset = cl.formset = None # Handle POSTed bulk-edit data. if request.method == "POST" and cl.list_editable and "_save" in request.POST: if not self.has_change_permission(request): raise PermissionDenied FormSet = self.get_changelist_formset(request) modified_objects = self._get_list_editable_queryset( request, FormSet.get_default_prefix() ) formset = cl.formset = FormSet( request.POST, request.FILES, queryset=modified_objects ) if formset.is_valid(): changecount = 0 with transaction.atomic(using=router.db_for_write(self.model)): for form in formset.forms: if form.has_changed(): obj = self.save_form(request, form, change=True) self.save_model(request, obj, form, change=True) self.save_related(request, form, formsets=[], change=True) change_msg = self.construct_change_message( request, form, None ) self.log_change(request, obj, change_msg) changecount += 1 if changecount: msg = ngettext( "%(count)s %(name)s was changed successfully.", "%(count)s %(name)s were changed successfully.", changecount, ) % { "count": changecount, "name": model_ngettext(self.opts, changecount), } self.message_user(request, msg, messages.SUCCESS) return HttpResponseRedirect(request.get_full_path()) # Handle GET -- construct a formset for display. elif cl.list_editable and self.has_change_permission(request): FormSet = self.get_changelist_formset(request) formset = cl.formset = FormSet(queryset=cl.result_list) # Build the list of media to be used by the formset. if formset: media = self.media + formset.media else: media = self.media # Build the action form and populate it with available actions. if actions: action_form = self.action_form(auto_id=None) action_form.fields["action"].choices = self.get_action_choices(request) media += action_form.media else: action_form = None selection_note_all = ngettext( "%(total_count)s selected", "All %(total_count)s selected", cl.result_count ) context = { **self.admin_site.each_context(request), "module_name": str(self.opts.verbose_name_plural), "selection_note": _("0 of %(cnt)s selected") % {"cnt": len(cl.result_list)}, "selection_note_all": selection_note_all % {"total_count": cl.result_count}, "title": cl.title, "subtitle": None, "is_popup": cl.is_popup, "to_field": cl.to_field, "cl": cl, "media": media, "has_add_permission": self.has_add_permission(request), "opts": cl.opts, "action_form": action_form, "actions_on_top": self.actions_on_top, "actions_on_bottom": self.actions_on_bottom, "actions_selection_counter": self.actions_selection_counter, "preserved_filters": self.get_preserved_filters(request), **(extra_context or {}), } request.current_app = self.admin_site.name return TemplateResponse( request, self.change_list_template or [ "admin/%s/%s/change_list.html" % (app_label, self.opts.model_name), "admin/%s/change_list.html" % app_label, "admin/change_list.html", ], context, ) def get_deleted_objects(self, objs, request): """ Hook for customizing the delete process for the delete view and the "delete selected" action. 
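
        The default implementation returns a 4-tuple of (deleted_objects,
        model_count, perms_needed, protected), which is what the delete views
        and the "delete selected" action unpack.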
""" return get_deleted_objects(objs, request, self.admin_site) @csrf_protect_m def delete_view(self, request, object_id, extra_context=None): with transaction.atomic(using=router.db_for_write(self.model)): return self._delete_view(request, object_id, extra_context) def _delete_view(self, request, object_id, extra_context): "The 'delete' admin view for this model." app_label = self.opts.app_label to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR)) if to_field and not self.to_field_allowed(request, to_field): raise DisallowedModelAdminToField( "The field %s cannot be referenced." % to_field ) obj = self.get_object(request, unquote(object_id), to_field) if not self.has_delete_permission(request, obj): raise PermissionDenied if obj is None: return self._get_obj_does_not_exist_redirect(request, self.opts, object_id) # Populate deleted_objects, a data structure of all related objects that # will also be deleted. ( deleted_objects, model_count, perms_needed, protected, ) = self.get_deleted_objects([obj], request) if request.POST and not protected: # The user has confirmed the deletion. if perms_needed: raise PermissionDenied obj_display = str(obj) attr = str(to_field) if to_field else self.opts.pk.attname obj_id = obj.serializable_value(attr) self.log_deletion(request, obj, obj_display) self.delete_model(request, obj) return self.response_delete(request, obj_display, obj_id) object_name = str(self.opts.verbose_name) if perms_needed or protected: title = _("Cannot delete %(name)s") % {"name": object_name} else: title = _("Are you sure?") context = { **self.admin_site.each_context(request), "title": title, "subtitle": None, "object_name": object_name, "object": obj, "deleted_objects": deleted_objects, "model_count": dict(model_count).items(), "perms_lacking": perms_needed, "protected": protected, "opts": self.opts, "app_label": app_label, "preserved_filters": self.get_preserved_filters(request), "is_popup": IS_POPUP_VAR in request.POST or IS_POPUP_VAR in request.GET, "to_field": to_field, **(extra_context or {}), } return self.render_delete_form(request, context) def history_view(self, request, object_id, extra_context=None): "The 'history' admin view for this model." from django.contrib.admin.models import LogEntry from django.contrib.admin.views.main import PAGE_VAR # First check if the user can see this history. model = self.model obj = self.get_object(request, unquote(object_id)) if obj is None: return self._get_obj_does_not_exist_redirect( request, model._meta, object_id ) if not self.has_view_or_change_permission(request, obj): raise PermissionDenied # Then get the history for this object. 
app_label = self.opts.app_label action_list = ( LogEntry.objects.filter( object_id=unquote(object_id), content_type=get_content_type_for_model(model), ) .select_related() .order_by("action_time") ) paginator = self.get_paginator(request, action_list, 100) page_number = request.GET.get(PAGE_VAR, 1) page_obj = paginator.get_page(page_number) page_range = paginator.get_elided_page_range(page_obj.number) context = { **self.admin_site.each_context(request), "title": _("Change history: %s") % obj, "subtitle": None, "action_list": page_obj, "page_range": page_range, "page_var": PAGE_VAR, "pagination_required": paginator.count > 100, "module_name": str(capfirst(self.opts.verbose_name_plural)), "object": obj, "opts": self.opts, "preserved_filters": self.get_preserved_filters(request), **(extra_context or {}), } request.current_app = self.admin_site.name return TemplateResponse( request, self.object_history_template or [ "admin/%s/%s/object_history.html" % (app_label, self.opts.model_name), "admin/%s/object_history.html" % app_label, "admin/object_history.html", ], context, ) def get_formset_kwargs(self, request, obj, inline, prefix): formset_params = { "instance": obj, "prefix": prefix, "queryset": inline.get_queryset(request), } if request.method == "POST": formset_params.update( { "data": request.POST.copy(), "files": request.FILES, "save_as_new": "_saveasnew" in request.POST, } ) return formset_params def _create_formsets(self, request, obj, change): "Helper function to generate formsets for add/change_view." formsets = [] inline_instances = [] prefixes = {} get_formsets_args = [request] if change: get_formsets_args.append(obj) for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args): prefix = FormSet.get_default_prefix() prefixes[prefix] = prefixes.get(prefix, 0) + 1 if prefixes[prefix] != 1 or not prefix: prefix = "%s-%s" % (prefix, prefixes[prefix]) formset_params = self.get_formset_kwargs(request, obj, inline, prefix) formset = FormSet(**formset_params) def user_deleted_form(request, obj, formset, index, inline): """Return whether or not the user deleted the form.""" return ( inline.has_delete_permission(request, obj) and "{}-{}-DELETE".format(formset.prefix, index) in request.POST ) # Bypass validation of each view-only inline form (since the form's # data won't be in request.POST), unless the form was deleted. if not inline.has_change_permission(request, obj if change else None): for index, form in enumerate(formset.initial_forms): if user_deleted_form(request, obj, formset, index, inline): continue form._errors = {} form.cleaned_data = form.initial formsets.append(formset) inline_instances.append(inline) return formsets, inline_instances class InlineModelAdmin(BaseModelAdmin): """ Options for inline editing of ``model`` instances. Provide ``fk_name`` to specify the attribute name of the ``ForeignKey`` from ``model`` to its parent. This is required if ``model`` has more than one ``ForeignKey`` to its parent. 
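
    A minimal sketch (``Author`` and ``Book`` are hypothetical models)::

        class BookInline(TabularInline):
            model = Book
            fk_name = "author"  # Needed only if Book has several FKs to Author.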
""" model = None fk_name = None formset = BaseInlineFormSet extra = 3 min_num = None max_num = None template = None verbose_name = None verbose_name_plural = None can_delete = True show_change_link = False checks_class = InlineModelAdminChecks classes = None def __init__(self, parent_model, admin_site): self.admin_site = admin_site self.parent_model = parent_model self.opts = self.model._meta self.has_registered_model = admin_site.is_registered(self.model) super().__init__() if self.verbose_name_plural is None: if self.verbose_name is None: self.verbose_name_plural = self.opts.verbose_name_plural else: self.verbose_name_plural = format_lazy("{}s", self.verbose_name) if self.verbose_name is None: self.verbose_name = self.opts.verbose_name @property def media(self): extra = "" if settings.DEBUG else ".min" js = ["vendor/jquery/jquery%s.js" % extra, "jquery.init.js", "inlines.js"] if self.filter_vertical or self.filter_horizontal: js.extend(["SelectBox.js", "SelectFilter2.js"]) if self.classes and "collapse" in self.classes: js.append("collapse.js") return forms.Media(js=["admin/js/%s" % url for url in js]) def get_extra(self, request, obj=None, **kwargs): """Hook for customizing the number of extra inline forms.""" return self.extra def get_min_num(self, request, obj=None, **kwargs): """Hook for customizing the min number of inline forms.""" return self.min_num def get_max_num(self, request, obj=None, **kwargs): """Hook for customizing the max number of extra inline forms.""" return self.max_num def get_formset(self, request, obj=None, **kwargs): """Return a BaseInlineFormSet class for use in admin add/change views.""" if "fields" in kwargs: fields = kwargs.pop("fields") else: fields = flatten_fieldsets(self.get_fieldsets(request, obj)) excluded = self.get_exclude(request, obj) exclude = [] if excluded is None else list(excluded) exclude.extend(self.get_readonly_fields(request, obj)) if excluded is None and hasattr(self.form, "_meta") and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # InlineModelAdmin doesn't define its own. exclude.extend(self.form._meta.exclude) # If exclude is an empty list we use None, since that's the actual # default. exclude = exclude or None can_delete = self.can_delete and self.has_delete_permission(request, obj) defaults = { "form": self.form, "formset": self.formset, "fk_name": self.fk_name, "fields": fields, "exclude": exclude, "formfield_callback": partial(self.formfield_for_dbfield, request=request), "extra": self.get_extra(request, obj, **kwargs), "min_num": self.get_min_num(request, obj, **kwargs), "max_num": self.get_max_num(request, obj, **kwargs), "can_delete": can_delete, **kwargs, } base_model_form = defaults["form"] can_change = self.has_change_permission(request, obj) if request else True can_add = self.has_add_permission(request, obj) if request else True class DeleteProtectedModelForm(base_model_form): def hand_clean_DELETE(self): """ We don't validate the 'DELETE' field itself because on templates it's not rendered using the field information, but just using a generic "deletion_field" of the InlineModelAdmin. """ if self.cleaned_data.get(DELETION_FIELD_NAME, False): using = router.db_for_write(self._meta.model) collector = NestedObjects(using=using) if self.instance._state.adding: return collector.collect([self.instance]) if collector.protected: objs = [] for p in collector.protected: objs.append( # Translators: Model verbose name and instance # representation, suitable to be an item in a # list. 
_("%(class_name)s %(instance)s") % {"class_name": p._meta.verbose_name, "instance": p} ) params = { "class_name": self._meta.model._meta.verbose_name, "instance": self.instance, "related_objects": get_text_list(objs, _("and")), } msg = _( "Deleting %(class_name)s %(instance)s would require " "deleting the following protected related objects: " "%(related_objects)s" ) raise ValidationError( msg, code="deleting_protected", params=params ) def is_valid(self): result = super().is_valid() self.hand_clean_DELETE() return result def has_changed(self): # Protect against unauthorized edits. if not can_change and not self.instance._state.adding: return False if not can_add and self.instance._state.adding: return False return super().has_changed() defaults["form"] = DeleteProtectedModelForm if defaults["fields"] is None and not modelform_defines_fields( defaults["form"] ): defaults["fields"] = forms.ALL_FIELDS return inlineformset_factory(self.parent_model, self.model, **defaults) def _get_form_for_get_fields(self, request, obj=None): return self.get_formset(request, obj, fields=None).form def get_queryset(self, request): queryset = super().get_queryset(request) if not self.has_view_or_change_permission(request): queryset = queryset.none() return queryset def _has_any_perms_for_target_model(self, request, perms): """ This method is called only when the ModelAdmin's model is for an ManyToManyField's implicit through model (if self.opts.auto_created). Return True if the user has any of the given permissions ('add', 'change', etc.) for the model that points to the through model. """ opts = self.opts # Find the target model of an auto-created many-to-many relationship. for field in opts.fields: if field.remote_field and field.remote_field.model != self.parent_model: opts = field.remote_field.model._meta break return any( request.user.has_perm( "%s.%s" % (opts.app_label, get_permission_codename(perm, opts)) ) for perm in perms ) def has_add_permission(self, request, obj): if self.opts.auto_created: # Auto-created intermediate models don't have their own # permissions. The user needs to have the change permission for the # related model in order to be able to do anything with the # intermediate model. return self._has_any_perms_for_target_model(request, ["change"]) return super().has_add_permission(request) def has_change_permission(self, request, obj=None): if self.opts.auto_created: # Same comment as has_add_permission(). return self._has_any_perms_for_target_model(request, ["change"]) return super().has_change_permission(request) def has_delete_permission(self, request, obj=None): if self.opts.auto_created: # Same comment as has_add_permission(). return self._has_any_perms_for_target_model(request, ["change"]) return super().has_delete_permission(request, obj) def has_view_permission(self, request, obj=None): if self.opts.auto_created: # Same comment as has_add_permission(). The 'change' permission # also implies the 'view' permission. return self._has_any_perms_for_target_model(request, ["view", "change"]) return super().has_view_permission(request) class StackedInline(InlineModelAdmin): template = "admin/edit_inline/stacked.html" class TabularInline(InlineModelAdmin): template = "admin/edit_inline/tabular.html"
0aa62be0b9d01539738deddd21492e24e34b615f2872c60b8122c771a2d257e4
import functools import itertools from collections import defaultdict from asgiref.sync import sync_to_async from django.contrib.contenttypes.models import ContentType from django.core import checks from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist from django.db import DEFAULT_DB_ALIAS, models, router, transaction from django.db.models import DO_NOTHING, ForeignObject, ForeignObjectRel from django.db.models.base import ModelBase, make_foreign_order_accessors from django.db.models.fields.mixins import FieldCacheMixin from django.db.models.fields.related import ( ReverseManyToOneDescriptor, lazy_related_operation, ) from django.db.models.query_utils import PathInfo from django.db.models.sql import AND from django.db.models.sql.where import WhereNode from django.db.models.utils import AltersData from django.utils.functional import cached_property class GenericForeignKey(FieldCacheMixin): """ Provide a generic many-to-one relation through the ``content_type`` and ``object_id`` fields. This class also doubles as an accessor to the related object (similar to ForwardManyToOneDescriptor) by adding itself as a model attribute. """ # Field flags auto_created = False concrete = False editable = False hidden = False is_relation = True many_to_many = False many_to_one = True one_to_many = False one_to_one = False related_model = None remote_field = None def __init__( self, ct_field="content_type", fk_field="object_id", for_concrete_model=True ): self.ct_field = ct_field self.fk_field = fk_field self.for_concrete_model = for_concrete_model self.editable = False self.rel = None self.column = None def contribute_to_class(self, cls, name, **kwargs): self.name = name self.model = cls cls._meta.add_field(self, private=True) setattr(cls, name, self) def get_filter_kwargs_for_object(self, obj): """See corresponding method on Field""" return { self.fk_field: getattr(obj, self.fk_field), self.ct_field: getattr(obj, self.ct_field), } def get_forward_related_filter(self, obj): """See corresponding method on RelatedField""" return { self.fk_field: obj.pk, self.ct_field: ContentType.objects.get_for_model(obj).pk, } def __str__(self): model = self.model return "%s.%s" % (model._meta.label, self.name) def check(self, **kwargs): return [ *self._check_field_name(), *self._check_object_id_field(), *self._check_content_type_field(), ] def _check_field_name(self): if self.name.endswith("_"): return [ checks.Error( "Field names must not end with an underscore.", obj=self, id="fields.E001", ) ] else: return [] def _check_object_id_field(self): try: self.model._meta.get_field(self.fk_field) except FieldDoesNotExist: return [ checks.Error( "The GenericForeignKey object ID references the " "nonexistent field '%s'." % self.fk_field, obj=self, id="contenttypes.E001", ) ] else: return [] def _check_content_type_field(self): """ Check if field named `field_name` in model `model` exists and is a valid content_type field (is a ForeignKey to ContentType). """ try: field = self.model._meta.get_field(self.ct_field) except FieldDoesNotExist: return [ checks.Error( "The GenericForeignKey content type references the " "nonexistent field '%s.%s'." % (self.model._meta.object_name, self.ct_field), obj=self, id="contenttypes.E002", ) ] else: if not isinstance(field, models.ForeignKey): return [ checks.Error( "'%s.%s' is not a ForeignKey." % (self.model._meta.object_name, self.ct_field), hint=( "GenericForeignKeys must use a ForeignKey to " "'contenttypes.ContentType' as the 'content_type' field." 
), obj=self, id="contenttypes.E003", ) ] elif field.remote_field.model != ContentType: return [ checks.Error( "'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (self.model._meta.object_name, self.ct_field), hint=( "GenericForeignKeys must use a ForeignKey to " "'contenttypes.ContentType' as the 'content_type' field." ), obj=self, id="contenttypes.E004", ) ] else: return [] def get_cache_name(self): return self.name def get_content_type(self, obj=None, id=None, using=None): if obj is not None: return ContentType.objects.db_manager(obj._state.db).get_for_model( obj, for_concrete_model=self.for_concrete_model ) elif id is not None: return ContentType.objects.db_manager(using).get_for_id(id) else: # This should never happen. I love comments like this, don't you? raise Exception("Impossible arguments to GFK.get_content_type!") def get_prefetch_queryset(self, instances, queryset=None): if queryset is not None: raise ValueError("Custom queryset can't be used for this lookup.") # For efficiency, group the instances by content type and then do one # query per model fk_dict = defaultdict(set) # We need one instance for each group in order to get the right db: instance_dict = {} ct_attname = self.model._meta.get_field(self.ct_field).get_attname() for instance in instances: # We avoid looking for values if either ct_id or fkey value is None ct_id = getattr(instance, ct_attname) if ct_id is not None: fk_val = getattr(instance, self.fk_field) if fk_val is not None: fk_dict[ct_id].add(fk_val) instance_dict[ct_id] = instance ret_val = [] for ct_id, fkeys in fk_dict.items(): instance = instance_dict[ct_id] ct = self.get_content_type(id=ct_id, using=instance._state.db) ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys)) # For doing the join in Python, we have to match both the FK val and the # content type, so we use a callable that returns a (fk, class) pair. def gfk_key(obj): ct_id = getattr(obj, ct_attname) if ct_id is None: return None else: model = self.get_content_type( id=ct_id, using=obj._state.db ).model_class() return ( model._meta.pk.get_prep_value(getattr(obj, self.fk_field)), model, ) return ( ret_val, lambda obj: (obj.pk, obj.__class__), gfk_key, True, self.name, False, ) def __get__(self, instance, cls=None): if instance is None: return self # Don't use getattr(instance, self.ct_field) here because that might # reload the same ContentType over and over (#5570). Instead, get the # content type ID here, and later when the actual instance is needed, # use ContentType.objects.get_for_id(), which has a global cache. 
f = self.model._meta.get_field(self.ct_field) ct_id = getattr(instance, f.get_attname(), None) pk_val = getattr(instance, self.fk_field) rel_obj = self.get_cached_value(instance, default=None) if rel_obj is None and self.is_cached(instance): return rel_obj if rel_obj is not None: ct_match = ( ct_id == self.get_content_type(obj=rel_obj, using=instance._state.db).id ) pk_match = rel_obj._meta.pk.to_python(pk_val) == rel_obj.pk if ct_match and pk_match: return rel_obj else: rel_obj = None if ct_id is not None: ct = self.get_content_type(id=ct_id, using=instance._state.db) try: rel_obj = ct.get_object_for_this_type(pk=pk_val) except ObjectDoesNotExist: pass self.set_cached_value(instance, rel_obj) return rel_obj def __set__(self, instance, value): ct = None fk = None if value is not None: ct = self.get_content_type(obj=value) fk = value.pk setattr(instance, self.ct_field, ct) setattr(instance, self.fk_field, fk) self.set_cached_value(instance, value) class GenericRel(ForeignObjectRel): """ Used by GenericRelation to store information about the relation. """ def __init__( self, field, to, related_name=None, related_query_name=None, limit_choices_to=None, ): super().__init__( field, to, related_name=related_query_name or "+", related_query_name=related_query_name, limit_choices_to=limit_choices_to, on_delete=DO_NOTHING, ) class GenericRelation(ForeignObject): """ Provide a reverse to a relation created by a GenericForeignKey. """ # Field flags auto_created = False empty_strings_allowed = False many_to_many = False many_to_one = False one_to_many = True one_to_one = False rel_class = GenericRel mti_inherited = False def __init__( self, to, object_id_field="object_id", content_type_field="content_type", for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs, ): kwargs["rel"] = self.rel_class( self, to, related_query_name=related_query_name, limit_choices_to=limit_choices_to, ) # Reverse relations are always nullable (Django can't enforce that a # foreign key on the related model points to this model). kwargs["null"] = True kwargs["blank"] = True kwargs["on_delete"] = models.CASCADE kwargs["editable"] = False kwargs["serialize"] = False # This construct is somewhat of an abuse of ForeignObject. This field # represents a relation from pk to object_id field. But, this relation # isn't direct, the join is generated reverse along foreign key. So, # the from_field is object_id field, to_field is pk because of the # reverse join. super().__init__(to, from_fields=[object_id_field], to_fields=[], **kwargs) self.object_id_field_name = object_id_field self.content_type_field_name = content_type_field self.for_concrete_model = for_concrete_model def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_generic_foreign_key_existence(), ] def _is_matching_generic_foreign_key(self, field): """ Return True if field is a GenericForeignKey whose content type and object id fields correspond to the equivalent attributes on this GenericRelation. """ return ( isinstance(field, GenericForeignKey) and field.ct_field == self.content_type_field_name and field.fk_field == self.object_id_field_name ) def _check_generic_foreign_key_existence(self): target = self.remote_field.model if isinstance(target, ModelBase): fields = target._meta.private_fields if any(self._is_matching_generic_foreign_key(field) for field in fields): return [] else: return [ checks.Error( "The GenericRelation defines a relation with the model " "'%s', but that model does not have a GenericForeignKey." 
% target._meta.label, obj=self, id="contenttypes.E004", ) ] else: return [] def resolve_related_fields(self): self.to_fields = [self.model._meta.pk.name] return [ ( self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk, ) ] def _get_path_info_with_parent(self, filtered_relation): """ Return the path that joins the current model through any parent models. The idea is that if you have a GFK defined on a parent model then we need to join the parent model first, then the child model. """ # With an inheritance chain ChildTag -> Tag and Tag defines the # GenericForeignKey, and a TaggedItem model has a GenericRelation to # ChildTag, then we need to generate a join from TaggedItem to Tag # (as Tag.object_id == TaggedItem.pk), and another join from Tag to # ChildTag (as that is where the relation is to). Do this by first # generating a join to the parent model, then generating joins to the # child models. path = [] opts = self.remote_field.model._meta.concrete_model._meta parent_opts = opts.get_field(self.object_id_field_name).model._meta target = parent_opts.pk path.append( PathInfo( from_opts=self.model._meta, to_opts=parent_opts, target_fields=(target,), join_field=self.remote_field, m2m=True, direct=False, filtered_relation=filtered_relation, ) ) # Collect joins needed for the parent -> child chain. This is easiest # to do if we collect joins for the child -> parent chain and then # reverse the direction (call to reverse() and use of # field.remote_field.get_path_info()). parent_field_chain = [] while parent_opts != opts: field = opts.get_ancestor_link(parent_opts.model) parent_field_chain.append(field) opts = field.remote_field.model._meta parent_field_chain.reverse() for field in parent_field_chain: path.extend(field.remote_field.path_infos) return path def get_path_info(self, filtered_relation=None): opts = self.remote_field.model._meta object_id_field = opts.get_field(self.object_id_field_name) if object_id_field.model != opts.model: return self._get_path_info_with_parent(filtered_relation) else: target = opts.pk return [ PathInfo( from_opts=self.model._meta, to_opts=opts, target_fields=(target,), join_field=self.remote_field, m2m=True, direct=False, filtered_relation=filtered_relation, ) ] def get_reverse_path_info(self, filtered_relation=None): opts = self.model._meta from_opts = self.remote_field.model._meta return [ PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, ) ] def value_to_string(self, obj): qs = getattr(obj, self.name).all() return str([instance.pk for instance in qs]) def contribute_to_class(self, cls, name, **kwargs): kwargs["private_only"] = True super().contribute_to_class(cls, name, **kwargs) self.model = cls # Disable the reverse relation for fields inherited by subclasses of a # model in multi-table inheritance. The reverse relation points to the # field of the base model. if self.mti_inherited: self.remote_field.related_name = "+" self.remote_field.related_query_name = None setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field)) # Add get_RELATED_order() and set_RELATED_order() to the model this # field belongs to, if the model on the other end of this relation # is ordered with respect to its corresponding GenericForeignKey. 
if not cls._meta.abstract: def make_generic_foreign_order_accessors(related_model, model): if self._is_matching_generic_foreign_key( model._meta.order_with_respect_to ): make_foreign_order_accessors(model, related_model) lazy_related_operation( make_generic_foreign_order_accessors, self.model, self.remote_field.model, ) def set_attributes_from_rel(self): pass def get_internal_type(self): return "ManyToManyField" def get_content_type(self): """ Return the content type associated with this field's model. """ return ContentType.objects.get_for_model( self.model, for_concrete_model=self.for_concrete_model ) def get_extra_restriction(self, alias, remote_alias): field = self.remote_field.model._meta.get_field(self.content_type_field_name) contenttype_pk = self.get_content_type().pk lookup = field.get_lookup("exact")(field.get_col(remote_alias), contenttype_pk) return WhereNode([lookup], connector=AND) def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS): """ Return all objects related to ``objs`` via this ``GenericRelation``. """ return self.remote_field.model._base_manager.db_manager(using).filter( **{ "%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using) .get_for_model(self.model, for_concrete_model=self.for_concrete_model) .pk, "%s__in" % self.object_id_field_name: [obj.pk for obj in objs], } ) class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor): """ Accessor to the related objects manager on the one-to-many relation created by GenericRelation. In the example:: class Post(Model): comments = GenericRelation(Comment) ``post.comments`` is a ReverseGenericManyToOneDescriptor instance. """ @cached_property def related_manager_cls(self): return create_generic_related_manager( self.rel.model._default_manager.__class__, self.rel, ) def create_generic_related_manager(superclass, rel): """ Factory function to create a manager that subclasses another manager (generally the default manager of a given model) and adds behaviors specific to generic relations. """ class GenericRelatedObjectManager(superclass, AltersData): def __init__(self, instance=None): super().__init__() self.instance = instance self.model = rel.model self.get_content_type = functools.partial( ContentType.objects.db_manager(instance._state.db).get_for_model, for_concrete_model=rel.field.for_concrete_model, ) self.content_type = self.get_content_type(instance) self.content_type_field_name = rel.field.content_type_field_name self.object_id_field_name = rel.field.object_id_field_name self.prefetch_cache_name = rel.field.attname self.pk_val = instance.pk self.core_filters = { "%s__pk" % self.content_type_field_name: self.content_type.id, self.object_id_field_name: self.pk_val, } def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_generic_related_manager(manager.__class__, rel) return manager_class(instance=self.instance) do_not_call_in_templates = True def __str__(self): return repr(self) def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. 
""" db = self._db or router.db_for_read(self.model, instance=self.instance) return queryset.using(db).filter(**self.core_filters) def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) # Group instances by content types. content_type_queries = [ models.Q.create( [ (f"{self.content_type_field_name}__pk", content_type_id), (f"{self.object_id_field_name}__in", {obj.pk for obj in objs}), ] ) for content_type_id, objs in itertools.groupby( sorted(instances, key=lambda obj: self.get_content_type(obj).pk), lambda obj: self.get_content_type(obj).pk, ) ] query = models.Q.create(content_type_queries, connector=models.Q.OR) # We (possibly) need to convert object IDs to the type of the # instances' PK in order to match up instances: object_id_converter = instances[0]._meta.pk.to_python content_type_id_field_name = "%s_id" % self.content_type_field_name return ( queryset.filter(query), lambda relobj: ( object_id_converter(getattr(relobj, self.object_id_field_name)), getattr(relobj, content_type_id_field_name), ), lambda obj: (obj.pk, self.get_content_type(obj).pk), False, self.prefetch_cache_name, False, ) def add(self, *objs, bulk=True): self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) def check_and_update_obj(obj): if not isinstance(obj, self.model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) setattr(obj, self.content_type_field_name, self.content_type) setattr(obj, self.object_id_field_name, self.pk_val) if bulk: pks = [] for obj in objs: if obj._state.adding or obj._state.db != db: raise ValueError( "%r instance isn't saved. Use bulk=False or save " "the object first." % obj ) check_and_update_obj(obj) pks.append(obj.pk) self.model._base_manager.using(db).filter(pk__in=pks).update( **{ self.content_type_field_name: self.content_type, self.object_id_field_name: self.pk_val, } ) else: with transaction.atomic(using=db, savepoint=False): for obj in objs: check_and_update_obj(obj) obj.save() add.alters_data = True async def aadd(self, *objs, bulk=True): return await sync_to_async(self.add)(*objs, bulk=bulk) aadd.alters_data = True def remove(self, *objs, bulk=True): if not objs: return self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk) remove.alters_data = True async def aremove(self, *objs, bulk=True): return await sync_to_async(self.remove)(*objs, bulk=bulk) aremove.alters_data = True def clear(self, *, bulk=True): self._clear(self, bulk) clear.alters_data = True async def aclear(self, *, bulk=True): return await sync_to_async(self.clear)(bulk=bulk) aclear.alters_data = True def _clear(self, queryset, bulk): self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) queryset = queryset.using(db) if bulk: # `QuerySet.delete()` creates its own atomic block which # contains the `pre_delete` and `post_delete` signal handlers. 
queryset.delete() else: with transaction.atomic(using=db, savepoint=False): for obj in queryset: obj.delete() _clear.alters_data = True def set(self, objs, *, bulk=True, clear=False): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) db = router.db_for_write(self.model, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, bulk=bulk) else: old_objs = set(self.using(db).all()) new_objs = [] for obj in objs: if obj in old_objs: old_objs.remove(obj) else: new_objs.append(obj) self.remove(*old_objs) self.add(*new_objs, bulk=bulk) set.alters_data = True async def aset(self, objs, *, bulk=True, clear=False): return await sync_to_async(self.set)(objs, bulk=bulk, clear=clear) aset.alters_data = True def create(self, **kwargs): self._remove_prefetched_objects() kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super().using(db).create(**kwargs) create.alters_data = True async def acreate(self, **kwargs): return await sync_to_async(self.create)(**kwargs) acreate.alters_data = True def get_or_create(self, **kwargs): kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super().using(db).get_or_create(**kwargs) get_or_create.alters_data = True async def aget_or_create(self, **kwargs): return await sync_to_async(self.get_or_create)(**kwargs) aget_or_create.alters_data = True def update_or_create(self, **kwargs): kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super().using(db).update_or_create(**kwargs) update_or_create.alters_data = True async def aupdate_or_create(self, **kwargs): return await sync_to_async(self.update_or_create)(**kwargs) aupdate_or_create.alters_data = True return GenericRelatedObjectManager
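# Usage sketch: how the pieces above fit together. This is an illustration,
# not part of Django itself; ``TaggedItem`` and ``Bookmark`` are hypothetical
# models, while GenericForeignKey, GenericRelation, and the manager produced
# by create_generic_related_manager() are defined in this module.
#
#     from django.contrib.contenttypes.fields import (
#         GenericForeignKey,
#         GenericRelation,
#     )
#     from django.contrib.contenttypes.models import ContentType
#     from django.db import models
#
#     class TaggedItem(models.Model):
#         tag = models.SlugField()
#         content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#         object_id = models.PositiveIntegerField()
#         content_object = GenericForeignKey("content_type", "object_id")
#
#     class Bookmark(models.Model):
#         url = models.URLField()
#         tags = GenericRelation(TaggedItem)  # reverse side of the GFK
#
#     bookmark = Bookmark.objects.create(url="https://example.com/")
#     bookmark.tags.create(tag="django")  # GenericRelatedObjectManager.create()
#     item = TaggedItem.objects.get(tag="django")
#     item.content_object                 # resolved via GenericForeignKey.__get__()
#
# ``bookmark.tags`` is a ReverseGenericManyToOneDescriptor; its manager fills
# in content_type/object_id automatically, which is why create() above needs
# only the remaining fields.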