Dataset columns:
hash: string, 64 characters
content: string, 0 to 1.51M characters
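Each row below pairs a 64-character hash with the full text of one file. If the hash column is the SHA-256 hex digest of the content (a guess based on its 64-hex-character length; the dump itself does not say), a row could be checked roughly like this:

import hashlib

def row_hash_matches(row):
    # Illustrative only: assumes row is a mapping with "hash" and "content"
    # keys and that the hash is SHA-256 over the UTF-8 encoded content.
    # Neither assumption is stated in the dump.
    return hashlib.sha256(row["content"].encode("utf-8")).hexdigest() == row["hash"]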
5edd9159e0465eab523d7cee936ce52501fbeddd41314a970e3b122396f157d3
import os import shutil import tempfile from contextlib import contextmanager from importlib import import_module from django.apps import apps from django.db import connection, connections, migrations, models from django.db.migrations.migration import Migration from django.db.migrations.recorder import MigrationRecorder from django.db.migrations.state import ProjectState from django.test import TransactionTestCase from django.test.utils import extend_sys_path from django.utils.module_loading import module_dir class MigrationTestBase(TransactionTestCase): """ Contains an extended set of asserts for testing migrations and schema operations. """ available_apps = ["migrations"] databases = {"default", "other"} def tearDown(self): # Reset applied-migrations state. for db in self.databases: recorder = MigrationRecorder(connections[db]) recorder.migration_qs.filter(app="migrations").delete() def get_table_description(self, table, using="default"): with connections[using].cursor() as cursor: return connections[using].introspection.get_table_description(cursor, table) def assertTableExists(self, table, using="default"): with connections[using].cursor() as cursor: self.assertIn(table, connections[using].introspection.table_names(cursor)) def assertTableNotExists(self, table, using="default"): with connections[using].cursor() as cursor: self.assertNotIn( table, connections[using].introspection.table_names(cursor) ) def assertColumnExists(self, table, column, using="default"): self.assertIn( column, [c.name for c in self.get_table_description(table, using=using)] ) def assertColumnNotExists(self, table, column, using="default"): self.assertNotIn( column, [c.name for c in self.get_table_description(table, using=using)] ) def _get_column_allows_null(self, table, column, using): return [ c.null_ok for c in self.get_table_description(table, using=using) if c.name == column ][0] def assertColumnNull(self, table, column, using="default"): self.assertTrue(self._get_column_allows_null(table, column, using)) def assertColumnNotNull(self, table, column, using="default"): self.assertFalse(self._get_column_allows_null(table, column, using)) def _get_column_collation(self, table, column, using): return next( f.collation for f in self.get_table_description(table, using=using) if f.name == column ) def assertColumnCollation(self, table, column, collation, using="default"): self.assertEqual(self._get_column_collation(table, column, using), collation) def assertIndexExists( self, table, columns, value=True, using="default", index_type=None ): with connections[using].cursor() as cursor: self.assertEqual( value, any( c["index"] for c in connections[using] .introspection.get_constraints(cursor, table) .values() if ( c["columns"] == list(columns) and (index_type is None or c["type"] == index_type) and not c["unique"] ) ), ) def assertIndexNotExists(self, table, columns): return self.assertIndexExists(table, columns, False) def assertIndexNameExists(self, table, index, using="default"): with connections[using].cursor() as cursor: self.assertIn( index, connection.introspection.get_constraints(cursor, table), ) def assertIndexNameNotExists(self, table, index, using="default"): with connections[using].cursor() as cursor: self.assertNotIn( index, connection.introspection.get_constraints(cursor, table), ) def assertConstraintExists(self, table, name, value=True, using="default"): with connections[using].cursor() as cursor: constraints = ( connections[using].introspection.get_constraints(cursor, table).items() ) 
self.assertEqual( value, any(c["check"] for n, c in constraints if n == name), ) def assertConstraintNotExists(self, table, name): return self.assertConstraintExists(table, name, False) def assertUniqueConstraintExists(self, table, columns, value=True, using="default"): with connections[using].cursor() as cursor: constraints = ( connections[using].introspection.get_constraints(cursor, table).values() ) self.assertEqual( value, any(c["unique"] for c in constraints if c["columns"] == list(columns)), ) def assertFKExists(self, table, columns, to, value=True, using="default"): if not connections[using].features.can_introspect_foreign_keys: return with connections[using].cursor() as cursor: self.assertEqual( value, any( c["foreign_key"] == to for c in connections[using] .introspection.get_constraints(cursor, table) .values() if c["columns"] == list(columns) ), ) def assertFKNotExists(self, table, columns, to): return self.assertFKExists(table, columns, to, False) @contextmanager def temporary_migration_module(self, app_label="migrations", module=None): """ Allows testing management commands in a temporary migrations module. Wrap all invocations to makemigrations and squashmigrations with this context manager in order to avoid creating migration files in your source tree inadvertently. Takes the application label that will be passed to makemigrations or squashmigrations and the Python path to a migrations module. The migrations module is used as a template for creating the temporary migrations module. If it isn't provided, the application's migrations module is used, if it exists. Returns the filesystem path to the temporary migrations module. """ with tempfile.TemporaryDirectory() as temp_dir: target_dir = tempfile.mkdtemp(dir=temp_dir) with open(os.path.join(target_dir, "__init__.py"), "w"): pass target_migrations_dir = os.path.join(target_dir, "migrations") if module is None: module = apps.get_app_config(app_label).name + ".migrations" try: source_migrations_dir = module_dir(import_module(module)) except (ImportError, ValueError): pass else: shutil.copytree(source_migrations_dir, target_migrations_dir) with extend_sys_path(temp_dir): new_module = os.path.basename(target_dir) + ".migrations" with self.settings(MIGRATION_MODULES={app_label: new_module}): yield target_migrations_dir class OperationTestBase(MigrationTestBase): """Common functions to help test operations.""" @classmethod def setUpClass(cls): super().setUpClass() cls._initial_table_names = frozenset(connection.introspection.table_names()) def tearDown(self): self.cleanup_test_tables() super().tearDown() def cleanup_test_tables(self): table_names = ( frozenset(connection.introspection.table_names()) - self._initial_table_names ) with connection.schema_editor() as editor: with connection.constraint_checks_disabled(): for table_name in table_names: editor.execute( editor.sql_delete_table % { "table": editor.quote_name(table_name), } ) def apply_operations(self, app_label, project_state, operations, atomic=True): migration = Migration("name", app_label) migration.operations = operations with connection.schema_editor(atomic=atomic) as editor: return migration.apply(project_state, editor) def unapply_operations(self, app_label, project_state, operations, atomic=True): migration = Migration("name", app_label) migration.operations = operations with connection.schema_editor(atomic=atomic) as editor: return migration.unapply(project_state, editor) def make_test_state(self, app_label, operation, **kwargs): """ Makes a test state using 
set_up_test_model and returns the original state and the state after the migration is applied. """ project_state = self.set_up_test_model(app_label, **kwargs) new_state = project_state.clone() operation.state_forwards(app_label, new_state) return project_state, new_state def set_up_test_model( self, app_label, second_model=False, third_model=False, index=False, multicol_index=False, related_model=False, mti_model=False, proxy_model=False, manager_model=False, unique_together=False, options=False, db_table=None, index_together=False, # RemovedInDjango51Warning. constraints=None, indexes=None, ): """Creates a test model state and database table.""" # Make the "current" state. model_options = { "swappable": "TEST_SWAP_MODEL", # RemovedInDjango51Warning. "index_together": [["weight", "pink"]] if index_together else [], "unique_together": [["pink", "weight"]] if unique_together else [], } if options: model_options["permissions"] = [("can_groom", "Can groom")] if db_table: model_options["db_table"] = db_table operations = [ migrations.CreateModel( "Pony", [ ("id", models.AutoField(primary_key=True)), ("pink", models.IntegerField(default=3)), ("weight", models.FloatField()), ], options=model_options, ) ] if index: operations.append( migrations.AddIndex( "Pony", models.Index(fields=["pink"], name="pony_pink_idx"), ) ) if multicol_index: operations.append( migrations.AddIndex( "Pony", models.Index(fields=["pink", "weight"], name="pony_test_idx"), ) ) if indexes: for index in indexes: operations.append(migrations.AddIndex("Pony", index)) if constraints: for constraint in constraints: operations.append(migrations.AddConstraint("Pony", constraint)) if second_model: operations.append( migrations.CreateModel( "Stable", [ ("id", models.AutoField(primary_key=True)), ], ) ) if third_model: operations.append( migrations.CreateModel( "Van", [ ("id", models.AutoField(primary_key=True)), ], ) ) if related_model: operations.append( migrations.CreateModel( "Rider", [ ("id", models.AutoField(primary_key=True)), ("pony", models.ForeignKey("Pony", models.CASCADE)), ( "friend", models.ForeignKey("self", models.CASCADE, null=True), ), ], ) ) if mti_model: operations.append( migrations.CreateModel( "ShetlandPony", fields=[ ( "pony_ptr", models.OneToOneField( "Pony", models.CASCADE, auto_created=True, parent_link=True, primary_key=True, to_field="id", serialize=False, ), ), ("cuteness", models.IntegerField(default=1)), ], bases=["%s.Pony" % app_label], ) ) if proxy_model: operations.append( migrations.CreateModel( "ProxyPony", fields=[], options={"proxy": True}, bases=["%s.Pony" % app_label], ) ) if manager_model: from .models import FoodManager, FoodQuerySet operations.append( migrations.CreateModel( "Food", fields=[ ("id", models.AutoField(primary_key=True)), ], managers=[ ("food_qs", FoodQuerySet.as_manager()), ("food_mgr", FoodManager("a", "b")), ("food_mgr_kwargs", FoodManager("x", "y", 3, 4)), ], ) ) return self.apply_operations(app_label, ProjectState(), operations)
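End of the first content row (Django's migration test helpers). As a quick orientation to how these helpers fit together, here is a minimal, hypothetical sketch of a test built on OperationTestBase: set_up_test_model() creates the Pony model and its table, the operation's state_forwards()/database_forwards() pair applies a schema change, and assertColumnExists() checks the result. The test class, app label, and field name are illustrative and not part of the dumped file; running it also presupposes Django's own test settings with the "migrations" test app available.

from django.db import connection, migrations, models

class AddFieldOperationSketch(OperationTestBase):
    def test_add_height_column(self):
        app_label = "test_sketch"
        # Creates the Pony model (id, pink, weight) and its table.
        project_state = self.set_up_test_model(app_label)
        operation = migrations.AddField(
            "Pony", "height", models.FloatField(null=True)
        )
        # Compute the post-operation project state.
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        # Apply the change to the database schema.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # The default table name is "<app_label>_pony".
        self.assertColumnExists("%s_pony" % app_label, "height")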
8ef2eec3bf7a5fdc2f5e9dc082287e540d60b985e227ed2590df39ac35cd5215
import functools import re from unittest import mock from django.apps import apps from django.conf import settings from django.contrib.auth.models import AbstractBaseUser from django.core.validators import RegexValidator, validate_slug from django.db import connection, migrations, models from django.db.migrations.autodetector import MigrationAutodetector from django.db.migrations.graph import MigrationGraph from django.db.migrations.loader import MigrationLoader from django.db.migrations.questioner import MigrationQuestioner from django.db.migrations.state import ModelState, ProjectState from django.test import SimpleTestCase, TestCase, ignore_warnings, override_settings from django.test.utils import isolate_lru_cache from django.utils.deprecation import RemovedInDjango51Warning from .models import FoodManager, FoodQuerySet class DeconstructibleObject: """ A custom deconstructible object. """ def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs def deconstruct(self): return (self.__module__ + "." + self.__class__.__name__, self.args, self.kwargs) class BaseAutodetectorTests(TestCase): def repr_changes(self, changes, include_dependencies=False): output = "" for app_label, migrations_ in sorted(changes.items()): output += " %s:\n" % app_label for migration in migrations_: output += " %s\n" % migration.name for operation in migration.operations: output += " %s\n" % operation if include_dependencies: output += " Dependencies:\n" if migration.dependencies: for dep in migration.dependencies: output += " %s\n" % (dep,) else: output += " None\n" return output def assertNumberMigrations(self, changes, app_label, number): if len(changes.get(app_label, [])) != number: self.fail( "Incorrect number of migrations (%s) for %s (expected %s)\n%s" % ( len(changes.get(app_label, [])), app_label, number, self.repr_changes(changes), ) ) def assertMigrationDependencies(self, changes, app_label, position, dependencies): if not changes.get(app_label): self.fail( "No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)) ) if len(changes[app_label]) < position + 1: self.fail( "No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)) ) migration = changes[app_label][position] if set(migration.dependencies) != set(dependencies): self.fail( "Migration dependencies mismatch for %s.%s (expected %s):\n%s" % ( app_label, migration.name, dependencies, self.repr_changes(changes, include_dependencies=True), ) ) def assertOperationTypes(self, changes, app_label, position, types): if not changes.get(app_label): self.fail( "No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)) ) if len(changes[app_label]) < position + 1: self.fail( "No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)) ) migration = changes[app_label][position] real_types = [ operation.__class__.__name__ for operation in migration.operations ] if types != real_types: self.fail( "Operation type mismatch for %s.%s (expected %s):\n%s" % ( app_label, migration.name, types, self.repr_changes(changes), ) ) def assertOperationAttributes( self, changes, app_label, position, operation_position, **attrs ): if not changes.get(app_label): self.fail( "No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)) ) if len(changes[app_label]) < position + 1: self.fail( "No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)) ) migration = changes[app_label][position] if len(changes[app_label]) < position + 
1: self.fail( "No operation at index %s for %s.%s\n%s" % ( operation_position, app_label, migration.name, self.repr_changes(changes), ) ) operation = migration.operations[operation_position] for attr, value in attrs.items(): if getattr(operation, attr, None) != value: self.fail( "Attribute mismatch for %s.%s op #%s, %s (expected %r, got %r):\n%s" % ( app_label, migration.name, operation_position, attr, value, getattr(operation, attr, None), self.repr_changes(changes), ) ) def assertOperationFieldAttributes( self, changes, app_label, position, operation_position, **attrs ): if not changes.get(app_label): self.fail( "No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)) ) if len(changes[app_label]) < position + 1: self.fail( "No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)) ) migration = changes[app_label][position] if len(changes[app_label]) < position + 1: self.fail( "No operation at index %s for %s.%s\n%s" % ( operation_position, app_label, migration.name, self.repr_changes(changes), ) ) operation = migration.operations[operation_position] if not hasattr(operation, "field"): self.fail( "No field attribute for %s.%s op #%s." % ( app_label, migration.name, operation_position, ) ) field = operation.field for attr, value in attrs.items(): if getattr(field, attr, None) != value: self.fail( "Field attribute mismatch for %s.%s op #%s, field.%s (expected %r, " "got %r):\n%s" % ( app_label, migration.name, operation_position, attr, value, getattr(field, attr, None), self.repr_changes(changes), ) ) def make_project_state(self, model_states): "Shortcut to make ProjectStates from lists of predefined models" project_state = ProjectState() for model_state in model_states: project_state.add_model(model_state.clone()) return project_state def get_changes(self, before_states, after_states, questioner=None): if not isinstance(before_states, ProjectState): before_states = self.make_project_state(before_states) if not isinstance(after_states, ProjectState): after_states = self.make_project_state(after_states) return MigrationAutodetector( before_states, after_states, questioner, )._detect_changes() class AutodetectorTests(BaseAutodetectorTests): """ Tests the migration autodetector. 
""" author_empty = ModelState( "testapp", "Author", [("id", models.AutoField(primary_key=True))] ) author_name = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ], ) author_name_null = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, null=True)), ], ) author_name_longer = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=400)), ], ) author_name_renamed = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("names", models.CharField(max_length=200)), ], ) author_name_default = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default="Ada Lovelace")), ], ) author_name_check_constraint = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ], { "constraints": [ models.CheckConstraint( check=models.Q(name__contains="Bob"), name="name_contains_bob" ) ] }, ) author_dates_of_birth_auto_now = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("date_of_birth", models.DateField(auto_now=True)), ("date_time_of_birth", models.DateTimeField(auto_now=True)), ("time_of_birth", models.TimeField(auto_now=True)), ], ) author_dates_of_birth_auto_now_add = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("date_of_birth", models.DateField(auto_now_add=True)), ("date_time_of_birth", models.DateTimeField(auto_now_add=True)), ("time_of_birth", models.TimeField(auto_now_add=True)), ], ) author_name_deconstructible_1 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=DeconstructibleObject())), ], ) author_name_deconstructible_2 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=DeconstructibleObject())), ], ) author_name_deconstructible_3 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=models.IntegerField())), ], ) author_name_deconstructible_4 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, default=models.IntegerField())), ], ) author_name_deconstructible_list_1 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=[DeconstructibleObject(), 123] ), ), ], ) author_name_deconstructible_list_2 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=[DeconstructibleObject(), 123] ), ), ], ) author_name_deconstructible_list_3 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=[DeconstructibleObject(), 999] ), ), ], ) author_name_deconstructible_tuple_1 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=(DeconstructibleObject(), 123) ), ), ], ) author_name_deconstructible_tuple_2 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=(DeconstructibleObject(), 123) ), ), ], ) 
author_name_deconstructible_tuple_3 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=(DeconstructibleObject(), 999) ), ), ], ) author_name_deconstructible_dict_1 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default={"item": DeconstructibleObject(), "otheritem": 123}, ), ), ], ) author_name_deconstructible_dict_2 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default={"item": DeconstructibleObject(), "otheritem": 123}, ), ), ], ) author_name_deconstructible_dict_3 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default={"item": DeconstructibleObject(), "otheritem": 999}, ), ), ], ) author_name_nested_deconstructible_1 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=DeconstructibleObject( DeconstructibleObject(1), ( DeconstructibleObject("t1"), DeconstructibleObject("t2"), ), a=DeconstructibleObject("A"), b=DeconstructibleObject(B=DeconstructibleObject("c")), ), ), ), ], ) author_name_nested_deconstructible_2 = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=DeconstructibleObject( DeconstructibleObject(1), ( DeconstructibleObject("t1"), DeconstructibleObject("t2"), ), a=DeconstructibleObject("A"), b=DeconstructibleObject(B=DeconstructibleObject("c")), ), ), ), ], ) author_name_nested_deconstructible_changed_arg = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=DeconstructibleObject( DeconstructibleObject(1), ( DeconstructibleObject("t1"), DeconstructibleObject("t2-changed"), ), a=DeconstructibleObject("A"), b=DeconstructibleObject(B=DeconstructibleObject("c")), ), ), ), ], ) author_name_nested_deconstructible_extra_arg = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=DeconstructibleObject( DeconstructibleObject(1), ( DeconstructibleObject("t1"), DeconstructibleObject("t2"), ), None, a=DeconstructibleObject("A"), b=DeconstructibleObject(B=DeconstructibleObject("c")), ), ), ), ], ) author_name_nested_deconstructible_changed_kwarg = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=DeconstructibleObject( DeconstructibleObject(1), ( DeconstructibleObject("t1"), DeconstructibleObject("t2"), ), a=DeconstructibleObject("A"), b=DeconstructibleObject(B=DeconstructibleObject("c-changed")), ), ), ), ], ) author_name_nested_deconstructible_extra_kwarg = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, default=DeconstructibleObject( DeconstructibleObject(1), ( DeconstructibleObject("t1"), DeconstructibleObject("t2"), ), a=DeconstructibleObject("A"), b=DeconstructibleObject(B=DeconstructibleObject("c")), c=None, ), ), ), ], ) author_custom_pk = ModelState( "testapp", "Author", [("pk_field", models.IntegerField(primary_key=True))] ) author_with_biography_non_blank = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField()), ("biography", models.TextField()), ], ) 
author_with_biography_blank = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(blank=True)), ("biography", models.TextField(blank=True)), ], ) author_with_book = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book", models.CASCADE)), ], ) author_with_book_order_wrt = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book", models.CASCADE)), ], options={"order_with_respect_to": "book"}, ) author_renamed_with_book = ModelState( "testapp", "Writer", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book", models.CASCADE)), ], ) author_with_publisher_string = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("publisher_name", models.CharField(max_length=200)), ], ) author_with_publisher = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)), ], ) author_with_user = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("user", models.ForeignKey("auth.User", models.CASCADE)), ], ) author_with_custom_user = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("user", models.ForeignKey("thirdapp.CustomUser", models.CASCADE)), ], ) author_proxy = ModelState( "testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author",) ) author_proxy_options = ModelState( "testapp", "AuthorProxy", [], { "proxy": True, "verbose_name": "Super Author", }, ("testapp.author",), ) author_proxy_notproxy = ModelState( "testapp", "AuthorProxy", [], {}, ("testapp.author",) ) author_proxy_third = ModelState( "thirdapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author",) ) author_proxy_third_notproxy = ModelState( "thirdapp", "AuthorProxy", [], {}, ("testapp.author",) ) author_proxy_proxy = ModelState( "testapp", "AAuthorProxyProxy", [], {"proxy": True}, ("testapp.authorproxy",) ) author_unmanaged = ModelState( "testapp", "AuthorUnmanaged", [], {"managed": False}, ("testapp.author",) ) author_unmanaged_managed = ModelState( "testapp", "AuthorUnmanaged", [], {}, ("testapp.author",) ) author_unmanaged_default_pk = ModelState( "testapp", "Author", [("id", models.AutoField(primary_key=True))] ) author_unmanaged_custom_pk = ModelState( "testapp", "Author", [ ("pk_field", models.IntegerField(primary_key=True)), ], ) author_with_m2m = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("publishers", models.ManyToManyField("testapp.Publisher")), ], ) author_with_m2m_blank = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("publishers", models.ManyToManyField("testapp.Publisher", blank=True)), ], ) author_with_m2m_through = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "publishers", models.ManyToManyField("testapp.Publisher", through="testapp.Contract"), ), ], ) author_with_renamed_m2m_through = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "publishers", models.ManyToManyField("testapp.Publisher", 
through="testapp.Deal"), ), ], ) author_with_former_m2m = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("publishers", models.CharField(max_length=100)), ], ) author_with_options = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ], { "permissions": [("can_hire", "Can hire")], "verbose_name": "Authi", }, ) author_with_db_table_options = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ], {"db_table": "author_one"}, ) author_with_new_db_table_options = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ], {"db_table": "author_two"}, ) author_renamed_with_db_table_options = ModelState( "testapp", "NewAuthor", [ ("id", models.AutoField(primary_key=True)), ], {"db_table": "author_one"}, ) author_renamed_with_new_db_table_options = ModelState( "testapp", "NewAuthor", [ ("id", models.AutoField(primary_key=True)), ], {"db_table": "author_three"}, ) contract = ModelState( "testapp", "Contract", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)), ], ) contract_renamed = ModelState( "testapp", "Deal", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)), ], ) publisher = ModelState( "testapp", "Publisher", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ], ) publisher_with_author = ModelState( "testapp", "Publisher", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("name", models.CharField(max_length=100)), ], ) publisher_with_aardvark_author = ModelState( "testapp", "Publisher", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Aardvark", models.CASCADE)), ("name", models.CharField(max_length=100)), ], ) publisher_with_book = ModelState( "testapp", "Publisher", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("otherapp.Book", models.CASCADE)), ("name", models.CharField(max_length=100)), ], ) other_pony = ModelState( "otherapp", "Pony", [ ("id", models.AutoField(primary_key=True)), ], ) other_pony_food = ModelState( "otherapp", "Pony", [ ("id", models.AutoField(primary_key=True)), ], managers=[ ("food_qs", FoodQuerySet.as_manager()), ("food_mgr", FoodManager("a", "b")), ("food_mgr_kwargs", FoodManager("x", "y", 3, 4)), ], ) other_stable = ModelState( "otherapp", "Stable", [("id", models.AutoField(primary_key=True))] ) third_thing = ModelState( "thirdapp", "Thing", [("id", models.AutoField(primary_key=True))] ) book = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], ) book_proxy_fk = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("thirdapp.AuthorProxy", models.CASCADE)), ("title", models.CharField(max_length=200)), ], ) book_proxy_proxy_fk = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.AAuthorProxyProxy", models.CASCADE)), ], ) book_migrations_fk = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("migrations.UnmigratedModel", 
models.CASCADE)), ("title", models.CharField(max_length=200)), ], ) book_with_no_author_fk = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.IntegerField()), ("title", models.CharField(max_length=200)), ], ) book_with_no_author = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("title", models.CharField(max_length=200)), ], ) book_with_author_renamed = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Writer", models.CASCADE)), ("title", models.CharField(max_length=200)), ], ) book_with_field_and_author_renamed = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("writer", models.ForeignKey("testapp.Writer", models.CASCADE)), ("title", models.CharField(max_length=200)), ], ) book_with_multiple_authors = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("authors", models.ManyToManyField("testapp.Author")), ("title", models.CharField(max_length=200)), ], ) book_with_multiple_authors_through_attribution = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ( "authors", models.ManyToManyField( "testapp.Author", through="otherapp.Attribution" ), ), ("title", models.CharField(max_length=200)), ], ) book_indexes = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "indexes": [ models.Index(fields=["author", "title"], name="book_title_author_idx") ], }, ) book_unordered_indexes = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "indexes": [ models.Index(fields=["title", "author"], name="book_author_title_idx") ], }, ) book_unique_together = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "unique_together": {("author", "title")}, }, ) book_unique_together_2 = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "unique_together": {("title", "author")}, }, ) book_unique_together_3 = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("newfield", models.IntegerField()), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "unique_together": {("title", "newfield")}, }, ) book_unique_together_4 = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("newfield2", models.IntegerField()), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "unique_together": {("title", "newfield2")}, }, ) attribution = ModelState( "otherapp", "Attribution", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("book", models.ForeignKey("otherapp.Book", models.CASCADE)), ], ) edition = ModelState( "thirdapp", "Edition", [ ("id", models.AutoField(primary_key=True)), ("book", models.ForeignKey("otherapp.Book", models.CASCADE)), ], ) custom_user = ModelState( "thirdapp", "CustomUser", [ ("id", 
models.AutoField(primary_key=True)), ("username", models.CharField(max_length=255)), ], bases=(AbstractBaseUser,), ) custom_user_no_inherit = ModelState( "thirdapp", "CustomUser", [ ("id", models.AutoField(primary_key=True)), ("username", models.CharField(max_length=255)), ], ) aardvark = ModelState( "thirdapp", "Aardvark", [("id", models.AutoField(primary_key=True))] ) aardvark_testapp = ModelState( "testapp", "Aardvark", [("id", models.AutoField(primary_key=True))] ) aardvark_based_on_author = ModelState( "testapp", "Aardvark", [], bases=("testapp.Author",) ) aardvark_pk_fk_author = ModelState( "testapp", "Aardvark", [ ( "id", models.OneToOneField( "testapp.Author", models.CASCADE, primary_key=True ), ), ], ) knight = ModelState("eggs", "Knight", [("id", models.AutoField(primary_key=True))]) rabbit = ModelState( "eggs", "Rabbit", [ ("id", models.AutoField(primary_key=True)), ("knight", models.ForeignKey("eggs.Knight", models.CASCADE)), ("parent", models.ForeignKey("eggs.Rabbit", models.CASCADE)), ], { "unique_together": {("parent", "knight")}, "indexes": [ models.Index( fields=["parent", "knight"], name="rabbit_circular_fk_index" ) ], }, ) def test_arrange_for_graph(self): """Tests auto-naming of migrations for graph matching.""" # Make a fake graph graph = MigrationGraph() graph.add_node(("testapp", "0001_initial"), None) graph.add_node(("testapp", "0002_foobar"), None) graph.add_node(("otherapp", "0001_initial"), None) graph.add_dependency( "testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"), ) graph.add_dependency( "testapp.0002_foobar", ("testapp", "0002_foobar"), ("otherapp", "0001_initial"), ) # Use project state to make a new migration change set before = self.make_project_state([self.publisher, self.other_pony]) after = self.make_project_state( [ self.author_empty, self.publisher, self.other_pony, self.other_stable, ] ) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Run through arrange_for_graph changes = autodetector.arrange_for_graph(changes, graph) # Make sure there's a new name, deps match, etc. self.assertEqual(changes["testapp"][0].name, "0003_author") self.assertEqual( changes["testapp"][0].dependencies, [("testapp", "0002_foobar")] ) self.assertEqual(changes["otherapp"][0].name, "0002_stable") self.assertEqual( changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")] ) def test_arrange_for_graph_with_multiple_initial(self): # Make a fake graph. graph = MigrationGraph() # Use project state to make a new migration change set. before = self.make_project_state([]) after = self.make_project_state( [self.author_with_book, self.book, self.attribution] ) autodetector = MigrationAutodetector( before, after, MigrationQuestioner({"ask_initial": True}) ) changes = autodetector._detect_changes() changes = autodetector.arrange_for_graph(changes, graph) self.assertEqual(changes["otherapp"][0].name, "0001_initial") self.assertEqual(changes["otherapp"][0].dependencies, []) self.assertEqual(changes["otherapp"][1].name, "0002_initial") self.assertCountEqual( changes["otherapp"][1].dependencies, [("testapp", "0001_initial"), ("otherapp", "0001_initial")], ) self.assertEqual(changes["testapp"][0].name, "0001_initial") self.assertEqual( changes["testapp"][0].dependencies, [("otherapp", "0001_initial")] ) def test_trim_apps(self): """ Trim does not remove dependencies but does remove unwanted apps. 
""" # Use project state to make a new migration change set before = self.make_project_state([]) after = self.make_project_state( [self.author_empty, self.other_pony, self.other_stable, self.third_thing] ) autodetector = MigrationAutodetector( before, after, MigrationQuestioner({"ask_initial": True}) ) changes = autodetector._detect_changes() # Run through arrange_for_graph graph = MigrationGraph() changes = autodetector.arrange_for_graph(changes, graph) changes["testapp"][0].dependencies.append(("otherapp", "0001_initial")) changes = autodetector._trim_to_apps(changes, {"testapp"}) # Make sure there's the right set of migrations self.assertEqual(changes["testapp"][0].name, "0001_initial") self.assertEqual(changes["otherapp"][0].name, "0001_initial") self.assertNotIn("thirdapp", changes) def test_custom_migration_name(self): """Tests custom naming of migrations for graph matching.""" # Make a fake graph graph = MigrationGraph() graph.add_node(("testapp", "0001_initial"), None) graph.add_node(("testapp", "0002_foobar"), None) graph.add_node(("otherapp", "0001_initial"), None) graph.add_dependency( "testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"), ) # Use project state to make a new migration change set before = self.make_project_state([]) after = self.make_project_state( [self.author_empty, self.other_pony, self.other_stable] ) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Run through arrange_for_graph migration_name = "custom_name" changes = autodetector.arrange_for_graph(changes, graph, migration_name) # Make sure there's a new name, deps match, etc. self.assertEqual(changes["testapp"][0].name, "0003_%s" % migration_name) self.assertEqual( changes["testapp"][0].dependencies, [("testapp", "0002_foobar")] ) self.assertEqual(changes["otherapp"][0].name, "0002_%s" % migration_name) self.assertEqual( changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")] ) def test_new_model(self): """Tests autodetection of new models.""" changes = self.get_changes([], [self.other_pony_food]) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Pony") self.assertEqual( [name for name, mgr in changes["otherapp"][0].operations[0].managers], ["food_qs", "food_mgr", "food_mgr_kwargs"], ) def test_old_model(self): """Tests deletion of old models.""" changes = self.get_changes([self.author_empty], []) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author") def test_add_field(self): """Tests autodetection of new fields.""" changes = self.get_changes([self.author_empty], [self.author_name]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AddField"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="name") @mock.patch( "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition", side_effect=AssertionError("Should not have prompted for not null addition"), ) def test_add_date_fields_with_auto_now_not_asking_for_default( self, mocked_ask_method ): changes = self.get_changes( [self.author_empty], [self.author_dates_of_birth_auto_now] ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["AddField", "AddField", "AddField"] ) self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now=True) self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now=True) self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now=True) @mock.patch( "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition", side_effect=AssertionError("Should not have prompted for not null addition"), ) def test_add_date_fields_with_auto_now_add_not_asking_for_null_addition( self, mocked_ask_method ): changes = self.get_changes( [self.author_empty], [self.author_dates_of_birth_auto_now_add] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["AddField", "AddField", "AddField"] ) self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now_add=True) self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now_add=True) self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now_add=True) @mock.patch( "django.db.migrations.questioner.MigrationQuestioner.ask_auto_now_add_addition" ) def test_add_date_fields_with_auto_now_add_asking_for_default( self, mocked_ask_method ): changes = self.get_changes( [self.author_empty], [self.author_dates_of_birth_auto_now_add] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["AddField", "AddField", "AddField"] ) self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now_add=True) self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now_add=True) self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now_add=True) self.assertEqual(mocked_ask_method.call_count, 3) def test_remove_field(self): """Tests autodetection of removed fields.""" changes = self.get_changes([self.author_name], [self.author_empty]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["RemoveField"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="name") def test_alter_field(self): """Tests autodetection of new fields.""" changes = self.get_changes([self.author_name], [self.author_name_longer]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterField"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="name", preserve_default=True ) def test_supports_functools_partial(self): def _content_file_name(instance, filename, key, **kwargs): return "{}/{}".format(instance, filename) def content_file_name(key, **kwargs): return functools.partial(_content_file_name, key, **kwargs) # An unchanged partial reference. before = [ ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "file", models.FileField( max_length=200, upload_to=content_file_name("file") ), ), ], ) ] after = [ ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "file", models.FileField( max_length=200, upload_to=content_file_name("file") ), ), ], ) ] changes = self.get_changes(before, after) self.assertNumberMigrations(changes, "testapp", 0) # A changed partial reference. 
args_changed = [ ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "file", models.FileField( max_length=200, upload_to=content_file_name("other-file") ), ), ], ) ] changes = self.get_changes(before, args_changed) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterField"]) # Can't use assertOperationFieldAttributes because we need the # deconstructed version, i.e., the exploded func/args/keywords rather # than the partial: we don't care if it's not the same instance of the # partial, only if it's the same source function, args, and keywords. value = changes["testapp"][0].operations[0].field.upload_to self.assertEqual( (_content_file_name, ("other-file",), {}), (value.func, value.args, value.keywords), ) kwargs_changed = [ ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "file", models.FileField( max_length=200, upload_to=content_file_name("file", spam="eggs"), ), ), ], ) ] changes = self.get_changes(before, kwargs_changed) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterField"]) value = changes["testapp"][0].operations[0].field.upload_to self.assertEqual( (_content_file_name, ("file",), {"spam": "eggs"}), (value.func, value.args, value.keywords), ) @mock.patch( "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration", side_effect=AssertionError("Should not have prompted for not null addition"), ) def test_alter_field_to_not_null_with_default(self, mocked_ask_method): """ #23609 - Tests autodetection of nullable to non-nullable alterations. """ changes = self.get_changes([self.author_name_null], [self.author_name_default]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterField"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="name", preserve_default=True ) self.assertOperationFieldAttributes( changes, "testapp", 0, 0, default="Ada Lovelace" ) @mock.patch( "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration", return_value=models.NOT_PROVIDED, ) def test_alter_field_to_not_null_without_default(self, mocked_ask_method): """ #23609 - Tests autodetection of nullable to non-nullable alterations. """ changes = self.get_changes([self.author_name_null], [self.author_name]) self.assertEqual(mocked_ask_method.call_count, 1) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterField"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="name", preserve_default=True ) self.assertOperationFieldAttributes( changes, "testapp", 0, 0, default=models.NOT_PROVIDED ) @mock.patch( "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration", return_value="Some Name", ) def test_alter_field_to_not_null_oneoff_default(self, mocked_ask_method): """ #23609 - Tests autodetection of nullable to non-nullable alterations. """ changes = self.get_changes([self.author_name_null], [self.author_name]) self.assertEqual(mocked_ask_method.call_count, 1) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterField"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="name", preserve_default=False ) self.assertOperationFieldAttributes( changes, "testapp", 0, 0, default="Some Name" ) def test_rename_field(self): """Tests autodetection of renamed fields.""" changes = self.get_changes( [self.author_name], [self.author_name_renamed], MigrationQuestioner({"ask_rename": True}), ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["RenameField"]) self.assertOperationAttributes( changes, "testapp", 0, 0, old_name="name", new_name="names" ) def test_rename_field_foreign_key_to_field(self): before = [ ModelState( "app", "Foo", [ ("id", models.AutoField(primary_key=True)), ("field", models.IntegerField(unique=True)), ], ), ModelState( "app", "Bar", [ ("id", models.AutoField(primary_key=True)), ( "foo", models.ForeignKey("app.Foo", models.CASCADE, to_field="field"), ), ], ), ] after = [ ModelState( "app", "Foo", [ ("id", models.AutoField(primary_key=True)), ("renamed_field", models.IntegerField(unique=True)), ], ), ModelState( "app", "Bar", [ ("id", models.AutoField(primary_key=True)), ( "foo", models.ForeignKey( "app.Foo", models.CASCADE, to_field="renamed_field" ), ), ], ), ] changes = self.get_changes( before, after, MigrationQuestioner({"ask_rename": True}) ) # Right number/type of migrations? self.assertNumberMigrations(changes, "app", 1) self.assertOperationTypes(changes, "app", 0, ["RenameField"]) self.assertOperationAttributes( changes, "app", 0, 0, old_name="field", new_name="renamed_field" ) def test_rename_foreign_object_fields(self): fields = ("first", "second") renamed_fields = ("first_renamed", "second_renamed") before = [ ModelState( "app", "Foo", [ ("id", models.AutoField(primary_key=True)), ("first", models.IntegerField()), ("second", models.IntegerField()), ], options={"unique_together": {fields}}, ), ModelState( "app", "Bar", [ ("id", models.AutoField(primary_key=True)), ("first", models.IntegerField()), ("second", models.IntegerField()), ( "foo", models.ForeignObject( "app.Foo", models.CASCADE, from_fields=fields, to_fields=fields, ), ), ], ), ] # Case 1: to_fields renames. after = [ ModelState( "app", "Foo", [ ("id", models.AutoField(primary_key=True)), ("first_renamed", models.IntegerField()), ("second_renamed", models.IntegerField()), ], options={"unique_together": {renamed_fields}}, ), ModelState( "app", "Bar", [ ("id", models.AutoField(primary_key=True)), ("first", models.IntegerField()), ("second", models.IntegerField()), ( "foo", models.ForeignObject( "app.Foo", models.CASCADE, from_fields=fields, to_fields=renamed_fields, ), ), ], ), ] changes = self.get_changes( before, after, MigrationQuestioner({"ask_rename": True}) ) self.assertNumberMigrations(changes, "app", 1) self.assertOperationTypes( changes, "app", 0, ["RenameField", "RenameField", "AlterUniqueTogether"] ) self.assertOperationAttributes( changes, "app", 0, 0, model_name="foo", old_name="first", new_name="first_renamed", ) self.assertOperationAttributes( changes, "app", 0, 1, model_name="foo", old_name="second", new_name="second_renamed", ) # Case 2: from_fields renames. 
after = [ ModelState( "app", "Foo", [ ("id", models.AutoField(primary_key=True)), ("first", models.IntegerField()), ("second", models.IntegerField()), ], options={"unique_together": {fields}}, ), ModelState( "app", "Bar", [ ("id", models.AutoField(primary_key=True)), ("first_renamed", models.IntegerField()), ("second_renamed", models.IntegerField()), ( "foo", models.ForeignObject( "app.Foo", models.CASCADE, from_fields=renamed_fields, to_fields=fields, ), ), ], ), ] changes = self.get_changes( before, after, MigrationQuestioner({"ask_rename": True}) ) self.assertNumberMigrations(changes, "app", 1) self.assertOperationTypes(changes, "app", 0, ["RenameField", "RenameField"]) self.assertOperationAttributes( changes, "app", 0, 0, model_name="bar", old_name="first", new_name="first_renamed", ) self.assertOperationAttributes( changes, "app", 0, 1, model_name="bar", old_name="second", new_name="second_renamed", ) def test_rename_referenced_primary_key(self): before = [ ModelState( "app", "Foo", [ ("id", models.CharField(primary_key=True, serialize=False)), ], ), ModelState( "app", "Bar", [ ("id", models.AutoField(primary_key=True)), ("foo", models.ForeignKey("app.Foo", models.CASCADE)), ], ), ] after = [ ModelState( "app", "Foo", [("renamed_id", models.CharField(primary_key=True, serialize=False))], ), ModelState( "app", "Bar", [ ("id", models.AutoField(primary_key=True)), ("foo", models.ForeignKey("app.Foo", models.CASCADE)), ], ), ] changes = self.get_changes( before, after, MigrationQuestioner({"ask_rename": True}) ) self.assertNumberMigrations(changes, "app", 1) self.assertOperationTypes(changes, "app", 0, ["RenameField"]) self.assertOperationAttributes( changes, "app", 0, 0, old_name="id", new_name="renamed_id" ) def test_rename_field_preserved_db_column(self): """ RenameField is used if a field is renamed and db_column equal to the old field's column is added. 
""" before = [ ModelState( "app", "Foo", [ ("id", models.AutoField(primary_key=True)), ("field", models.IntegerField()), ], ), ] after = [ ModelState( "app", "Foo", [ ("id", models.AutoField(primary_key=True)), ("renamed_field", models.IntegerField(db_column="field")), ], ), ] changes = self.get_changes( before, after, MigrationQuestioner({"ask_rename": True}) ) self.assertNumberMigrations(changes, "app", 1) self.assertOperationTypes(changes, "app", 0, ["AlterField", "RenameField"]) self.assertOperationAttributes( changes, "app", 0, 0, model_name="foo", name="field", ) self.assertEqual( changes["app"][0].operations[0].field.deconstruct(), ( "field", "django.db.models.IntegerField", [], {"db_column": "field"}, ), ) self.assertOperationAttributes( changes, "app", 0, 1, model_name="foo", old_name="field", new_name="renamed_field", ) def test_rename_related_field_preserved_db_column(self): before = [ ModelState( "app", "Foo", [ ("id", models.AutoField(primary_key=True)), ], ), ModelState( "app", "Bar", [ ("id", models.AutoField(primary_key=True)), ("foo", models.ForeignKey("app.Foo", models.CASCADE)), ], ), ] after = [ ModelState( "app", "Foo", [ ("id", models.AutoField(primary_key=True)), ], ), ModelState( "app", "Bar", [ ("id", models.AutoField(primary_key=True)), ( "renamed_foo", models.ForeignKey( "app.Foo", models.CASCADE, db_column="foo_id" ), ), ], ), ] changes = self.get_changes( before, after, MigrationQuestioner({"ask_rename": True}) ) self.assertNumberMigrations(changes, "app", 1) self.assertOperationTypes(changes, "app", 0, ["AlterField", "RenameField"]) self.assertOperationAttributes( changes, "app", 0, 0, model_name="bar", name="foo", ) self.assertEqual( changes["app"][0].operations[0].field.deconstruct(), ( "foo", "django.db.models.ForeignKey", [], {"to": "app.foo", "on_delete": models.CASCADE, "db_column": "foo_id"}, ), ) self.assertOperationAttributes( changes, "app", 0, 1, model_name="bar", old_name="foo", new_name="renamed_foo", ) def test_rename_field_with_renamed_model(self): changes = self.get_changes( [self.author_name], [ ModelState( "testapp", "RenamedAuthor", [ ("id", models.AutoField(primary_key=True)), ("renamed_name", models.CharField(max_length=200)), ], ), ], MigrationQuestioner({"ask_rename_model": True, "ask_rename": True}), ) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["RenameModel", "RenameField"]) self.assertOperationAttributes( changes, "testapp", 0, 0, old_name="Author", new_name="RenamedAuthor", ) self.assertOperationAttributes( changes, "testapp", 0, 1, old_name="name", new_name="renamed_name", ) def test_rename_model(self): """Tests autodetection of renamed models.""" changes = self.get_changes( [self.author_with_book, self.book], [self.author_renamed_with_book, self.book_with_author_renamed], MigrationQuestioner({"ask_rename_model": True}), ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"]) self.assertOperationAttributes( changes, "testapp", 0, 0, old_name="Author", new_name="Writer" ) # Now that RenameModel handles related fields too, there should be # no AlterField for the related field. self.assertNumberMigrations(changes, "otherapp", 0) def test_rename_model_case(self): """ Model name is case-insensitive. Changing case doesn't lead to any autodetected operations. 
""" author_renamed = ModelState( "testapp", "author", [ ("id", models.AutoField(primary_key=True)), ], ) changes = self.get_changes( [self.author_empty, self.book], [author_renamed, self.book], questioner=MigrationQuestioner({"ask_rename_model": True}), ) self.assertNumberMigrations(changes, "testapp", 0) self.assertNumberMigrations(changes, "otherapp", 0) def test_renamed_referenced_m2m_model_case(self): publisher_renamed = ModelState( "testapp", "publisher", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ], ) changes = self.get_changes( [self.publisher, self.author_with_m2m], [publisher_renamed, self.author_with_m2m], questioner=MigrationQuestioner({"ask_rename_model": True}), ) self.assertNumberMigrations(changes, "testapp", 0) self.assertNumberMigrations(changes, "otherapp", 0) def test_rename_m2m_through_model(self): """ Tests autodetection of renamed models that are used in M2M relations as through models. """ changes = self.get_changes( [self.author_with_m2m_through, self.publisher, self.contract], [ self.author_with_renamed_m2m_through, self.publisher, self.contract_renamed, ], MigrationQuestioner({"ask_rename_model": True}), ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"]) self.assertOperationAttributes( changes, "testapp", 0, 0, old_name="Contract", new_name="Deal" ) def test_rename_model_with_renamed_rel_field(self): """ Tests autodetection of renamed models while simultaneously renaming one of the fields that relate to the renamed model. """ changes = self.get_changes( [self.author_with_book, self.book], [self.author_renamed_with_book, self.book_with_field_and_author_renamed], MigrationQuestioner({"ask_rename": True, "ask_rename_model": True}), ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"]) self.assertOperationAttributes( changes, "testapp", 0, 0, old_name="Author", new_name="Writer" ) # Right number/type of migrations for related field rename? # Alter is already taken care of. self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["RenameField"]) self.assertOperationAttributes( changes, "otherapp", 0, 0, old_name="author", new_name="writer" ) def test_rename_model_with_fks_in_different_position(self): """ #24537 - The order of fields in a model does not influence the RenameModel detection. 
""" before = [ ModelState( "testapp", "EntityA", [ ("id", models.AutoField(primary_key=True)), ], ), ModelState( "testapp", "EntityB", [ ("id", models.AutoField(primary_key=True)), ("some_label", models.CharField(max_length=255)), ("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)), ], ), ] after = [ ModelState( "testapp", "EntityA", [ ("id", models.AutoField(primary_key=True)), ], ), ModelState( "testapp", "RenamedEntityB", [ ("id", models.AutoField(primary_key=True)), ("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)), ("some_label", models.CharField(max_length=255)), ], ), ] changes = self.get_changes( before, after, MigrationQuestioner({"ask_rename_model": True}) ) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"]) self.assertOperationAttributes( changes, "testapp", 0, 0, old_name="EntityB", new_name="RenamedEntityB" ) def test_rename_model_reverse_relation_dependencies(self): """ The migration to rename a model pointed to by a foreign key in another app must run after the other app's migration that adds the foreign key with model's original name. Therefore, the renaming migration has a dependency on that other migration. """ before = [ ModelState( "testapp", "EntityA", [ ("id", models.AutoField(primary_key=True)), ], ), ModelState( "otherapp", "EntityB", [ ("id", models.AutoField(primary_key=True)), ("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)), ], ), ] after = [ ModelState( "testapp", "RenamedEntityA", [ ("id", models.AutoField(primary_key=True)), ], ), ModelState( "otherapp", "EntityB", [ ("id", models.AutoField(primary_key=True)), ( "entity_a", models.ForeignKey("testapp.RenamedEntityA", models.CASCADE), ), ], ), ] changes = self.get_changes( before, after, MigrationQuestioner({"ask_rename_model": True}) ) self.assertNumberMigrations(changes, "testapp", 1) self.assertMigrationDependencies( changes, "testapp", 0, [("otherapp", "__first__")] ) self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"]) self.assertOperationAttributes( changes, "testapp", 0, 0, old_name="EntityA", new_name="RenamedEntityA" ) def test_fk_dependency(self): """Having a ForeignKey automatically adds a dependency.""" # Note that testapp (author) has no dependencies, # otherapp (book) depends on testapp (author), # thirdapp (edition) depends on otherapp (book) changes = self.get_changes([], [self.author_name, self.book, self.edition]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author") self.assertMigrationDependencies(changes, "testapp", 0, []) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book") self.assertMigrationDependencies( changes, "otherapp", 0, [("testapp", "auto_1")] ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "thirdapp", 1) self.assertOperationTypes(changes, "thirdapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="Edition") self.assertMigrationDependencies( changes, "thirdapp", 0, [("otherapp", "auto_1")] ) def test_proxy_fk_dependency(self): """FK dependencies still work on proxy models.""" # Note that testapp (author) has no dependencies, # otherapp (book) depends on testapp (authorproxy) changes = self.get_changes( [], [self.author_empty, self.author_proxy_third, self.book_proxy_fk] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author") self.assertMigrationDependencies(changes, "testapp", 0, []) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book") self.assertMigrationDependencies( changes, "otherapp", 0, [("thirdapp", "auto_1")] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "thirdapp", 1) self.assertOperationTypes(changes, "thirdapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="AuthorProxy") self.assertMigrationDependencies( changes, "thirdapp", 0, [("testapp", "auto_1")] ) def test_same_app_no_fk_dependency(self): """ A migration with a FK between two models of the same app does not have a dependency to itself. """ changes = self.get_changes([], [self.author_with_publisher, self.publisher]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher") self.assertOperationAttributes(changes, "testapp", 0, 1, name="Author") self.assertMigrationDependencies(changes, "testapp", 0, []) def test_circular_fk_dependency(self): """ Having a circular ForeignKey dependency automatically resolves the situation into 2 migrations on one side and 1 on the other. """ changes = self.get_changes( [], [self.author_with_book, self.book, self.publisher_with_book] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher") self.assertOperationAttributes(changes, "testapp", 0, 1, name="Author") self.assertMigrationDependencies( changes, "testapp", 0, [("otherapp", "auto_1")] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 2) self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"]) self.assertOperationTypes(changes, "otherapp", 1, ["AddField"]) self.assertMigrationDependencies(changes, "otherapp", 0, []) self.assertMigrationDependencies( changes, "otherapp", 1, [("otherapp", "auto_1"), ("testapp", "auto_1")] ) # both split migrations should be `initial` self.assertTrue(changes["otherapp"][0].initial) self.assertTrue(changes["otherapp"][1].initial) def test_same_app_circular_fk_dependency(self): """ A migration with a FK between two models of the same app does not have a dependency to itself. """ changes = self.get_changes( [], [self.author_with_publisher, self.publisher_with_author] ) # Right number/type of migrations? 
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(
            changes, "testapp", 0, ["CreateModel", "CreateModel", "AddField"]
        )
        self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
        self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
        self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
        self.assertMigrationDependencies(changes, "testapp", 0, [])

    def test_same_app_circular_fk_dependency_with_unique_together_and_indexes(self):
        """
        #22275 - A migration with a circular FK dependency does not try to
        create unique together constraints and indexes before creating all
        required fields.
        """
        changes = self.get_changes([], [self.knight, self.rabbit])
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "eggs", 1)
        self.assertOperationTypes(
            changes,
            "eggs",
            0,
            ["CreateModel", "CreateModel", "AddIndex", "AlterUniqueTogether"],
        )
        self.assertNotIn("unique_together", changes["eggs"][0].operations[0].options)
        self.assertNotIn("unique_together", changes["eggs"][0].operations[1].options)
        self.assertMigrationDependencies(changes, "eggs", 0, [])

    def test_alter_db_table_add(self):
        """Tests detection for adding db_table in model's options."""
        changes = self.get_changes(
            [self.author_empty], [self.author_with_db_table_options]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTable"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", table="author_one"
        )

    def test_alter_db_table_change(self):
        """Tests detection for changing db_table in model's options."""
        changes = self.get_changes(
            [self.author_with_db_table_options],
            [self.author_with_new_db_table_options],
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTable"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", table="author_two"
        )

    def test_alter_db_table_remove(self):
        """Tests detection for removing db_table in model's options."""
        changes = self.get_changes(
            [self.author_with_db_table_options], [self.author_empty]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelTable"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="author", table=None
        )

    def test_alter_db_table_no_changes(self):
        """
        Altering db_table doesn't generate a migration if no changes have been
        made.
        """
        changes = self.get_changes(
            [self.author_with_db_table_options], [self.author_with_db_table_options]
        )
        # Right number of migrations?
        self.assertEqual(len(changes), 0)

    def test_keep_db_table_with_model_change(self):
        """
        Tests that when a model changes but its db_table stays as-is, the
        autodetector doesn't create more than one operation.
        """
        changes = self.get_changes(
            [self.author_with_db_table_options],
            [self.author_renamed_with_db_table_options],
            MigrationQuestioner({"ask_rename_model": True}),
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor"
        )

    def test_alter_db_table_with_model_change(self):
        """
        Tests that when both the model and its db_table change, the
        autodetector creates two operations.
""" changes = self.get_changes( [self.author_with_db_table_options], [self.author_renamed_with_new_db_table_options], MigrationQuestioner({"ask_rename_model": True}), ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["RenameModel", "AlterModelTable"] ) self.assertOperationAttributes( changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor" ) self.assertOperationAttributes( changes, "testapp", 0, 1, name="newauthor", table="author_three" ) def test_identical_regex_doesnt_alter(self): from_state = ModelState( "testapp", "model", [ ( "id", models.AutoField( primary_key=True, validators=[ RegexValidator( re.compile("^[-a-zA-Z0-9_]+\\Z"), "Enter a valid “slug” consisting of letters, numbers, " "underscores or hyphens.", "invalid", ) ], ), ) ], ) to_state = ModelState( "testapp", "model", [("id", models.AutoField(primary_key=True, validators=[validate_slug]))], ) changes = self.get_changes([from_state], [to_state]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 0) def test_different_regex_does_alter(self): from_state = ModelState( "testapp", "model", [ ( "id", models.AutoField( primary_key=True, validators=[ RegexValidator( re.compile("^[a-z]+\\Z", 32), "Enter a valid “slug” consisting of letters, numbers, " "underscores or hyphens.", "invalid", ) ], ), ) ], ) to_state = ModelState( "testapp", "model", [("id", models.AutoField(primary_key=True, validators=[validate_slug]))], ) changes = self.get_changes([from_state], [to_state]) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterField"]) def test_alter_regex_string_to_compiled_regex(self): regex_string = "^[a-z]+$" from_state = ModelState( "testapp", "model", [ ( "id", models.AutoField( primary_key=True, validators=[RegexValidator(regex_string)] ), ) ], ) to_state = ModelState( "testapp", "model", [ ( "id", models.AutoField( primary_key=True, validators=[RegexValidator(re.compile(regex_string))], ), ) ], ) changes = self.get_changes([from_state], [to_state]) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterField"]) def test_empty_unique_together(self): """Empty unique_together shouldn't generate a migration.""" # Explicitly testing for not specified, since this is the case after # a CreateModel operation w/o any definition on the original model model_state_not_specified = ModelState( "a", "model", [("id", models.AutoField(primary_key=True))] ) # Explicitly testing for None, since this was the issue in #23452 after # an AlterUniqueTogether operation with e.g. () as value model_state_none = ModelState( "a", "model", [("id", models.AutoField(primary_key=True))], { "unique_together": None, }, ) # Explicitly testing for the empty set, since we now always have sets. 
# During removal (('col1', 'col2'),) --> () this becomes set([]) model_state_empty = ModelState( "a", "model", [("id", models.AutoField(primary_key=True))], { "unique_together": set(), }, ) def test(from_state, to_state, msg): changes = self.get_changes([from_state], [to_state]) if changes: ops = ", ".join( o.__class__.__name__ for o in changes["a"][0].operations ) self.fail("Created operation(s) %s from %s" % (ops, msg)) tests = ( ( model_state_not_specified, model_state_not_specified, '"not specified" to "not specified"', ), (model_state_not_specified, model_state_none, '"not specified" to "None"'), ( model_state_not_specified, model_state_empty, '"not specified" to "empty"', ), (model_state_none, model_state_not_specified, '"None" to "not specified"'), (model_state_none, model_state_none, '"None" to "None"'), (model_state_none, model_state_empty, '"None" to "empty"'), ( model_state_empty, model_state_not_specified, '"empty" to "not specified"', ), (model_state_empty, model_state_none, '"empty" to "None"'), (model_state_empty, model_state_empty, '"empty" to "empty"'), ) for t in tests: test(*t) def test_create_model_with_indexes(self): """Test creation of new model with indexes already defined.""" author = ModelState( "otherapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ], { "indexes": [ models.Index(fields=["name"], name="create_model_with_indexes_idx") ] }, ) changes = self.get_changes([], [author]) added_index = models.Index( fields=["name"], name="create_model_with_indexes_idx" ) # Right number of migrations? self.assertEqual(len(changes["otherapp"]), 1) # Right number of actions? migration = changes["otherapp"][0] self.assertEqual(len(migration.operations), 2) # Right actions order? self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel", "AddIndex"]) self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Author") self.assertOperationAttributes( changes, "otherapp", 0, 1, model_name="author", index=added_index ) def test_add_indexes(self): """Test change detection of new indexes.""" changes = self.get_changes( [self.author_empty, self.book], [self.author_empty, self.book_indexes] ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["AddIndex"]) added_index = models.Index( fields=["author", "title"], name="book_title_author_idx" ) self.assertOperationAttributes( changes, "otherapp", 0, 0, model_name="book", index=added_index ) def test_remove_indexes(self): """Test change detection of removed indexes.""" changes = self.get_changes( [self.author_empty, self.book_indexes], [self.author_empty, self.book] ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["RemoveIndex"]) self.assertOperationAttributes( changes, "otherapp", 0, 0, model_name="book", name="book_title_author_idx" ) def test_rename_indexes(self): book_renamed_indexes = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "indexes": [ models.Index( fields=["author", "title"], name="renamed_book_title_author_idx" ) ], }, ) changes = self.get_changes( [self.author_empty, self.book_indexes], [self.author_empty, book_renamed_indexes], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["RenameIndex"]) self.assertOperationAttributes( changes, "otherapp", 0, 0, model_name="book", new_name="renamed_book_title_author_idx", old_name="book_title_author_idx", ) def test_order_fields_indexes(self): """Test change detection of reordering of fields in indexes.""" changes = self.get_changes( [self.author_empty, self.book_indexes], [self.author_empty, self.book_unordered_indexes], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["RemoveIndex", "AddIndex"]) self.assertOperationAttributes( changes, "otherapp", 0, 0, model_name="book", name="book_title_author_idx" ) added_index = models.Index( fields=["title", "author"], name="book_author_title_idx" ) self.assertOperationAttributes( changes, "otherapp", 0, 1, model_name="book", index=added_index ) def test_create_model_with_check_constraint(self): """Test creation of new model with constraints already defined.""" author = ModelState( "otherapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ], { "constraints": [ models.CheckConstraint( check=models.Q(name__contains="Bob"), name="name_contains_bob" ) ] }, ) changes = self.get_changes([], [author]) added_constraint = models.CheckConstraint( check=models.Q(name__contains="Bob"), name="name_contains_bob" ) # Right number of migrations? self.assertEqual(len(changes["otherapp"]), 1) # Right number of actions? migration = changes["otherapp"][0] self.assertEqual(len(migration.operations), 2) # Right actions order? self.assertOperationTypes( changes, "otherapp", 0, ["CreateModel", "AddConstraint"] ) self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Author") self.assertOperationAttributes( changes, "otherapp", 0, 1, model_name="author", constraint=added_constraint ) def test_add_constraints(self): """Test change detection of new constraints.""" changes = self.get_changes( [self.author_name], [self.author_name_check_constraint] ) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AddConstraint"]) added_constraint = models.CheckConstraint( check=models.Q(name__contains="Bob"), name="name_contains_bob" ) self.assertOperationAttributes( changes, "testapp", 0, 0, model_name="author", constraint=added_constraint ) def test_remove_constraints(self): """Test change detection of removed constraints.""" changes = self.get_changes( [self.author_name_check_constraint], [self.author_name] ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["RemoveConstraint"]) self.assertOperationAttributes( changes, "testapp", 0, 0, model_name="author", name="name_contains_bob" ) def test_add_unique_together(self): """Tests unique_together detection.""" changes = self.get_changes( [self.author_empty, self.book], [self.author_empty, self.book_unique_together], ) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether"]) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="book", unique_together={("author", "title")}, ) def test_remove_unique_together(self): """Tests unique_together detection.""" changes = self.get_changes( [self.author_empty, self.book_unique_together], [self.author_empty, self.book], ) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether"]) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="book", unique_together=set() ) def test_unique_together_remove_fk(self): """Tests unique_together and field removal detection & ordering""" changes = self.get_changes( [self.author_empty, self.book_unique_together], [self.author_empty, self.book_with_no_author], ) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["AlterUniqueTogether", "RemoveField"], ) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="book", unique_together=set() ) self.assertOperationAttributes( changes, "otherapp", 0, 1, model_name="book", name="author" ) def test_unique_together_no_changes(self): """ unique_together doesn't generate a migration if no changes have been made. """ changes = self.get_changes( [self.author_empty, self.book_unique_together], [self.author_empty, self.book_unique_together], ) # Right number of migrations? self.assertEqual(len(changes), 0) def test_unique_together_ordering(self): """ unique_together also triggers on ordering changes. """ changes = self.get_changes( [self.author_empty, self.book_unique_together], [self.author_empty, self.book_unique_together_2], ) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["AlterUniqueTogether"], ) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="book", unique_together={("title", "author")}, ) def test_add_field_and_unique_together(self): """ Added fields will be created before using them in unique_together. """ changes = self.get_changes( [self.author_empty, self.book], [self.author_empty, self.book_unique_together_3], ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["AddField", "AlterUniqueTogether"], ) self.assertOperationAttributes( changes, "otherapp", 0, 1, name="book", unique_together={("title", "newfield")}, ) def test_create_model_and_unique_together(self): author = ModelState( "otherapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ], ) book_with_author = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("otherapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "unique_together": {("title", "author")}, }, ) changes = self.get_changes( [self.book_with_no_author], [author, book_with_author] ) # Right number of migrations? self.assertEqual(len(changes["otherapp"]), 1) # Right number of actions? migration = changes["otherapp"][0] self.assertEqual(len(migration.operations), 3) # Right actions order? self.assertOperationTypes( changes, "otherapp", 0, ["CreateModel", "AddField", "AlterUniqueTogether"], ) def test_remove_field_and_unique_together(self): """ Removed fields will be removed after updating unique_together. """ changes = self.get_changes( [self.author_empty, self.book_unique_together_3], [self.author_empty, self.book_unique_together], ) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["AlterUniqueTogether", "RemoveField"], ) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="book", unique_together={("author", "title")}, ) self.assertOperationAttributes( changes, "otherapp", 0, 1, model_name="book", name="newfield", ) def test_alter_field_and_unique_together(self): """Fields are altered after deleting some unique_together.""" initial_author = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("age", models.IntegerField(db_index=True)), ], { "unique_together": {("name",)}, }, ) author_reversed_constraints = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, unique=True)), ("age", models.IntegerField()), ], { "unique_together": {("age",)}, }, ) changes = self.get_changes([initial_author], [author_reversed_constraints]) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, [ "AlterUniqueTogether", "AlterField", "AlterField", "AlterUniqueTogether", ], ) self.assertOperationAttributes( changes, "testapp", 0, 0, name="author", unique_together=set(), ) self.assertOperationAttributes( changes, "testapp", 0, 1, model_name="author", name="age", ) self.assertOperationAttributes( changes, "testapp", 0, 2, model_name="author", name="name", ) self.assertOperationAttributes( changes, "testapp", 0, 3, name="author", unique_together={("age",)}, ) def test_partly_alter_unique_together_increase(self): initial_author = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("age", models.IntegerField()), ], { "unique_together": {("name",)}, }, ) author_new_constraints = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("age", models.IntegerField()), ], { "unique_together": {("name",), ("age",)}, }, ) changes = self.get_changes([initial_author], [author_new_constraints]) 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["AlterUniqueTogether"], ) self.assertOperationAttributes( changes, "testapp", 0, 0, name="author", unique_together={("name",), ("age",)}, ) def test_partly_alter_unique_together_decrease(self): initial_author = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("age", models.IntegerField()), ], { "unique_together": {("name",), ("age",)}, }, ) author_new_constraints = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("age", models.IntegerField()), ], { "unique_together": {("name",)}, }, ) changes = self.get_changes([initial_author], [author_new_constraints]) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["AlterUniqueTogether"], ) self.assertOperationAttributes( changes, "testapp", 0, 0, name="author", unique_together={("name",)}, ) def test_rename_field_and_unique_together(self): """Fields are renamed before updating unique_together.""" changes = self.get_changes( [self.author_empty, self.book_unique_together_3], [self.author_empty, self.book_unique_together_4], MigrationQuestioner({"ask_rename": True}), ) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["RenameField", "AlterUniqueTogether"], ) self.assertOperationAttributes( changes, "otherapp", 0, 1, name="book", unique_together={("title", "newfield2")}, ) def test_proxy(self): """The autodetector correctly deals with proxy models.""" # First, we test adding a proxy model changes = self.get_changes( [self.author_empty], [self.author_empty, self.author_proxy] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="AuthorProxy", options={"proxy": True, "indexes": [], "constraints": []}, ) # Now, we test turning a proxy model into a non-proxy model # It should delete the proxy then make the real one changes = self.get_changes( [self.author_empty, self.author_proxy], [self.author_empty, self.author_proxy_notproxy], ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy") self.assertOperationAttributes( changes, "testapp", 0, 1, name="AuthorProxy", options={} ) def test_proxy_non_model_parent(self): class Mixin: pass author_proxy_non_model_parent = ModelState( "testapp", "AuthorProxy", [], {"proxy": True}, (Mixin, "testapp.author"), ) changes = self.get_changes( [self.author_empty], [self.author_empty, author_proxy_non_model_parent], ) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="AuthorProxy", options={"proxy": True, "indexes": [], "constraints": []}, bases=(Mixin, "testapp.author"), ) def test_proxy_custom_pk(self): """ #23415 - The autodetector must correctly deal with custom FK on proxy models. 
""" # First, we test the default pk field name changes = self.get_changes( [], [self.author_empty, self.author_proxy_third, self.book_proxy_fk] ) # The model the FK is pointing from and to. self.assertEqual( changes["otherapp"][0].operations[0].fields[2][1].remote_field.model, "thirdapp.AuthorProxy", ) # Now, we test the custom pk field name changes = self.get_changes( [], [self.author_custom_pk, self.author_proxy_third, self.book_proxy_fk] ) # The model the FK is pointing from and to. self.assertEqual( changes["otherapp"][0].operations[0].fields[2][1].remote_field.model, "thirdapp.AuthorProxy", ) def test_proxy_to_mti_with_fk_to_proxy(self): # First, test the pk table and field name. to_state = self.make_project_state( [self.author_empty, self.author_proxy_third, self.book_proxy_fk], ) changes = self.get_changes([], to_state) fk_field = changes["otherapp"][0].operations[0].fields[2][1] self.assertEqual( to_state.get_concrete_model_key(fk_field.remote_field.model), ("testapp", "author"), ) self.assertEqual(fk_field.remote_field.model, "thirdapp.AuthorProxy") # Change AuthorProxy to use MTI. from_state = to_state.clone() to_state = self.make_project_state( [self.author_empty, self.author_proxy_third_notproxy, self.book_proxy_fk], ) changes = self.get_changes(from_state, to_state) # Right number/type of migrations for the AuthorProxy model? self.assertNumberMigrations(changes, "thirdapp", 1) self.assertOperationTypes( changes, "thirdapp", 0, ["DeleteModel", "CreateModel"] ) # Right number/type of migrations for the Book model with a FK to # AuthorProxy? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["AlterField"]) # otherapp should depend on thirdapp. self.assertMigrationDependencies( changes, "otherapp", 0, [("thirdapp", "auto_1")] ) # Now, test the pk table and field name. fk_field = changes["otherapp"][0].operations[0].field self.assertEqual( to_state.get_concrete_model_key(fk_field.remote_field.model), ("thirdapp", "authorproxy"), ) self.assertEqual(fk_field.remote_field.model, "thirdapp.AuthorProxy") def test_proxy_to_mti_with_fk_to_proxy_proxy(self): # First, test the pk table and field name. to_state = self.make_project_state( [ self.author_empty, self.author_proxy, self.author_proxy_proxy, self.book_proxy_proxy_fk, ] ) changes = self.get_changes([], to_state) fk_field = changes["otherapp"][0].operations[0].fields[1][1] self.assertEqual( to_state.get_concrete_model_key(fk_field.remote_field.model), ("testapp", "author"), ) self.assertEqual(fk_field.remote_field.model, "testapp.AAuthorProxyProxy") # Change AuthorProxy to use MTI. FK still points to AAuthorProxyProxy, # a proxy of AuthorProxy. from_state = to_state.clone() to_state = self.make_project_state( [ self.author_empty, self.author_proxy_notproxy, self.author_proxy_proxy, self.book_proxy_proxy_fk, ] ) changes = self.get_changes(from_state, to_state) # Right number/type of migrations for the AuthorProxy model? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"]) # Right number/type of migrations for the Book model with a FK to # AAuthorProxyProxy? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["AlterField"]) # otherapp should depend on testapp. self.assertMigrationDependencies( changes, "otherapp", 0, [("testapp", "auto_1")] ) # Now, test the pk table and field name. 
        fk_field = changes["otherapp"][0].operations[0].field
        self.assertEqual(
            to_state.get_concrete_model_key(fk_field.remote_field.model),
            ("testapp", "authorproxy"),
        )
        self.assertEqual(fk_field.remote_field.model, "testapp.AAuthorProxyProxy")

    def test_unmanaged_create(self):
        """The autodetector correctly deals with unmanaged models."""
        # First, we test adding an unmanaged model
        changes = self.get_changes(
            [self.author_empty], [self.author_empty, self.author_unmanaged]
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="AuthorUnmanaged", options={"managed": False}
        )

    def test_unmanaged_delete(self):
        changes = self.get_changes(
            [self.author_empty, self.author_unmanaged], [self.author_empty]
        )
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel"])

    def test_unmanaged_to_managed(self):
        # Now, we test turning an unmanaged model into a managed model
        changes = self.get_changes(
            [self.author_empty, self.author_unmanaged],
            [self.author_empty, self.author_unmanaged_managed],
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="authorunmanaged", options={}
        )

    def test_managed_to_unmanaged(self):
        # Now, we turn managed to unmanaged.
        changes = self.get_changes(
            [self.author_empty, self.author_unmanaged_managed],
            [self.author_empty, self.author_unmanaged],
        )
        # Right number/type of migrations?
        self.assertNumberMigrations(changes, "testapp", 1)
        self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
        self.assertOperationAttributes(
            changes, "testapp", 0, 0, name="authorunmanaged", options={"managed": False}
        )

    def test_unmanaged_custom_pk(self):
        """
        #23415 - The autodetector must correctly deal with custom FK on
        unmanaged models.
        """
        # First, we test the default pk field name
        changes = self.get_changes([], [self.author_unmanaged_default_pk, self.book])
        # The model the FK on the book model points to.
        fk_field = changes["otherapp"][0].operations[0].fields[2][1]
        self.assertEqual(fk_field.remote_field.model, "testapp.Author")
        # Now, we test the custom pk field name
        changes = self.get_changes([], [self.author_unmanaged_custom_pk, self.book])
        # The model the FK on the book model points to.
        fk_field = changes["otherapp"][0].operations[0].fields[2][1]
        self.assertEqual(fk_field.remote_field.model, "testapp.Author")

    @override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
    def test_swappable(self):
        with isolate_lru_cache(apps.get_swappable_settings_name):
            changes = self.get_changes(
                [self.custom_user], [self.custom_user, self.author_with_custom_user]
            )
        # Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author") self.assertMigrationDependencies( changes, "testapp", 0, [("__setting__", "AUTH_USER_MODEL")] ) def test_swappable_lowercase(self): model_state = ModelState( "testapp", "Document", [ ("id", models.AutoField(primary_key=True)), ( "owner", models.ForeignKey( settings.AUTH_USER_MODEL.lower(), models.CASCADE, ), ), ], ) with isolate_lru_cache(apps.get_swappable_settings_name): changes = self.get_changes([], [model_state]) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Document") self.assertMigrationDependencies( changes, "testapp", 0, [("__setting__", "AUTH_USER_MODEL")], ) @override_settings(AUTH_USER_MODEL="thirdapp.CustomUser") def test_swappable_many_to_many_model_case(self): document_lowercase = ModelState( "testapp", "Document", [ ("id", models.AutoField(primary_key=True)), ("owners", models.ManyToManyField(settings.AUTH_USER_MODEL.lower())), ], ) document = ModelState( "testapp", "Document", [ ("id", models.AutoField(primary_key=True)), ("owners", models.ManyToManyField(settings.AUTH_USER_MODEL)), ], ) with isolate_lru_cache(apps.get_swappable_settings_name): changes = self.get_changes( [self.custom_user, document_lowercase], [self.custom_user, document], ) self.assertEqual(len(changes), 0) def test_swappable_changed(self): with isolate_lru_cache(apps.get_swappable_settings_name): before = self.make_project_state([self.custom_user, self.author_with_user]) with override_settings(AUTH_USER_MODEL="thirdapp.CustomUser"): after = self.make_project_state( [self.custom_user, self.author_with_custom_user] ) autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes() # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterField"]) self.assertOperationAttributes( changes, "testapp", 0, 0, model_name="author", name="user" ) fk_field = changes["testapp"][0].operations[0].field self.assertEqual(fk_field.remote_field.model, "thirdapp.CustomUser") def test_add_field_with_default(self): """#22030 - Adding a field with a default should work.""" changes = self.get_changes([self.author_empty], [self.author_name_default]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AddField"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="name") def test_custom_deconstructible(self): """ Two instances which deconstruct to the same value aren't considered a change. """ changes = self.get_changes( [self.author_name_deconstructible_1], [self.author_name_deconstructible_2] ) # Right number of migrations? self.assertEqual(len(changes), 0) def test_deconstruct_field_kwarg(self): """Field instances are handled correctly by nested deconstruction.""" changes = self.get_changes( [self.author_name_deconstructible_3], [self.author_name_deconstructible_4] ) self.assertEqual(changes, {}) def test_deconstructible_list(self): """Nested deconstruction descends into lists.""" # When lists contain items that deconstruct to identical values, those lists # should be considered equal for the purpose of detecting state changes # (even if the original items are unequal). 
changes = self.get_changes( [self.author_name_deconstructible_list_1], [self.author_name_deconstructible_list_2], ) self.assertEqual(changes, {}) # Legitimate differences within the deconstructed lists should be reported # as a change changes = self.get_changes( [self.author_name_deconstructible_list_1], [self.author_name_deconstructible_list_3], ) self.assertEqual(len(changes), 1) def test_deconstructible_tuple(self): """Nested deconstruction descends into tuples.""" # When tuples contain items that deconstruct to identical values, those tuples # should be considered equal for the purpose of detecting state changes # (even if the original items are unequal). changes = self.get_changes( [self.author_name_deconstructible_tuple_1], [self.author_name_deconstructible_tuple_2], ) self.assertEqual(changes, {}) # Legitimate differences within the deconstructed tuples should be reported # as a change changes = self.get_changes( [self.author_name_deconstructible_tuple_1], [self.author_name_deconstructible_tuple_3], ) self.assertEqual(len(changes), 1) def test_deconstructible_dict(self): """Nested deconstruction descends into dict values.""" # When dicts contain items whose values deconstruct to identical values, # those dicts should be considered equal for the purpose of detecting # state changes (even if the original values are unequal). changes = self.get_changes( [self.author_name_deconstructible_dict_1], [self.author_name_deconstructible_dict_2], ) self.assertEqual(changes, {}) # Legitimate differences within the deconstructed dicts should be reported # as a change changes = self.get_changes( [self.author_name_deconstructible_dict_1], [self.author_name_deconstructible_dict_3], ) self.assertEqual(len(changes), 1) def test_nested_deconstructible_objects(self): """ Nested deconstruction is applied recursively to the args/kwargs of deconstructed objects. """ # If the items within a deconstructed object's args/kwargs have the same # deconstructed values - whether or not the items themselves are different # instances - then the object as a whole is regarded as unchanged. changes = self.get_changes( [self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_2], ) self.assertEqual(changes, {}) # Differences that exist solely within the args list of a deconstructed object # should be reported as changes changes = self.get_changes( [self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_changed_arg], ) self.assertEqual(len(changes), 1) # Additional args should also be reported as a change changes = self.get_changes( [self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_extra_arg], ) self.assertEqual(len(changes), 1) # Differences that exist solely within the kwargs dict of a deconstructed object # should be reported as changes changes = self.get_changes( [self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_changed_kwarg], ) self.assertEqual(len(changes), 1) # Additional kwargs should also be reported as a change changes = self.get_changes( [self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_extra_kwarg], ) self.assertEqual(len(changes), 1) def test_deconstruct_type(self): """ #22951 -- Uninstantiated classes with deconstruct are correctly returned by deep_deconstruct during serialization. 
""" author = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ( "name", models.CharField( max_length=200, # IntegerField intentionally not instantiated. default=models.IntegerField, ), ), ], ) changes = self.get_changes([], [author]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) def test_replace_string_with_foreignkey(self): """ #22300 - Adding an FK in the same "spot" as a deleted CharField should work. """ changes = self.get_changes( [self.author_with_publisher_string], [self.author_with_publisher, self.publisher], ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["CreateModel", "RemoveField", "AddField"] ) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher") self.assertOperationAttributes(changes, "testapp", 0, 1, name="publisher_name") self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher") def test_foreign_key_removed_before_target_model(self): """ Removing an FK and the model it targets in the same change must remove the FK field before the model to maintain consistency. """ changes = self.get_changes( [self.author_with_publisher, self.publisher], [self.author_name] ) # removes both the model and FK # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "DeleteModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="publisher") self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher") @mock.patch( "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition", side_effect=AssertionError("Should not have prompted for not null addition"), ) def test_add_many_to_many(self, mocked_ask_method): """#22435 - Adding a ManyToManyField should not prompt for a default.""" changes = self.get_changes( [self.author_empty, self.publisher], [self.author_with_m2m, self.publisher] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AddField"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers") def test_alter_many_to_many(self): changes = self.get_changes( [self.author_with_m2m, self.publisher], [self.author_with_m2m_blank, self.publisher], ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterField"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers") def test_create_with_through_model(self): """ Adding a m2m with a through model and the models that use it should be ordered correctly. """ changes = self.get_changes( [], [self.author_with_m2m_through, self.publisher, self.contract] ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, [ "CreateModel", "CreateModel", "CreateModel", "AddField", ], ) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author") self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher") self.assertOperationAttributes(changes, "testapp", 0, 2, name="Contract") self.assertOperationAttributes( changes, "testapp", 0, 3, model_name="author", name="publishers" ) def test_many_to_many_removed_before_through_model(self): """ Removing a ManyToManyField and the "through" model in the same change must remove the field before the model to maintain consistency. """ changes = self.get_changes( [ self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution, ], [self.book_with_no_author, self.author_name], ) # Remove both the through model and ManyToMany # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["RemoveField", "DeleteModel"] ) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="authors", model_name="book" ) self.assertOperationAttributes(changes, "otherapp", 0, 1, name="Attribution") def test_many_to_many_removed_before_through_model_2(self): """ Removing a model that contains a ManyToManyField and the "through" model in the same change must remove the field before the model to maintain consistency. """ changes = self.get_changes( [ self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution, ], [self.author_name], ) # Remove both the through model and ManyToMany # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["RemoveField", "DeleteModel", "DeleteModel"] ) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="authors", model_name="book" ) self.assertOperationAttributes(changes, "otherapp", 0, 1, name="Attribution") self.assertOperationAttributes(changes, "otherapp", 0, 2, name="Book") def test_m2m_w_through_multistep_remove(self): """ A model with a m2m field that specifies a "through" model cannot be removed in the same migration as that through model as the schema will pass through an inconsistent state. The autodetector should produce two migrations to avoid this issue. """ changes = self.get_changes( [self.author_with_m2m_through, self.publisher, self.contract], [self.publisher], ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["RemoveField", "RemoveField", "DeleteModel", "DeleteModel"], ) self.assertOperationAttributes( changes, "testapp", 0, 0, name="author", model_name="contract" ) self.assertOperationAttributes( changes, "testapp", 0, 1, name="publisher", model_name="contract" ) self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author") self.assertOperationAttributes(changes, "testapp", 0, 3, name="Contract") def test_concrete_field_changed_to_many_to_many(self): """ #23938 - Changing a concrete field into a ManyToManyField first removes the concrete field and then adds the m2m field. """ changes = self.get_changes( [self.author_with_former_m2m], [self.author_with_m2m, self.publisher] ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["CreateModel", "RemoveField", "AddField"] ) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Publisher") self.assertOperationAttributes( changes, "testapp", 0, 1, name="publishers", model_name="author" ) self.assertOperationAttributes( changes, "testapp", 0, 2, name="publishers", model_name="author" ) def test_many_to_many_changed_to_concrete_field(self): """ #23938 - Changing a ManyToManyField into a concrete field first removes the m2m field and then adds the concrete field. """ changes = self.get_changes( [self.author_with_m2m, self.publisher], [self.author_with_former_m2m] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["RemoveField", "DeleteModel", "AddField"] ) self.assertOperationAttributes( changes, "testapp", 0, 0, name="publishers", model_name="author" ) self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher") self.assertOperationAttributes( changes, "testapp", 0, 2, name="publishers", model_name="author" ) self.assertOperationFieldAttributes(changes, "testapp", 0, 2, max_length=100) def test_non_circular_foreignkey_dependency_removal(self): """ If two models with a ForeignKey from one to the other are removed at the same time, the autodetector should remove them in the correct order. """ changes = self.get_changes( [self.author_with_publisher, self.publisher_with_author], [] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["RemoveField", "DeleteModel", "DeleteModel"] ) self.assertOperationAttributes( changes, "testapp", 0, 0, name="author", model_name="publisher" ) self.assertOperationAttributes(changes, "testapp", 0, 1, name="Author") self.assertOperationAttributes(changes, "testapp", 0, 2, name="Publisher") def test_alter_model_options(self): """Changing a model's options should make a change.""" changes = self.get_changes([self.author_empty], [self.author_with_options]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"]) self.assertOperationAttributes( changes, "testapp", 0, 0, options={ "permissions": [("can_hire", "Can hire")], "verbose_name": "Authi", }, ) # Changing them back to empty should also make a change changes = self.get_changes([self.author_with_options], [self.author_empty]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="author", options={} ) def test_alter_model_options_proxy(self): """Changing a proxy model's options should also make a change.""" changes = self.get_changes( [self.author_proxy, self.author_empty], [self.author_proxy_options, self.author_empty], ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="authorproxy", options={"verbose_name": "Super Author"}, ) def test_set_alter_order_with_respect_to(self): """Setting order_with_respect_to adds a field.""" changes = self.get_changes( [self.book, self.author_with_book], [self.book, self.author_with_book_order_wrt], ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AlterOrderWithRespectTo"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="author", order_with_respect_to="book" ) def test_add_alter_order_with_respect_to(self): """ Setting order_with_respect_to when adding the FK too does things in the right order. """ changes = self.get_changes( [self.author_name], [self.book, self.author_with_book_order_wrt] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["AddField", "AlterOrderWithRespectTo"] ) self.assertOperationAttributes( changes, "testapp", 0, 0, model_name="author", name="book" ) self.assertOperationAttributes( changes, "testapp", 0, 1, name="author", order_with_respect_to="book" ) def test_remove_alter_order_with_respect_to(self): """ Removing order_with_respect_to when removing the FK too does things in the right order. """ changes = self.get_changes( [self.book, self.author_with_book_order_wrt], [self.author_name] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["AlterOrderWithRespectTo", "RemoveField"] ) self.assertOperationAttributes( changes, "testapp", 0, 0, name="author", order_with_respect_to=None ) self.assertOperationAttributes( changes, "testapp", 0, 1, model_name="author", name="book" ) def test_add_model_order_with_respect_to(self): """ Setting order_with_respect_to when adding the whole model does things in the right order. """ changes = self.get_changes([], [self.book, self.author_with_book_order_wrt]) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="Author", options={"order_with_respect_to": "book"}, ) self.assertNotIn( "_order", [name for name, field in changes["testapp"][0].operations[0].fields], ) def test_add_model_order_with_respect_to_unique_together(self): changes = self.get_changes( [], [ self.book, ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book", models.CASCADE)), ], options={ "order_with_respect_to": "book", "unique_together": {("id", "_order")}, }, ), ], ) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="Author", options={ "order_with_respect_to": "book", "unique_together": {("id", "_order")}, }, ) def test_add_model_order_with_respect_to_index_constraint(self): tests = [ ( "AddIndex", { "indexes": [ models.Index(fields=["_order"], name="book_order_idx"), ] }, ), ( "AddConstraint", { "constraints": [ models.CheckConstraint( check=models.Q(_order__gt=1), name="book_order_gt_1", ), ] }, ), ] for operation, extra_option in tests: with self.subTest(operation=operation): after = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book", models.CASCADE)), ], options={ "order_with_respect_to": "book", **extra_option, }, ) changes = self.get_changes([], [self.book, after]) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, [ "CreateModel", operation, ], ) self.assertOperationAttributes( changes, "testapp", 0, 0, name="Author", options={"order_with_respect_to": "book"}, ) def test_set_alter_order_with_respect_to_index_constraint_unique_together(self): tests = [ ( "AddIndex", { "indexes": [ models.Index(fields=["_order"], name="book_order_idx"), ] }, ), ( "AddConstraint", { "constraints": [ models.CheckConstraint( check=models.Q(_order__gt=1), name="book_order_gt_1", ), ] }, ), ("AlterUniqueTogether", {"unique_together": {("id", "_order")}}), ] for operation, extra_option in tests: with self.subTest(operation=operation): after = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book", models.CASCADE)), ], options={ "order_with_respect_to": "book", **extra_option, }, ) changes = self.get_changes( [self.book, self.author_with_book], [self.book, after], ) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, [ "AlterOrderWithRespectTo", operation, ], ) def test_alter_model_managers(self): """ Changing the model managers adds a new operation. """ changes = self.get_changes([self.other_pony], [self.other_pony_food]) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["AlterModelManagers"]) self.assertOperationAttributes(changes, "otherapp", 0, 0, name="pony") self.assertEqual( [name for name, mgr in changes["otherapp"][0].operations[0].managers], ["food_qs", "food_mgr", "food_mgr_kwargs"], ) self.assertEqual( changes["otherapp"][0].operations[0].managers[1][1].args, ("a", "b", 1, 2) ) self.assertEqual( changes["otherapp"][0].operations[0].managers[2][1].args, ("x", "y", 3, 4) ) def test_swappable_first_inheritance(self): """Swappable models get their CreateModel first.""" changes = self.get_changes([], [self.custom_user, self.aardvark]) # Right number/type of migrations? self.assertNumberMigrations(changes, "thirdapp", 1) self.assertOperationTypes( changes, "thirdapp", 0, ["CreateModel", "CreateModel"] ) self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="CustomUser") self.assertOperationAttributes(changes, "thirdapp", 0, 1, name="Aardvark") def test_default_related_name_option(self): model_state = ModelState( "app", "model", [ ("id", models.AutoField(primary_key=True)), ], options={"default_related_name": "related_name"}, ) changes = self.get_changes([], [model_state]) self.assertNumberMigrations(changes, "app", 1) self.assertOperationTypes(changes, "app", 0, ["CreateModel"]) self.assertOperationAttributes( changes, "app", 0, 0, name="model", options={"default_related_name": "related_name"}, ) altered_model_state = ModelState( "app", "Model", [ ("id", models.AutoField(primary_key=True)), ], ) changes = self.get_changes([model_state], [altered_model_state]) self.assertNumberMigrations(changes, "app", 1) self.assertOperationTypes(changes, "app", 0, ["AlterModelOptions"]) self.assertOperationAttributes(changes, "app", 0, 0, name="model", options={}) @override_settings(AUTH_USER_MODEL="thirdapp.CustomUser") def test_swappable_first_setting(self): """Swappable models get their CreateModel first.""" with isolate_lru_cache(apps.get_swappable_settings_name): changes = self.get_changes([], [self.custom_user_no_inherit, self.aardvark]) # Right number/type of migrations? self.assertNumberMigrations(changes, "thirdapp", 1) self.assertOperationTypes( changes, "thirdapp", 0, ["CreateModel", "CreateModel"] ) self.assertOperationAttributes(changes, "thirdapp", 0, 0, name="CustomUser") self.assertOperationAttributes(changes, "thirdapp", 0, 1, name="Aardvark") def test_bases_first(self): """Bases of other models come first.""" changes = self.get_changes( [], [self.aardvark_based_on_author, self.author_name] ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author") self.assertOperationAttributes(changes, "testapp", 0, 1, name="Aardvark") def test_bases_first_mixed_case_app_label(self): app_label = "MiXedCaseApp" changes = self.get_changes( [], [ ModelState( app_label, "owner", [ ("id", models.AutoField(primary_key=True)), ], ), ModelState( app_label, "place", [ ("id", models.AutoField(primary_key=True)), ( "owner", models.ForeignKey("MiXedCaseApp.owner", models.CASCADE), ), ], ), ModelState(app_label, "restaurant", [], bases=("MiXedCaseApp.place",)), ], ) self.assertNumberMigrations(changes, app_label, 1) self.assertOperationTypes( changes, app_label, 0, [ "CreateModel", "CreateModel", "CreateModel", ], ) self.assertOperationAttributes(changes, app_label, 0, 0, name="owner") self.assertOperationAttributes(changes, app_label, 0, 1, name="place") self.assertOperationAttributes(changes, app_label, 0, 2, name="restaurant") def test_multiple_bases(self): """ Inheriting models doesn't move *_ptr fields into AddField operations. """ A = ModelState("app", "A", [("a_id", models.AutoField(primary_key=True))]) B = ModelState("app", "B", [("b_id", models.AutoField(primary_key=True))]) C = ModelState("app", "C", [], bases=("app.A", "app.B")) D = ModelState("app", "D", [], bases=("app.A", "app.B")) E = ModelState("app", "E", [], bases=("app.A", "app.B")) changes = self.get_changes([], [A, B, C, D, E]) # Right number/type of migrations? self.assertNumberMigrations(changes, "app", 1) self.assertOperationTypes( changes, "app", 0, ["CreateModel", "CreateModel", "CreateModel", "CreateModel", "CreateModel"], ) self.assertOperationAttributes(changes, "app", 0, 0, name="A") self.assertOperationAttributes(changes, "app", 0, 1, name="B") self.assertOperationAttributes(changes, "app", 0, 2, name="C") self.assertOperationAttributes(changes, "app", 0, 3, name="D") self.assertOperationAttributes(changes, "app", 0, 4, name="E") def test_proxy_bases_first(self): """Bases of proxies come first.""" changes = self.get_changes( [], [self.author_empty, self.author_proxy, self.author_proxy_proxy] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["CreateModel", "CreateModel", "CreateModel"] ) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author") self.assertOperationAttributes(changes, "testapp", 0, 1, name="AuthorProxy") self.assertOperationAttributes( changes, "testapp", 0, 2, name="AAuthorProxyProxy" ) def test_pk_fk_included(self): """ A relation used as the primary key is kept as part of CreateModel. """ changes = self.get_changes([], [self.aardvark_pk_fk_author, self.author_name]) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author") self.assertOperationAttributes(changes, "testapp", 0, 1, name="Aardvark") def test_first_dependency(self): """ A dependency to an app with no migrations uses __first__. 
""" # Load graph loader = MigrationLoader(connection) before = self.make_project_state([]) after = self.make_project_state([self.book_migrations_fk]) after.real_apps = {"migrations"} autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes(graph=loader.graph) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book") self.assertMigrationDependencies( changes, "otherapp", 0, [("migrations", "__first__")] ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_last_dependency(self): """ A dependency to an app with existing migrations uses the last migration of that app. """ # Load graph loader = MigrationLoader(connection) before = self.make_project_state([]) after = self.make_project_state([self.book_migrations_fk]) after.real_apps = {"migrations"} autodetector = MigrationAutodetector(before, after) changes = autodetector._detect_changes(graph=loader.graph) # Right number/type of migrations? self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Book") self.assertMigrationDependencies( changes, "otherapp", 0, [("migrations", "0002_second")] ) def test_alter_fk_before_model_deletion(self): """ ForeignKeys are altered _before_ the model they used to refer to are deleted. """ changes = self.get_changes( [self.author_name, self.publisher_with_author], [self.aardvark_testapp, self.publisher_with_aardvark_author], ) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["CreateModel", "AlterField", "DeleteModel"] ) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Aardvark") self.assertOperationAttributes(changes, "testapp", 0, 1, name="author") self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author") def test_fk_dependency_other_app(self): """ #23100 - ForeignKeys correctly depend on other apps' models. """ changes = self.get_changes( [self.author_name, self.book], [self.author_with_book, self.book] ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AddField"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="book") self.assertMigrationDependencies( changes, "testapp", 0, [("otherapp", "__first__")] ) def test_alter_unique_together_fk_to_m2m(self): changes = self.get_changes( [self.author_name, self.book_unique_together], [ self.author_name, ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ManyToManyField("testapp.Author")), ("title", models.CharField(max_length=200)), ], ), ], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["AlterUniqueTogether", "RemoveField", "AddField"] ) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="book", unique_together=set() ) self.assertOperationAttributes( changes, "otherapp", 0, 1, model_name="book", name="author" ) self.assertOperationAttributes( changes, "otherapp", 0, 2, model_name="book", name="author" ) def test_alter_field_to_fk_dependency_other_app(self): changes = self.get_changes( [self.author_empty, self.book_with_no_author_fk], [self.author_empty, self.book], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["AlterField"]) self.assertMigrationDependencies( changes, "otherapp", 0, [("testapp", "__first__")] ) def test_circular_dependency_mixed_addcreate(self): """ #23315 - The dependency resolver knows to put all CreateModel before AddField and not become unsolvable. """ address = ModelState( "a", "Address", [ ("id", models.AutoField(primary_key=True)), ("country", models.ForeignKey("b.DeliveryCountry", models.CASCADE)), ], ) person = ModelState( "a", "Person", [ ("id", models.AutoField(primary_key=True)), ], ) apackage = ModelState( "b", "APackage", [ ("id", models.AutoField(primary_key=True)), ("person", models.ForeignKey("a.Person", models.CASCADE)), ], ) country = ModelState( "b", "DeliveryCountry", [ ("id", models.AutoField(primary_key=True)), ], ) changes = self.get_changes([], [address, person, apackage, country]) # Right number/type of migrations? self.assertNumberMigrations(changes, "a", 2) self.assertNumberMigrations(changes, "b", 1) self.assertOperationTypes(changes, "a", 0, ["CreateModel", "CreateModel"]) self.assertOperationTypes(changes, "a", 1, ["AddField"]) self.assertOperationTypes(changes, "b", 0, ["CreateModel", "CreateModel"]) @override_settings(AUTH_USER_MODEL="a.Tenant") def test_circular_dependency_swappable(self): """ #23322 - The dependency resolver knows to explicitly resolve swappable models. """ with isolate_lru_cache(apps.get_swappable_settings_name): tenant = ModelState( "a", "Tenant", [ ("id", models.AutoField(primary_key=True)), ("primary_address", models.ForeignKey("b.Address", models.CASCADE)), ], bases=(AbstractBaseUser,), ) address = ModelState( "b", "Address", [ ("id", models.AutoField(primary_key=True)), ( "tenant", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE), ), ], ) changes = self.get_changes([], [address, tenant]) # Right number/type of migrations? self.assertNumberMigrations(changes, "a", 2) self.assertOperationTypes(changes, "a", 0, ["CreateModel"]) self.assertOperationTypes(changes, "a", 1, ["AddField"]) self.assertMigrationDependencies(changes, "a", 0, []) self.assertMigrationDependencies( changes, "a", 1, [("a", "auto_1"), ("b", "auto_1")] ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "b", 1) self.assertOperationTypes(changes, "b", 0, ["CreateModel"]) self.assertMigrationDependencies( changes, "b", 0, [("__setting__", "AUTH_USER_MODEL")] ) @override_settings(AUTH_USER_MODEL="b.Tenant") def test_circular_dependency_swappable2(self): """ #23322 - The dependency resolver knows to explicitly resolve swappable models but with the swappable not being the first migrated model. """ with isolate_lru_cache(apps.get_swappable_settings_name): address = ModelState( "a", "Address", [ ("id", models.AutoField(primary_key=True)), ( "tenant", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE), ), ], ) tenant = ModelState( "b", "Tenant", [ ("id", models.AutoField(primary_key=True)), ("primary_address", models.ForeignKey("a.Address", models.CASCADE)), ], bases=(AbstractBaseUser,), ) changes = self.get_changes([], [address, tenant]) # Right number/type of migrations? self.assertNumberMigrations(changes, "a", 2) self.assertOperationTypes(changes, "a", 0, ["CreateModel"]) self.assertOperationTypes(changes, "a", 1, ["AddField"]) self.assertMigrationDependencies(changes, "a", 0, []) self.assertMigrationDependencies( changes, "a", 1, [("__setting__", "AUTH_USER_MODEL"), ("a", "auto_1")] ) # Right number/type of migrations? self.assertNumberMigrations(changes, "b", 1) self.assertOperationTypes(changes, "b", 0, ["CreateModel"]) self.assertMigrationDependencies(changes, "b", 0, [("a", "auto_1")]) @override_settings(AUTH_USER_MODEL="a.Person") def test_circular_dependency_swappable_self(self): """ #23322 - The dependency resolver knows to explicitly resolve swappable models. """ with isolate_lru_cache(apps.get_swappable_settings_name): person = ModelState( "a", "Person", [ ("id", models.AutoField(primary_key=True)), ( "parent1", models.ForeignKey( settings.AUTH_USER_MODEL, models.CASCADE, related_name="children", ), ), ], ) changes = self.get_changes([], [person]) # Right number/type of migrations? self.assertNumberMigrations(changes, "a", 1) self.assertOperationTypes(changes, "a", 0, ["CreateModel"]) self.assertMigrationDependencies(changes, "a", 0, []) @override_settings(AUTH_USER_MODEL="a.User") def test_swappable_circular_multi_mti(self): with isolate_lru_cache(apps.get_swappable_settings_name): parent = ModelState( "a", "Parent", [("user", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE))], ) child = ModelState("a", "Child", [], bases=("a.Parent",)) user = ModelState("a", "User", [], bases=(AbstractBaseUser, "a.Child")) changes = self.get_changes([], [parent, child, user]) self.assertNumberMigrations(changes, "a", 1) self.assertOperationTypes( changes, "a", 0, ["CreateModel", "CreateModel", "CreateModel", "AddField"] ) @mock.patch( "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition", side_effect=AssertionError("Should not have prompted for not null addition"), ) def test_add_blank_textfield_and_charfield(self, mocked_ask_method): """ #23405 - Adding a NOT NULL and blank `CharField` or `TextField` without default should not prompt for a default. """ changes = self.get_changes( [self.author_empty], [self.author_with_biography_blank] ) # Right number/type of migrations? 
self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AddField", "AddField"]) self.assertOperationAttributes(changes, "testapp", 0, 0) @mock.patch( "django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition" ) def test_add_non_blank_textfield_and_charfield(self, mocked_ask_method): """ #23405 - Adding a NOT NULL and non-blank `CharField` or `TextField` without default should prompt for a default. """ changes = self.get_changes( [self.author_empty], [self.author_with_biography_non_blank] ) self.assertEqual(mocked_ask_method.call_count, 2) # Right number/type of migrations? self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["AddField", "AddField"]) self.assertOperationAttributes(changes, "testapp", 0, 0) def test_mti_inheritance_model_removal(self): Animal = ModelState( "app", "Animal", [ ("id", models.AutoField(primary_key=True)), ], ) Dog = ModelState("app", "Dog", [], bases=("app.Animal",)) changes = self.get_changes([Animal, Dog], [Animal]) self.assertNumberMigrations(changes, "app", 1) self.assertOperationTypes(changes, "app", 0, ["DeleteModel"]) self.assertOperationAttributes(changes, "app", 0, 0, name="Dog") def test_add_model_with_field_removed_from_base_model(self): """ Removing a base field takes place before adding a new inherited model that has a field with the same name. """ before = [ ModelState( "app", "readable", [ ("id", models.AutoField(primary_key=True)), ("title", models.CharField(max_length=200)), ], ), ] after = [ ModelState( "app", "readable", [ ("id", models.AutoField(primary_key=True)), ], ), ModelState( "app", "book", [ ("title", models.CharField(max_length=200)), ], bases=("app.readable",), ), ] changes = self.get_changes(before, after) self.assertNumberMigrations(changes, "app", 1) self.assertOperationTypes(changes, "app", 0, ["RemoveField", "CreateModel"]) self.assertOperationAttributes( changes, "app", 0, 0, name="title", model_name="readable" ) self.assertOperationAttributes(changes, "app", 0, 1, name="book") def test_parse_number(self): tests = [ ("no_number", None), ("0001_initial", 1), ("0002_model3", 2), ("0002_auto_20380101_1112", 2), ("0002_squashed_0003", 3), ("0002_model2_squashed_0003_other4", 3), ("0002_squashed_0003_squashed_0004", 4), ("0002_model2_squashed_0003_other4_squashed_0005_other6", 5), ("0002_custom_name_20380101_1112_squashed_0003_model", 3), ("2_squashed_4", 4), ] for migration_name, expected_number in tests: with self.subTest(migration_name=migration_name): self.assertEqual( MigrationAutodetector.parse_number(migration_name), expected_number, ) def test_add_custom_fk_with_hardcoded_to(self): class HardcodedForeignKey(models.ForeignKey): def __init__(self, *args, **kwargs): kwargs["to"] = "testapp.Author" super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["to"] return name, path, args, kwargs book_hardcoded_fk_to = ModelState( "testapp", "Book", [ ("author", HardcodedForeignKey(on_delete=models.CASCADE)), ], ) changes = self.get_changes( [self.author_empty], [self.author_empty, book_hardcoded_fk_to], ) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) self.assertOperationAttributes(changes, "testapp", 0, 0, name="Book") @ignore_warnings(category=RemovedInDjango51Warning) class AutodetectorIndexTogetherTests(BaseAutodetectorTests): book_index_together = ModelState( "otherapp", "Book", [ ("id", 
models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "index_together": {("author", "title")}, }, ) book_index_together_2 = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "index_together": {("title", "author")}, }, ) book_index_together_3 = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("newfield", models.IntegerField()), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "index_together": {("title", "newfield")}, }, ) book_index_together_4 = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("newfield2", models.IntegerField()), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "index_together": {("title", "newfield2")}, }, ) def test_empty_index_together(self): """Empty index_together shouldn't generate a migration.""" # Explicitly testing for not specified, since this is the case after # a CreateModel operation w/o any definition on the original model model_state_not_specified = ModelState( "a", "model", [("id", models.AutoField(primary_key=True))] ) # Explicitly testing for None, since this was the issue in #23452 after # an AlterIndexTogether operation with e.g. () as value model_state_none = ModelState( "a", "model", [("id", models.AutoField(primary_key=True))], { "index_together": None, }, ) # Explicitly testing for the empty set, since we now always have sets. # During removal (('col1', 'col2'),) --> () this becomes set([]) model_state_empty = ModelState( "a", "model", [("id", models.AutoField(primary_key=True))], { "index_together": set(), }, ) def test(from_state, to_state, msg): changes = self.get_changes([from_state], [to_state]) if changes: ops = ", ".join( o.__class__.__name__ for o in changes["a"][0].operations ) self.fail("Created operation(s) %s from %s" % (ops, msg)) tests = ( ( model_state_not_specified, model_state_not_specified, '"not specified" to "not specified"', ), (model_state_not_specified, model_state_none, '"not specified" to "None"'), ( model_state_not_specified, model_state_empty, '"not specified" to "empty"', ), (model_state_none, model_state_not_specified, '"None" to "not specified"'), (model_state_none, model_state_none, '"None" to "None"'), (model_state_none, model_state_empty, '"None" to "empty"'), ( model_state_empty, model_state_not_specified, '"empty" to "not specified"', ), (model_state_empty, model_state_none, '"empty" to "None"'), (model_state_empty, model_state_empty, '"empty" to "empty"'), ) for t in tests: test(*t) def test_rename_index_together_to_index(self): changes = self.get_changes( [AutodetectorTests.author_empty, self.book_index_together], [AutodetectorTests.author_empty, AutodetectorTests.book_indexes], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["RenameIndex"]) self.assertOperationAttributes( changes, "otherapp", 0, 0, model_name="book", new_name="book_title_author_idx", old_fields=("author", "title"), ) def test_rename_index_together_to_index_extra_options(self): # Indexes with extra options don't match indexes in index_together. 
book_partial_index = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "indexes": [ models.Index( fields=["author", "title"], condition=models.Q(title__startswith="The"), name="book_title_author_idx", ) ], }, ) changes = self.get_changes( [AutodetectorTests.author_empty, self.book_index_together], [AutodetectorTests.author_empty, book_partial_index], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["AlterIndexTogether", "AddIndex"], ) def test_rename_index_together_to_index_order_fields(self): # Indexes with reordered fields don't match indexes in index_together. changes = self.get_changes( [AutodetectorTests.author_empty, self.book_index_together], [AutodetectorTests.author_empty, AutodetectorTests.book_unordered_indexes], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["AlterIndexTogether", "AddIndex"], ) def test_add_index_together(self): changes = self.get_changes( [AutodetectorTests.author_empty, AutodetectorTests.book], [AutodetectorTests.author_empty, self.book_index_together], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["AlterIndexTogether"]) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="book", index_together={("author", "title")} ) def test_remove_index_together(self): changes = self.get_changes( [AutodetectorTests.author_empty, self.book_index_together], [AutodetectorTests.author_empty, AutodetectorTests.book], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes(changes, "otherapp", 0, ["AlterIndexTogether"]) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="book", index_together=set() ) def test_index_together_remove_fk(self): changes = self.get_changes( [AutodetectorTests.author_empty, self.book_index_together], [AutodetectorTests.author_empty, AutodetectorTests.book_with_no_author], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["AlterIndexTogether", "RemoveField"], ) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="book", index_together=set() ) self.assertOperationAttributes( changes, "otherapp", 0, 1, model_name="book", name="author" ) def test_index_together_no_changes(self): """ index_together doesn't generate a migration if no changes have been made. """ changes = self.get_changes( [AutodetectorTests.author_empty, self.book_index_together], [AutodetectorTests.author_empty, self.book_index_together], ) self.assertEqual(len(changes), 0) def test_index_together_ordering(self): """index_together triggers on ordering changes.""" changes = self.get_changes( [AutodetectorTests.author_empty, self.book_index_together], [AutodetectorTests.author_empty, self.book_index_together_2], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["AlterIndexTogether"], ) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="book", index_together={("title", "author")}, ) def test_add_field_and_index_together(self): """ Added fields will be created before using them in index_together. 
""" changes = self.get_changes( [AutodetectorTests.author_empty, AutodetectorTests.book], [AutodetectorTests.author_empty, self.book_index_together_3], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["AddField", "AlterIndexTogether"], ) self.assertOperationAttributes( changes, "otherapp", 0, 1, name="book", index_together={("title", "newfield")}, ) def test_create_model_and_index_together(self): author = ModelState( "otherapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ], ) book_with_author = ModelState( "otherapp", "Book", [ ("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("otherapp.Author", models.CASCADE)), ("title", models.CharField(max_length=200)), ], { "index_together": {("title", "author")}, }, ) changes = self.get_changes( [AutodetectorTests.book_with_no_author], [author, book_with_author] ) self.assertEqual(len(changes["otherapp"]), 1) migration = changes["otherapp"][0] self.assertEqual(len(migration.operations), 3) self.assertOperationTypes( changes, "otherapp", 0, ["CreateModel", "AddField", "AlterIndexTogether"], ) def test_remove_field_and_index_together(self): """ Removed fields will be removed after updating index_together. """ changes = self.get_changes( [AutodetectorTests.author_empty, self.book_index_together_3], [AutodetectorTests.author_empty, self.book_index_together], ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["AlterIndexTogether", "RemoveField"], ) self.assertOperationAttributes( changes, "otherapp", 0, 0, name="book", index_together={("author", "title")}, ) self.assertOperationAttributes( changes, "otherapp", 0, 1, model_name="book", name="newfield", ) def test_alter_field_and_index_together(self): """Fields are altered after deleting some index_together.""" initial_author = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("age", models.IntegerField(db_index=True)), ], { "index_together": {("name",)}, }, ) author_reversed_constraints = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200, unique=True)), ("age", models.IntegerField()), ], { "index_together": {("age",)}, }, ) changes = self.get_changes([initial_author], [author_reversed_constraints]) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, [ "AlterIndexTogether", "AlterField", "AlterField", "AlterIndexTogether", ], ) self.assertOperationAttributes( changes, "testapp", 0, 0, name="author", index_together=set(), ) self.assertOperationAttributes( changes, "testapp", 0, 1, model_name="author", name="age", ) self.assertOperationAttributes( changes, "testapp", 0, 2, model_name="author", name="name", ) self.assertOperationAttributes( changes, "testapp", 0, 3, name="author", index_together={("age",)}, ) def test_partly_alter_index_together_increase(self): initial_author = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("age", models.IntegerField()), ], { "index_together": {("name",)}, }, ) author_new_constraints = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("age", models.IntegerField()), ], { "index_together": {("name",), ("age",)}, }, ) changes = 
self.get_changes([initial_author], [author_new_constraints]) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["AlterIndexTogether"], ) self.assertOperationAttributes( changes, "testapp", 0, 0, name="author", index_together={("name",), ("age",)}, ) def test_partly_alter_index_together_decrease(self): initial_author = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("age", models.IntegerField()), ], { "index_together": {("name",), ("age",)}, }, ) author_new_constraints = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("age", models.IntegerField()), ], { "index_together": {("age",)}, }, ) changes = self.get_changes([initial_author], [author_new_constraints]) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["AlterIndexTogether"], ) self.assertOperationAttributes( changes, "testapp", 0, 0, name="author", index_together={("age",)}, ) def test_rename_field_and_index_together(self): """Fields are renamed before updating index_together.""" changes = self.get_changes( [AutodetectorTests.author_empty, self.book_index_together_3], [AutodetectorTests.author_empty, self.book_index_together_4], MigrationQuestioner({"ask_rename": True}), ) self.assertNumberMigrations(changes, "otherapp", 1) self.assertOperationTypes( changes, "otherapp", 0, ["RenameField", "AlterIndexTogether"], ) self.assertOperationAttributes( changes, "otherapp", 0, 1, name="book", index_together={("title", "newfield2")}, ) def test_add_model_order_with_respect_to_index_together(self): changes = self.get_changes( [], [ AutodetectorTests.book, ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book", models.CASCADE)), ], options={ "order_with_respect_to": "book", "index_together": {("name", "_order")}, }, ), ], ) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"]) self.assertOperationAttributes( changes, "testapp", 0, 0, name="Author", options={ "order_with_respect_to": "book", "index_together": {("name", "_order")}, }, ) def test_set_alter_order_with_respect_to_index_together(self): after = ModelState( "testapp", "Author", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book", models.CASCADE)), ], options={ "order_with_respect_to": "book", "index_together": {("name", "_order")}, }, ) changes = self.get_changes( [AutodetectorTests.book, AutodetectorTests.author_with_book], [AutodetectorTests.book, after], ) self.assertNumberMigrations(changes, "testapp", 1) self.assertOperationTypes( changes, "testapp", 0, ["AlterOrderWithRespectTo", "AlterIndexTogether"], ) class MigrationSuggestNameTests(SimpleTestCase): def test_no_operations(self): class Migration(migrations.Migration): operations = [] migration = Migration("some_migration", "test_app") self.assertIs(migration.suggest_name().startswith("auto_"), True) def test_no_operations_initial(self): class Migration(migrations.Migration): initial = True operations = [] migration = Migration("some_migration", "test_app") self.assertEqual(migration.suggest_name(), "initial") def test_single_operation(self): class Migration(migrations.Migration): operations = [migrations.CreateModel("Person", 
fields=[])] migration = Migration("0001_initial", "test_app") self.assertEqual(migration.suggest_name(), "person") class Migration(migrations.Migration): operations = [migrations.DeleteModel("Person")] migration = Migration("0002_initial", "test_app") self.assertEqual(migration.suggest_name(), "delete_person") def test_single_operation_long_name(self): class Migration(migrations.Migration): operations = [migrations.CreateModel("A" * 53, fields=[])] migration = Migration("some_migration", "test_app") self.assertEqual(migration.suggest_name(), "a" * 53) def test_two_operations(self): class Migration(migrations.Migration): operations = [ migrations.CreateModel("Person", fields=[]), migrations.DeleteModel("Animal"), ] migration = Migration("some_migration", "test_app") self.assertEqual(migration.suggest_name(), "person_delete_animal") def test_two_create_models(self): class Migration(migrations.Migration): operations = [ migrations.CreateModel("Person", fields=[]), migrations.CreateModel("Animal", fields=[]), ] migration = Migration("0001_initial", "test_app") self.assertEqual(migration.suggest_name(), "person_animal") def test_two_create_models_with_initial_true(self): class Migration(migrations.Migration): initial = True operations = [ migrations.CreateModel("Person", fields=[]), migrations.CreateModel("Animal", fields=[]), ] migration = Migration("0001_initial", "test_app") self.assertEqual(migration.suggest_name(), "initial") def test_many_operations_suffix(self): class Migration(migrations.Migration): operations = [ migrations.CreateModel("Person1", fields=[]), migrations.CreateModel("Person2", fields=[]), migrations.CreateModel("Person3", fields=[]), migrations.DeleteModel("Person4"), migrations.DeleteModel("Person5"), ] migration = Migration("some_migration", "test_app") self.assertEqual( migration.suggest_name(), "person1_person2_person3_delete_person4_and_more", ) def test_operation_with_no_suggested_name(self): class Migration(migrations.Migration): operations = [ migrations.CreateModel("Person", fields=[]), migrations.RunSQL("SELECT 1 FROM person;"), ] migration = Migration("some_migration", "test_app") self.assertIs(migration.suggest_name().startswith("auto_"), True) def test_none_name(self): class Migration(migrations.Migration): operations = [migrations.RunSQL("SELECT 1 FROM person;")] migration = Migration("0001_initial", "test_app") suggest_name = migration.suggest_name() self.assertIs(suggest_name.startswith("auto_"), True) def test_none_name_with_initial_true(self): class Migration(migrations.Migration): initial = True operations = [migrations.RunSQL("SELECT 1 FROM person;")] migration = Migration("0001_initial", "test_app") self.assertEqual(migration.suggest_name(), "initial") def test_auto(self): migration = migrations.Migration("0001_initial", "test_app") suggest_name = migration.suggest_name() self.assertIs(suggest_name.startswith("auto_"), True)
from django.apps.registry import Apps from django.contrib.contenttypes.fields import GenericForeignKey from django.db import models from django.db.migrations.exceptions import InvalidBasesError from django.db.migrations.operations import ( AddField, AlterField, DeleteModel, RemoveField, ) from django.db.migrations.state import ( ModelState, ProjectState, get_related_models_recursive, ) from django.test import SimpleTestCase, ignore_warnings, override_settings from django.test.utils import isolate_apps from django.utils.deprecation import RemovedInDjango51Warning from .models import ( FoodManager, FoodQuerySet, ModelWithCustomBase, NoMigrationFoodManager, UnicodeModel, ) class StateTests(SimpleTestCase): """ Tests state construction, rendering and modification by operations. """ # RemovedInDjango51Warning, when deprecation ends, only remove # Meta.index_together from inline models. @ignore_warnings(category=RemovedInDjango51Warning) def test_create(self): """ Tests making a ProjectState from an Apps """ new_apps = Apps(["migrations"]) class Author(models.Model): name = models.CharField(max_length=255) bio = models.TextField() age = models.IntegerField(blank=True, null=True) class Meta: app_label = "migrations" apps = new_apps unique_together = ["name", "bio"] index_together = ["bio", "age"] # RemovedInDjango51Warning. class AuthorProxy(Author): class Meta: app_label = "migrations" apps = new_apps proxy = True ordering = ["name"] class SubAuthor(Author): width = models.FloatField(null=True) class Meta: app_label = "migrations" apps = new_apps class Book(models.Model): title = models.CharField(max_length=1000) author = models.ForeignKey(Author, models.CASCADE) contributors = models.ManyToManyField(Author) class Meta: app_label = "migrations" apps = new_apps verbose_name = "tome" db_table = "test_tome" indexes = [models.Index(fields=["title"])] class Food(models.Model): food_mgr = FoodManager("a", "b") food_qs = FoodQuerySet.as_manager() food_no_mgr = NoMigrationFoodManager("x", "y") class Meta: app_label = "migrations" apps = new_apps class FoodNoManagers(models.Model): class Meta: app_label = "migrations" apps = new_apps class FoodNoDefaultManager(models.Model): food_no_mgr = NoMigrationFoodManager("x", "y") food_mgr = FoodManager("a", "b") food_qs = FoodQuerySet.as_manager() class Meta: app_label = "migrations" apps = new_apps mgr1 = FoodManager("a", "b") mgr2 = FoodManager("x", "y", c=3, d=4) class FoodOrderedManagers(models.Model): # The managers on this model should be ordered by their creation # counter and not by the order in model body food_no_mgr = NoMigrationFoodManager("x", "y") food_mgr2 = mgr2 food_mgr1 = mgr1 class Meta: app_label = "migrations" apps = new_apps project_state = ProjectState.from_apps(new_apps) author_state = project_state.models["migrations", "author"] author_proxy_state = project_state.models["migrations", "authorproxy"] sub_author_state = project_state.models["migrations", "subauthor"] book_state = project_state.models["migrations", "book"] food_state = project_state.models["migrations", "food"] food_no_managers_state = project_state.models["migrations", "foodnomanagers"] food_no_default_manager_state = project_state.models[ "migrations", "foodnodefaultmanager" ] food_order_manager_state = project_state.models[ "migrations", "foodorderedmanagers" ] book_index = models.Index(fields=["title"]) book_index.set_name_with_model(Book) self.assertEqual(author_state.app_label, "migrations") self.assertEqual(author_state.name, "Author") 
self.assertEqual(list(author_state.fields), ["id", "name", "bio", "age"]) self.assertEqual(author_state.fields["name"].max_length, 255) self.assertIs(author_state.fields["bio"].null, False) self.assertIs(author_state.fields["age"].null, True) self.assertEqual( author_state.options, { "unique_together": {("name", "bio")}, "index_together": {("bio", "age")}, # RemovedInDjango51Warning. "indexes": [], "constraints": [], }, ) self.assertEqual(author_state.bases, (models.Model,)) self.assertEqual(book_state.app_label, "migrations") self.assertEqual(book_state.name, "Book") self.assertEqual( list(book_state.fields), ["id", "title", "author", "contributors"] ) self.assertEqual(book_state.fields["title"].max_length, 1000) self.assertIs(book_state.fields["author"].null, False) self.assertEqual( book_state.fields["contributors"].__class__.__name__, "ManyToManyField" ) self.assertEqual( book_state.options, { "verbose_name": "tome", "db_table": "test_tome", "indexes": [book_index], "constraints": [], }, ) self.assertEqual(book_state.bases, (models.Model,)) self.assertEqual(author_proxy_state.app_label, "migrations") self.assertEqual(author_proxy_state.name, "AuthorProxy") self.assertEqual(author_proxy_state.fields, {}) self.assertEqual( author_proxy_state.options, {"proxy": True, "ordering": ["name"], "indexes": [], "constraints": []}, ) self.assertEqual(author_proxy_state.bases, ("migrations.author",)) self.assertEqual(sub_author_state.app_label, "migrations") self.assertEqual(sub_author_state.name, "SubAuthor") self.assertEqual(len(sub_author_state.fields), 2) self.assertEqual(sub_author_state.bases, ("migrations.author",)) # The default manager is used in migrations self.assertEqual([name for name, mgr in food_state.managers], ["food_mgr"]) self.assertTrue(all(isinstance(name, str) for name, mgr in food_state.managers)) self.assertEqual(food_state.managers[0][1].args, ("a", "b", 1, 2)) # No explicit managers defined. Migrations will fall back to the default self.assertEqual(food_no_managers_state.managers, []) # food_mgr is used in migration but isn't the default mgr, hence add the # default self.assertEqual( [name for name, mgr in food_no_default_manager_state.managers], ["food_no_mgr", "food_mgr"], ) self.assertTrue( all( isinstance(name, str) for name, mgr in food_no_default_manager_state.managers ) ) self.assertEqual( food_no_default_manager_state.managers[0][1].__class__, models.Manager ) self.assertIsInstance(food_no_default_manager_state.managers[1][1], FoodManager) self.assertEqual( [name for name, mgr in food_order_manager_state.managers], ["food_mgr1", "food_mgr2"], ) self.assertTrue( all( isinstance(name, str) for name, mgr in food_order_manager_state.managers ) ) self.assertEqual( [mgr.args for name, mgr in food_order_manager_state.managers], [("a", "b", 1, 2), ("x", "y", 3, 4)], ) def test_custom_default_manager_added_to_the_model_state(self): """ When the default manager of the model is a custom manager, it needs to be added to the model state. 
""" new_apps = Apps(["migrations"]) custom_manager = models.Manager() class Author(models.Model): objects = models.TextField() authors = custom_manager class Meta: app_label = "migrations" apps = new_apps project_state = ProjectState.from_apps(new_apps) author_state = project_state.models["migrations", "author"] self.assertEqual(author_state.managers, [("authors", custom_manager)]) def test_custom_default_manager_named_objects_with_false_migration_flag(self): """ When a manager is added with a name of 'objects' but it does not have `use_in_migrations = True`, no migration should be added to the model state (#26643). """ new_apps = Apps(["migrations"]) class Author(models.Model): objects = models.Manager() class Meta: app_label = "migrations" apps = new_apps project_state = ProjectState.from_apps(new_apps) author_state = project_state.models["migrations", "author"] self.assertEqual(author_state.managers, []) def test_no_duplicate_managers(self): """ When a manager is added with `use_in_migrations = True` and a parent model had a manager with the same name and `use_in_migrations = True`, the parent's manager shouldn't appear in the model state (#26881). """ new_apps = Apps(["migrations"]) class PersonManager(models.Manager): use_in_migrations = True class Person(models.Model): objects = PersonManager() class Meta: abstract = True class BossManager(PersonManager): use_in_migrations = True class Boss(Person): objects = BossManager() class Meta: app_label = "migrations" apps = new_apps project_state = ProjectState.from_apps(new_apps) boss_state = project_state.models["migrations", "boss"] self.assertEqual(boss_state.managers, [("objects", Boss.objects)]) def test_custom_default_manager(self): new_apps = Apps(["migrations"]) class Author(models.Model): manager1 = models.Manager() manager2 = models.Manager() class Meta: app_label = "migrations" apps = new_apps default_manager_name = "manager2" project_state = ProjectState.from_apps(new_apps) author_state = project_state.models["migrations", "author"] self.assertEqual(author_state.options["default_manager_name"], "manager2") self.assertEqual(author_state.managers, [("manager2", Author.manager1)]) def test_custom_base_manager(self): new_apps = Apps(["migrations"]) class Author(models.Model): manager1 = models.Manager() manager2 = models.Manager() class Meta: app_label = "migrations" apps = new_apps base_manager_name = "manager2" class Author2(models.Model): manager1 = models.Manager() manager2 = models.Manager() class Meta: app_label = "migrations" apps = new_apps base_manager_name = "manager1" project_state = ProjectState.from_apps(new_apps) author_state = project_state.models["migrations", "author"] self.assertEqual(author_state.options["base_manager_name"], "manager2") self.assertEqual( author_state.managers, [ ("manager1", Author.manager1), ("manager2", Author.manager2), ], ) author2_state = project_state.models["migrations", "author2"] self.assertEqual(author2_state.options["base_manager_name"], "manager1") self.assertEqual( author2_state.managers, [ ("manager1", Author2.manager1), ], ) def test_apps_bulk_update(self): """ StateApps.bulk_update() should update apps.ready to False and reset the value afterward. 
""" project_state = ProjectState() apps = project_state.apps with apps.bulk_update(): self.assertFalse(apps.ready) self.assertTrue(apps.ready) with self.assertRaises(ValueError): with apps.bulk_update(): self.assertFalse(apps.ready) raise ValueError() self.assertTrue(apps.ready) def test_render(self): """ Tests rendering a ProjectState into an Apps. """ project_state = ProjectState() project_state.add_model( ModelState( app_label="migrations", name="Tag", fields=[ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ("hidden", models.BooleanField()), ], ) ) project_state.add_model( ModelState( app_label="migrations", name="SubTag", fields=[ ( "tag_ptr", models.OneToOneField( "migrations.Tag", models.CASCADE, auto_created=True, parent_link=True, primary_key=True, to_field="id", serialize=False, ), ), ("awesome", models.BooleanField()), ], bases=("migrations.Tag",), ) ) base_mgr = models.Manager() mgr1 = FoodManager("a", "b") mgr2 = FoodManager("x", "y", c=3, d=4) project_state.add_model( ModelState( app_label="migrations", name="Food", fields=[ ("id", models.AutoField(primary_key=True)), ], managers=[ # The ordering we really want is objects, mgr1, mgr2 ("default", base_mgr), ("food_mgr2", mgr2), ("food_mgr1", mgr1), ], ) ) new_apps = project_state.apps self.assertEqual( new_apps.get_model("migrations", "Tag")._meta.get_field("name").max_length, 100, ) self.assertIs( new_apps.get_model("migrations", "Tag")._meta.get_field("hidden").null, False, ) self.assertEqual( len(new_apps.get_model("migrations", "SubTag")._meta.local_fields), 2 ) Food = new_apps.get_model("migrations", "Food") self.assertEqual( [mgr.name for mgr in Food._meta.managers], ["default", "food_mgr1", "food_mgr2"], ) self.assertTrue(all(isinstance(mgr.name, str) for mgr in Food._meta.managers)) self.assertEqual( [mgr.__class__ for mgr in Food._meta.managers], [models.Manager, FoodManager, FoodManager], ) def test_render_model_inheritance(self): class Book(models.Model): title = models.CharField(max_length=1000) class Meta: app_label = "migrations" apps = Apps() class Novel(Book): class Meta: app_label = "migrations" apps = Apps() # First, test rendering individually apps = Apps(["migrations"]) # We shouldn't be able to render yet ms = ModelState.from_model(Novel) with self.assertRaises(InvalidBasesError): ms.render(apps) # Once the parent model is in the app registry, it should be fine ModelState.from_model(Book).render(apps) ModelState.from_model(Novel).render(apps) def test_render_model_with_multiple_inheritance(self): class Foo(models.Model): class Meta: app_label = "migrations" apps = Apps() class Bar(models.Model): class Meta: app_label = "migrations" apps = Apps() class FooBar(Foo, Bar): class Meta: app_label = "migrations" apps = Apps() class AbstractSubFooBar(FooBar): class Meta: abstract = True apps = Apps() class SubFooBar(AbstractSubFooBar): class Meta: app_label = "migrations" apps = Apps() apps = Apps(["migrations"]) # We shouldn't be able to render yet ms = ModelState.from_model(FooBar) with self.assertRaises(InvalidBasesError): ms.render(apps) # Once the parent models are in the app registry, it should be fine ModelState.from_model(Foo).render(apps) self.assertSequenceEqual(ModelState.from_model(Foo).bases, [models.Model]) ModelState.from_model(Bar).render(apps) self.assertSequenceEqual(ModelState.from_model(Bar).bases, [models.Model]) ModelState.from_model(FooBar).render(apps) self.assertSequenceEqual( ModelState.from_model(FooBar).bases, ["migrations.foo", "migrations.bar"] 
) ModelState.from_model(SubFooBar).render(apps) self.assertSequenceEqual( ModelState.from_model(SubFooBar).bases, ["migrations.foobar"] ) def test_render_project_dependencies(self): """ The ProjectState render method correctly renders models to account for inter-model base dependencies. """ new_apps = Apps() class A(models.Model): class Meta: app_label = "migrations" apps = new_apps class B(A): class Meta: app_label = "migrations" apps = new_apps class C(B): class Meta: app_label = "migrations" apps = new_apps class D(A): class Meta: app_label = "migrations" apps = new_apps class E(B): class Meta: app_label = "migrations" apps = new_apps proxy = True class F(D): class Meta: app_label = "migrations" apps = new_apps proxy = True # Make a ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) project_state.add_model(ModelState.from_model(C)) project_state.add_model(ModelState.from_model(D)) project_state.add_model(ModelState.from_model(E)) project_state.add_model(ModelState.from_model(F)) final_apps = project_state.apps self.assertEqual(len(final_apps.get_models()), 6) # Now make an invalid ProjectState and make sure it fails project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) project_state.add_model(ModelState.from_model(C)) project_state.add_model(ModelState.from_model(F)) with self.assertRaises(InvalidBasesError): project_state.apps def test_render_unique_app_labels(self): """ The ProjectState render method doesn't raise an ImproperlyConfigured exception about unique labels if two dotted app names have the same last part. """ class A(models.Model): class Meta: app_label = "django.contrib.auth" class B(models.Model): class Meta: app_label = "vendor.auth" # Make a ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) self.assertEqual(len(project_state.apps.get_models()), 2) def test_reload_related_model_on_non_relational_fields(self): """ The model is reloaded even on changes that are not involved in relations. Other models pointing to or from it are also reloaded. """ project_state = ProjectState() project_state.apps # Render project state. 
project_state.add_model(ModelState("migrations", "A", [])) project_state.add_model( ModelState( "migrations", "B", [ ("a", models.ForeignKey("A", models.CASCADE)), ], ) ) project_state.add_model( ModelState( "migrations", "C", [ ("b", models.ForeignKey("B", models.CASCADE)), ("name", models.TextField()), ], ) ) project_state.add_model( ModelState( "migrations", "D", [ ("a", models.ForeignKey("A", models.CASCADE)), ], ) ) operation = AlterField( model_name="C", name="name", field=models.TextField(blank=True), ) operation.state_forwards("migrations", project_state) project_state.reload_model("migrations", "a", delay=True) A = project_state.apps.get_model("migrations.A") B = project_state.apps.get_model("migrations.B") D = project_state.apps.get_model("migrations.D") self.assertIs(B._meta.get_field("a").related_model, A) self.assertIs(D._meta.get_field("a").related_model, A) def test_reload_model_relationship_consistency(self): project_state = ProjectState() project_state.add_model(ModelState("migrations", "A", [])) project_state.add_model( ModelState( "migrations", "B", [ ("a", models.ForeignKey("A", models.CASCADE)), ], ) ) project_state.add_model( ModelState( "migrations", "C", [ ("b", models.ForeignKey("B", models.CASCADE)), ], ) ) A = project_state.apps.get_model("migrations.A") B = project_state.apps.get_model("migrations.B") C = project_state.apps.get_model("migrations.C") self.assertEqual([r.related_model for r in A._meta.related_objects], [B]) self.assertEqual([r.related_model for r in B._meta.related_objects], [C]) self.assertEqual([r.related_model for r in C._meta.related_objects], []) project_state.reload_model("migrations", "a", delay=True) A = project_state.apps.get_model("migrations.A") B = project_state.apps.get_model("migrations.B") C = project_state.apps.get_model("migrations.C") self.assertEqual([r.related_model for r in A._meta.related_objects], [B]) self.assertEqual([r.related_model for r in B._meta.related_objects], [C]) self.assertEqual([r.related_model for r in C._meta.related_objects], []) def test_add_relations(self): """ #24573 - Adding relations to existing models should reload the referenced models too. 
""" new_apps = Apps() class A(models.Model): class Meta: app_label = "something" apps = new_apps class B(A): class Meta: app_label = "something" apps = new_apps class C(models.Model): class Meta: app_label = "something" apps = new_apps project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) project_state.add_model(ModelState.from_model(C)) project_state.apps # We need to work with rendered models old_state = project_state.clone() model_a_old = old_state.apps.get_model("something", "A") model_b_old = old_state.apps.get_model("something", "B") model_c_old = old_state.apps.get_model("something", "C") # The relations between the old models are correct self.assertIs(model_a_old._meta.get_field("b").related_model, model_b_old) self.assertIs(model_b_old._meta.get_field("a_ptr").related_model, model_a_old) operation = AddField( "c", "to_a", models.OneToOneField( "something.A", models.CASCADE, related_name="from_c", ), ) operation.state_forwards("something", project_state) model_a_new = project_state.apps.get_model("something", "A") model_b_new = project_state.apps.get_model("something", "B") model_c_new = project_state.apps.get_model("something", "C") # All models have changed self.assertIsNot(model_a_old, model_a_new) self.assertIsNot(model_b_old, model_b_new) self.assertIsNot(model_c_old, model_c_new) # The relations between the old models still hold self.assertIs(model_a_old._meta.get_field("b").related_model, model_b_old) self.assertIs(model_b_old._meta.get_field("a_ptr").related_model, model_a_old) # The relations between the new models correct self.assertIs(model_a_new._meta.get_field("b").related_model, model_b_new) self.assertIs(model_b_new._meta.get_field("a_ptr").related_model, model_a_new) self.assertIs(model_a_new._meta.get_field("from_c").related_model, model_c_new) self.assertIs(model_c_new._meta.get_field("to_a").related_model, model_a_new) def test_remove_relations(self): """ #24225 - Relations between models are updated while remaining the relations and references for models of an old state. 
""" new_apps = Apps() class A(models.Model): class Meta: app_label = "something" apps = new_apps class B(models.Model): to_a = models.ForeignKey(A, models.CASCADE) class Meta: app_label = "something" apps = new_apps def get_model_a(state): return [ mod for mod in state.apps.get_models() if mod._meta.model_name == "a" ][0] project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1) old_state = project_state.clone() operation = RemoveField("b", "to_a") operation.state_forwards("something", project_state) # Model from old_state still has the relation model_a_old = get_model_a(old_state) model_a_new = get_model_a(project_state) self.assertIsNot(model_a_old, model_a_new) self.assertEqual(len(model_a_old._meta.related_objects), 1) self.assertEqual(len(model_a_new._meta.related_objects), 0) # Same test for deleted model project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) old_state = project_state.clone() operation = DeleteModel("b") operation.state_forwards("something", project_state) model_a_old = get_model_a(old_state) model_a_new = get_model_a(project_state) self.assertIsNot(model_a_old, model_a_new) self.assertEqual(len(model_a_old._meta.related_objects), 1) self.assertEqual(len(model_a_new._meta.related_objects), 0) def test_self_relation(self): """ #24513 - Modifying an object pointing to itself would cause it to be rendered twice and thus breaking its related M2M through objects. """ class A(models.Model): to_a = models.ManyToManyField("something.A", symmetrical=False) class Meta: app_label = "something" def get_model_a(state): return [ mod for mod in state.apps.get_models() if mod._meta.model_name == "a" ][0] project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1) old_state = project_state.clone() operation = AlterField( model_name="a", name="to_a", field=models.ManyToManyField("something.A", symmetrical=False, blank=True), ) # At this point the model would be rendered twice causing its related # M2M through objects to point to an old copy and thus breaking their # attribute lookup. 
operation.state_forwards("something", project_state) model_a_old = get_model_a(old_state) model_a_new = get_model_a(project_state) self.assertIsNot(model_a_old, model_a_new) # The old model's _meta is still consistent field_to_a_old = model_a_old._meta.get_field("to_a") self.assertEqual(field_to_a_old.m2m_field_name(), "from_a") self.assertEqual(field_to_a_old.m2m_reverse_field_name(), "to_a") self.assertIs(field_to_a_old.related_model, model_a_old) self.assertIs( field_to_a_old.remote_field.through._meta.get_field("to_a").related_model, model_a_old, ) self.assertIs( field_to_a_old.remote_field.through._meta.get_field("from_a").related_model, model_a_old, ) # The new model's _meta is still consistent field_to_a_new = model_a_new._meta.get_field("to_a") self.assertEqual(field_to_a_new.m2m_field_name(), "from_a") self.assertEqual(field_to_a_new.m2m_reverse_field_name(), "to_a") self.assertIs(field_to_a_new.related_model, model_a_new) self.assertIs( field_to_a_new.remote_field.through._meta.get_field("to_a").related_model, model_a_new, ) self.assertIs( field_to_a_new.remote_field.through._meta.get_field("from_a").related_model, model_a_new, ) def test_equality(self): """ == and != are implemented correctly. """ # Test two things that should be equal project_state = ProjectState() project_state.add_model( ModelState( "migrations", "Tag", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ("hidden", models.BooleanField()), ], {}, None, ) ) project_state.apps # Fill the apps cached property other_state = project_state.clone() self.assertEqual(project_state, project_state) self.assertEqual(project_state, other_state) self.assertIs(project_state != project_state, False) self.assertIs(project_state != other_state, False) self.assertNotEqual(project_state.apps, other_state.apps) # Make a very small change (max_len 99) and see if that affects it project_state = ProjectState() project_state.add_model( ModelState( "migrations", "Tag", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=99)), ("hidden", models.BooleanField()), ], {}, None, ) ) self.assertNotEqual(project_state, other_state) self.assertIs(project_state == other_state, False) def test_dangling_references_throw_error(self): new_apps = Apps() class Author(models.Model): name = models.TextField() class Meta: app_label = "migrations" apps = new_apps class Publisher(models.Model): name = models.TextField() class Meta: app_label = "migrations" apps = new_apps class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE) publisher = models.ForeignKey(Publisher, models.CASCADE) class Meta: app_label = "migrations" apps = new_apps class Magazine(models.Model): authors = models.ManyToManyField(Author) class Meta: app_label = "migrations" apps = new_apps # Make a valid ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(Author)) project_state.add_model(ModelState.from_model(Publisher)) project_state.add_model(ModelState.from_model(Book)) project_state.add_model(ModelState.from_model(Magazine)) self.assertEqual(len(project_state.apps.get_models()), 4) # now make an invalid one with a ForeignKey project_state = ProjectState() project_state.add_model(ModelState.from_model(Book)) msg = ( "The field migrations.Book.author was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model " "'author'.\n" "The field migrations.Book.publisher was declared with a lazy reference " "to 
'migrations.publisher', but app 'migrations' doesn't provide model " "'publisher'." ) with self.assertRaisesMessage(ValueError, msg): project_state.apps # And another with ManyToManyField. project_state = ProjectState() project_state.add_model(ModelState.from_model(Magazine)) msg = ( "The field migrations.Magazine.authors was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model " "'author'.\n" "The field migrations.Magazine_authors.author was declared with a lazy " "reference to 'migrations.author', but app 'migrations' doesn't provide " "model 'author'." ) with self.assertRaisesMessage(ValueError, msg): project_state.apps # And now with multiple models and multiple fields. project_state.add_model(ModelState.from_model(Book)) msg = ( "The field migrations.Book.author was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model " "'author'.\n" "The field migrations.Book.publisher was declared with a lazy reference " "to 'migrations.publisher', but app 'migrations' doesn't provide model " "'publisher'.\n" "The field migrations.Magazine.authors was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model " "'author'.\n" "The field migrations.Magazine_authors.author was declared with a lazy " "reference to 'migrations.author', but app 'migrations' doesn't provide " "model 'author'." ) with self.assertRaisesMessage(ValueError, msg): project_state.apps def test_reference_mixed_case_app_label(self): new_apps = Apps() class Author(models.Model): class Meta: app_label = "MiXedCase_migrations" apps = new_apps class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE) class Meta: app_label = "MiXedCase_migrations" apps = new_apps class Magazine(models.Model): authors = models.ManyToManyField(Author) class Meta: app_label = "MiXedCase_migrations" apps = new_apps project_state = ProjectState() project_state.add_model(ModelState.from_model(Author)) project_state.add_model(ModelState.from_model(Book)) project_state.add_model(ModelState.from_model(Magazine)) self.assertEqual(len(project_state.apps.get_models()), 3) def test_real_apps(self): """ Including real apps can resolve dangling FK errors. This test relies on the fact that contenttypes is always loaded. """ new_apps = Apps() class TestModel(models.Model): ct = models.ForeignKey("contenttypes.ContentType", models.CASCADE) class Meta: app_label = "migrations" apps = new_apps # If we just stick it into an empty state it should fail project_state = ProjectState() project_state.add_model(ModelState.from_model(TestModel)) with self.assertRaises(ValueError): project_state.apps # If we include the real app it should succeed project_state = ProjectState(real_apps={"contenttypes"}) project_state.add_model(ModelState.from_model(TestModel)) rendered_state = project_state.apps self.assertEqual( len( [ x for x in rendered_state.get_models() if x._meta.app_label == "migrations" ] ), 1, ) def test_real_apps_non_set(self): with self.assertRaises(AssertionError): ProjectState(real_apps=["contenttypes"]) def test_ignore_order_wrt(self): """ Makes sure ProjectState doesn't include OrderWrt fields when making from existing models. 
""" new_apps = Apps() class Author(models.Model): name = models.TextField() class Meta: app_label = "migrations" apps = new_apps class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE) class Meta: app_label = "migrations" apps = new_apps order_with_respect_to = "author" # Make a valid ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(Author)) project_state.add_model(ModelState.from_model(Book)) self.assertEqual( list(project_state.models["migrations", "book"].fields), ["id", "author"], ) def test_modelstate_get_field_order_wrt(self): new_apps = Apps() class Author(models.Model): name = models.TextField() class Meta: app_label = "migrations" apps = new_apps class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE) class Meta: app_label = "migrations" apps = new_apps order_with_respect_to = "author" model_state = ModelState.from_model(Book) order_wrt_field = model_state.get_field("_order") self.assertIsInstance(order_wrt_field, models.ForeignKey) self.assertEqual(order_wrt_field.related_model, "migrations.author") def test_modelstate_get_field_no_order_wrt_order_field(self): new_apps = Apps() class HistoricalRecord(models.Model): _order = models.PositiveSmallIntegerField() class Meta: app_label = "migrations" apps = new_apps model_state = ModelState.from_model(HistoricalRecord) order_field = model_state.get_field("_order") self.assertIsNone(order_field.related_model) self.assertIsInstance(order_field, models.PositiveSmallIntegerField) def test_manager_refer_correct_model_version(self): """ #24147 - Managers refer to the correct version of a historical model """ project_state = ProjectState() project_state.add_model( ModelState( app_label="migrations", name="Tag", fields=[ ("id", models.AutoField(primary_key=True)), ("hidden", models.BooleanField()), ], managers=[ ("food_mgr", FoodManager("a", "b")), ("food_qs", FoodQuerySet.as_manager()), ], ) ) old_model = project_state.apps.get_model("migrations", "tag") new_state = project_state.clone() operation = RemoveField("tag", "hidden") operation.state_forwards("migrations", new_state) new_model = new_state.apps.get_model("migrations", "tag") self.assertIsNot(old_model, new_model) self.assertIs(old_model, old_model.food_mgr.model) self.assertIs(old_model, old_model.food_qs.model) self.assertIs(new_model, new_model.food_mgr.model) self.assertIs(new_model, new_model.food_qs.model) self.assertIsNot(old_model.food_mgr, new_model.food_mgr) self.assertIsNot(old_model.food_qs, new_model.food_qs) self.assertIsNot(old_model.food_mgr.model, new_model.food_mgr.model) self.assertIsNot(old_model.food_qs.model, new_model.food_qs.model) def test_choices_iterator(self): """ #24483 - ProjectState.from_apps should not destructively consume Field.choices iterators. 
""" new_apps = Apps(["migrations"]) choices = [("a", "A"), ("b", "B")] class Author(models.Model): name = models.CharField(max_length=255) choice = models.CharField(max_length=255, choices=iter(choices)) class Meta: app_label = "migrations" apps = new_apps ProjectState.from_apps(new_apps) choices_field = Author._meta.get_field("choice") self.assertEqual(list(choices_field.choices), choices) class StateRelationsTests(SimpleTestCase): def get_base_project_state(self): new_apps = Apps() class User(models.Model): class Meta: app_label = "tests" apps = new_apps class Comment(models.Model): text = models.TextField() user = models.ForeignKey(User, models.CASCADE) comments = models.ManyToManyField("self") class Meta: app_label = "tests" apps = new_apps class Post(models.Model): text = models.TextField() authors = models.ManyToManyField(User) class Meta: app_label = "tests" apps = new_apps project_state = ProjectState() project_state.add_model(ModelState.from_model(User)) project_state.add_model(ModelState.from_model(Comment)) project_state.add_model(ModelState.from_model(Post)) return project_state def test_relations_population(self): tests = [ ( "add_model", [ ModelState( app_label="migrations", name="Tag", fields=[("id", models.AutoField(primary_key=True))], ), ], ), ("remove_model", ["tests", "comment"]), ("rename_model", ["tests", "comment", "opinion"]), ( "add_field", [ "tests", "post", "next_post", models.ForeignKey("self", models.CASCADE), True, ], ), ("remove_field", ["tests", "post", "text"]), ("rename_field", ["tests", "comment", "user", "author"]), ( "alter_field", [ "tests", "comment", "user", models.IntegerField(), True, ], ), ] for method, args in tests: with self.subTest(method=method): project_state = self.get_base_project_state() getattr(project_state, method)(*args) # ProjectState's `_relations` are populated on `relations` access. 
self.assertIsNone(project_state._relations) self.assertEqual(project_state.relations, project_state._relations) self.assertIsNotNone(project_state._relations) def test_add_model(self): project_state = self.get_base_project_state() self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) self.assertEqual( list(project_state.relations["tests", "comment"]), [("tests", "comment")], ) self.assertNotIn(("tests", "post"), project_state.relations) def test_add_model_no_relations(self): project_state = ProjectState() project_state.add_model( ModelState( app_label="migrations", name="Tag", fields=[("id", models.AutoField(primary_key=True))], ) ) self.assertEqual(project_state.relations, {}) def test_add_model_other_app(self): project_state = self.get_base_project_state() self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) project_state.add_model( ModelState( app_label="tests_other", name="comment", fields=[ ("id", models.AutoField(primary_key=True)), ("user", models.ForeignKey("tests.user", models.CASCADE)), ], ) ) self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post"), ("tests_other", "comment")], ) def test_remove_model(self): project_state = self.get_base_project_state() self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) self.assertEqual( list(project_state.relations["tests", "comment"]), [("tests", "comment")], ) project_state.remove_model("tests", "comment") self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "post")], ) self.assertNotIn(("tests", "comment"), project_state.relations) project_state.remove_model("tests", "post") self.assertEqual(project_state.relations, {}) project_state.remove_model("tests", "user") self.assertEqual(project_state.relations, {}) def test_rename_model(self): project_state = self.get_base_project_state() self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) self.assertEqual( list(project_state.relations["tests", "comment"]), [("tests", "comment")], ) related_field = project_state.relations["tests", "user"]["tests", "comment"] project_state.rename_model("tests", "comment", "opinion") self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "post"), ("tests", "opinion")], ) self.assertEqual( list(project_state.relations["tests", "opinion"]), [("tests", "opinion")], ) self.assertNotIn(("tests", "comment"), project_state.relations) self.assertEqual( project_state.relations["tests", "user"]["tests", "opinion"], related_field, ) project_state.rename_model("tests", "user", "author") self.assertEqual( list(project_state.relations["tests", "author"]), [("tests", "post"), ("tests", "opinion")], ) self.assertNotIn(("tests", "user"), project_state.relations) def test_rename_model_no_relations(self): project_state = self.get_base_project_state() self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) related_field = project_state.relations["tests", "user"]["tests", "post"] self.assertNotIn(("tests", "post"), project_state.relations) # Rename a model without relations. 
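        # "post" has no incoming relations of its own, but the rename still has
        # to update the ("tests", "user") entry that points at it while reusing
        # the same field instance, as the assertions below check.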
project_state.rename_model("tests", "post", "blog") self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "blog")], ) self.assertNotIn(("tests", "blog"), project_state.relations) self.assertEqual( related_field, project_state.relations["tests", "user"]["tests", "blog"], ) def test_add_field(self): project_state = self.get_base_project_state() self.assertNotIn(("tests", "post"), project_state.relations) # Add a self-referential foreign key. new_field = models.ForeignKey("self", models.CASCADE) project_state.add_field( "tests", "post", "next_post", new_field, preserve_default=True, ) self.assertEqual( list(project_state.relations["tests", "post"]), [("tests", "post")], ) self.assertEqual( project_state.relations["tests", "post"]["tests", "post"], {"next_post": new_field}, ) # Add a foreign key. new_field = models.ForeignKey("tests.post", models.CASCADE) project_state.add_field( "tests", "comment", "post", new_field, preserve_default=True, ) self.assertEqual( list(project_state.relations["tests", "post"]), [("tests", "post"), ("tests", "comment")], ) self.assertEqual( project_state.relations["tests", "post"]["tests", "comment"], {"post": new_field}, ) def test_add_field_m2m_with_through(self): project_state = self.get_base_project_state() project_state.add_model( ModelState( app_label="tests", name="Tag", fields=[("id", models.AutoField(primary_key=True))], ) ) project_state.add_model( ModelState( app_label="tests", name="PostTag", fields=[ ("id", models.AutoField(primary_key=True)), ("post", models.ForeignKey("tests.post", models.CASCADE)), ("tag", models.ForeignKey("tests.tag", models.CASCADE)), ], ) ) self.assertEqual( list(project_state.relations["tests", "post"]), [("tests", "posttag")], ) self.assertEqual( list(project_state.relations["tests", "tag"]), [("tests", "posttag")], ) # Add a many-to-many field with the through model. new_field = models.ManyToManyField("tests.tag", through="tests.posttag") project_state.add_field( "tests", "post", "tags", new_field, preserve_default=True, ) self.assertEqual( list(project_state.relations["tests", "post"]), [("tests", "posttag")], ) self.assertEqual( list(project_state.relations["tests", "tag"]), [("tests", "posttag"), ("tests", "post")], ) self.assertEqual( project_state.relations["tests", "tag"]["tests", "post"], {"tags": new_field}, ) def test_remove_field(self): project_state = self.get_base_project_state() self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) # Remove a many-to-many field. project_state.remove_field("tests", "post", "authors") self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment")], ) # Remove a foreign key. project_state.remove_field("tests", "comment", "user") self.assertEqual(project_state.relations["tests", "user"], {}) def test_remove_field_no_relations(self): project_state = self.get_base_project_state() self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) # Remove a non-relation field. 
project_state.remove_field("tests", "post", "text") self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) def test_rename_field(self): project_state = self.get_base_project_state() field = project_state.models["tests", "comment"].fields["user"] self.assertEqual( project_state.relations["tests", "user"]["tests", "comment"], {"user": field}, ) project_state.rename_field("tests", "comment", "user", "author") renamed_field = project_state.models["tests", "comment"].fields["author"] self.assertEqual( project_state.relations["tests", "user"]["tests", "comment"], {"author": renamed_field}, ) self.assertEqual(field, renamed_field) def test_rename_field_no_relations(self): project_state = self.get_base_project_state() self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) # Rename a non-relation field. project_state.rename_field("tests", "post", "text", "description") self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) def test_alter_field(self): project_state = self.get_base_project_state() self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) # Alter a foreign key to a non-relation field. project_state.alter_field( "tests", "comment", "user", models.IntegerField(), preserve_default=True, ) self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "post")], ) # Alter a non-relation field to a many-to-many field. m2m_field = models.ManyToManyField("tests.user") project_state.alter_field( "tests", "comment", "user", m2m_field, preserve_default=True, ) self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "post"), ("tests", "comment")], ) self.assertEqual( project_state.relations["tests", "user"]["tests", "comment"], {"user": m2m_field}, ) def test_alter_field_m2m_to_fk(self): project_state = self.get_base_project_state() project_state.add_model( ModelState( app_label="tests_other", name="user_other", fields=[("id", models.AutoField(primary_key=True))], ) ) self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) self.assertNotIn(("tests_other", "user_other"), project_state.relations) # Alter a many-to-many field to a foreign key. foreign_key = models.ForeignKey("tests_other.user_other", models.CASCADE) project_state.alter_field( "tests", "post", "authors", foreign_key, preserve_default=True, ) self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment")], ) self.assertEqual( list(project_state.relations["tests_other", "user_other"]), [("tests", "post")], ) self.assertEqual( project_state.relations["tests_other", "user_other"]["tests", "post"], {"authors": foreign_key}, ) def test_many_relations_to_same_model(self): project_state = self.get_base_project_state() new_field = models.ForeignKey("tests.user", models.CASCADE) project_state.add_field( "tests", "comment", "reviewer", new_field, preserve_default=True, ) self.assertEqual( list(project_state.relations["tests", "user"]), [("tests", "comment"), ("tests", "post")], ) comment_rels = project_state.relations["tests", "user"]["tests", "comment"] # Two foreign keys to the same model. self.assertEqual(len(comment_rels), 2) self.assertEqual(comment_rels["reviewer"], new_field) # Rename the second foreign key. 
project_state.rename_field("tests", "comment", "reviewer", "supervisor") self.assertEqual(len(comment_rels), 2) self.assertEqual(comment_rels["supervisor"], new_field) # Remove the first foreign key. project_state.remove_field("tests", "comment", "user") self.assertEqual(comment_rels, {"supervisor": new_field}) class ModelStateTests(SimpleTestCase): def test_custom_model_base(self): state = ModelState.from_model(ModelWithCustomBase) self.assertEqual(state.bases, (models.Model,)) def test_bound_field_sanity_check(self): field = models.CharField(max_length=1) field.model = models.Model with self.assertRaisesMessage( ValueError, 'ModelState.fields cannot be bound to a model - "field" is.' ): ModelState("app", "Model", [("field", field)]) def test_sanity_check_to(self): field = models.ForeignKey(UnicodeModel, models.CASCADE) with self.assertRaisesMessage( ValueError, 'ModelState.fields cannot refer to a model class - "field.to" does. ' "Use a string reference instead.", ): ModelState("app", "Model", [("field", field)]) def test_sanity_check_through(self): field = models.ManyToManyField("UnicodeModel") field.remote_field.through = UnicodeModel with self.assertRaisesMessage( ValueError, 'ModelState.fields cannot refer to a model class - "field.through" does. ' "Use a string reference instead.", ): ModelState("app", "Model", [("field", field)]) def test_sanity_index_name(self): field = models.IntegerField() options = {"indexes": [models.Index(fields=["field"])]} msg = ( "Indexes passed to ModelState require a name attribute. <Index: " "fields=['field']> doesn't have one." ) with self.assertRaisesMessage(ValueError, msg): ModelState("app", "Model", [("field", field)], options=options) def test_fields_immutability(self): """ Rendering a model state doesn't alter its internal fields. """ apps = Apps() field = models.CharField(max_length=1) state = ModelState("app", "Model", [("name", field)]) Model = state.render(apps) self.assertNotEqual(Model._meta.get_field("name"), field) def test_repr(self): field = models.CharField(max_length=1) state = ModelState( "app", "Model", [("name", field)], bases=["app.A", "app.B", "app.C"] ) self.assertEqual(repr(state), "<ModelState: 'app.Model'>") project_state = ProjectState() project_state.add_model(state) with self.assertRaisesMessage( InvalidBasesError, "Cannot resolve bases for [<ModelState: 'app.Model'>]" ): project_state.apps def test_fields_ordering_equality(self): state = ModelState( "migrations", "Tag", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ("hidden", models.BooleanField()), ], ) reordered_state = ModelState( "migrations", "Tag", [ ("id", models.AutoField(primary_key=True)), # Purposely re-ordered. 
("hidden", models.BooleanField()), ("name", models.CharField(max_length=100)), ], ) self.assertEqual(state, reordered_state) @override_settings(TEST_SWAPPABLE_MODEL="migrations.SomeFakeModel") def test_create_swappable(self): """ Tests making a ProjectState from an Apps with a swappable model """ new_apps = Apps(["migrations"]) class Author(models.Model): name = models.CharField(max_length=255) bio = models.TextField() age = models.IntegerField(blank=True, null=True) class Meta: app_label = "migrations" apps = new_apps swappable = "TEST_SWAPPABLE_MODEL" author_state = ModelState.from_model(Author) self.assertEqual(author_state.app_label, "migrations") self.assertEqual(author_state.name, "Author") self.assertEqual(list(author_state.fields), ["id", "name", "bio", "age"]) self.assertEqual(author_state.fields["name"].max_length, 255) self.assertIs(author_state.fields["bio"].null, False) self.assertIs(author_state.fields["age"].null, True) self.assertEqual( author_state.options, {"swappable": "TEST_SWAPPABLE_MODEL", "indexes": [], "constraints": []}, ) self.assertEqual(author_state.bases, (models.Model,)) self.assertEqual(author_state.managers, []) @override_settings(TEST_SWAPPABLE_MODEL="migrations.SomeFakeModel") def test_create_swappable_from_abstract(self): """ A swappable model inheriting from a hierarchy: concrete -> abstract -> concrete. """ new_apps = Apps(["migrations"]) class SearchableLocation(models.Model): keywords = models.CharField(max_length=256) class Meta: app_label = "migrations" apps = new_apps class Station(SearchableLocation): name = models.CharField(max_length=128) class Meta: abstract = True class BusStation(Station): bus_routes = models.CharField(max_length=128) inbound = models.BooleanField(default=False) class Meta(Station.Meta): app_label = "migrations" apps = new_apps swappable = "TEST_SWAPPABLE_MODEL" station_state = ModelState.from_model(BusStation) self.assertEqual(station_state.app_label, "migrations") self.assertEqual(station_state.name, "BusStation") self.assertEqual( list(station_state.fields), ["searchablelocation_ptr", "name", "bus_routes", "inbound"], ) self.assertEqual(station_state.fields["name"].max_length, 128) self.assertIs(station_state.fields["bus_routes"].null, False) self.assertEqual( station_state.options, { "abstract": False, "swappable": "TEST_SWAPPABLE_MODEL", "indexes": [], "constraints": [], }, ) self.assertEqual(station_state.bases, ("migrations.searchablelocation",)) self.assertEqual(station_state.managers, []) @override_settings(TEST_SWAPPABLE_MODEL="migrations.SomeFakeModel") def test_custom_manager_swappable(self): """ Tests making a ProjectState from unused models with custom managers """ new_apps = Apps(["migrations"]) class Food(models.Model): food_mgr = FoodManager("a", "b") food_qs = FoodQuerySet.as_manager() food_no_mgr = NoMigrationFoodManager("x", "y") class Meta: app_label = "migrations" apps = new_apps swappable = "TEST_SWAPPABLE_MODEL" food_state = ModelState.from_model(Food) # The default manager is used in migrations self.assertEqual([name for name, mgr in food_state.managers], ["food_mgr"]) self.assertEqual(food_state.managers[0][1].args, ("a", "b", 1, 2)) @isolate_apps("migrations", "django.contrib.contenttypes") def test_order_with_respect_to_private_field(self): class PrivateFieldModel(models.Model): content_type = models.ForeignKey("contenttypes.ContentType", models.CASCADE) object_id = models.PositiveIntegerField() private = GenericForeignKey() class Meta: order_with_respect_to = "private" state = 
ModelState.from_model(PrivateFieldModel) self.assertNotIn("order_with_respect_to", state.options) @isolate_apps("migrations") def test_abstract_model_children_inherit_indexes(self): class Abstract(models.Model): name = models.CharField(max_length=50) class Meta: app_label = "migrations" abstract = True indexes = [models.Index(fields=["name"])] class Child1(Abstract): pass class Child2(Abstract): pass child1_state = ModelState.from_model(Child1) child2_state = ModelState.from_model(Child2) index_names = [index.name for index in child1_state.options["indexes"]] self.assertEqual(index_names, ["migrations__name_b0afd7_idx"]) index_names = [index.name for index in child2_state.options["indexes"]] self.assertEqual(index_names, ["migrations__name_016466_idx"]) # Modifying the state doesn't modify the index on the model. child1_state.options["indexes"][0].name = "bar" self.assertEqual(Child1._meta.indexes[0].name, "migrations__name_b0afd7_idx") @isolate_apps("migrations") def test_explicit_index_name(self): class TestModel(models.Model): name = models.CharField(max_length=50) class Meta: app_label = "migrations" indexes = [models.Index(fields=["name"], name="foo_idx")] model_state = ModelState.from_model(TestModel) index_names = [index.name for index in model_state.options["indexes"]] self.assertEqual(index_names, ["foo_idx"]) @isolate_apps("migrations") def test_from_model_constraints(self): class ModelWithConstraints(models.Model): size = models.IntegerField() class Meta: constraints = [ models.CheckConstraint(check=models.Q(size__gt=1), name="size_gt_1") ] state = ModelState.from_model(ModelWithConstraints) model_constraints = ModelWithConstraints._meta.constraints state_constraints = state.options["constraints"] self.assertEqual(model_constraints, state_constraints) self.assertIsNot(model_constraints, state_constraints) self.assertIsNot(model_constraints[0], state_constraints[0]) class RelatedModelsTests(SimpleTestCase): def setUp(self): self.apps = Apps(["migrations.related_models_app"]) def create_model( self, name, foreign_keys=[], bases=(), abstract=False, proxy=False ): test_name = "related_models_app" assert not (abstract and proxy) meta_contents = { "abstract": abstract, "app_label": test_name, "apps": self.apps, "proxy": proxy, } meta = type("Meta", (), meta_contents) if not bases: bases = (models.Model,) body = { "Meta": meta, "__module__": "__fake__", } fname_base = fname = "%s_%%d" % name.lower() for i, fk in enumerate(foreign_keys, 1): fname = fname_base % i body[fname] = fk return type(name, bases, body) def assertRelated(self, model, needle): self.assertEqual( get_related_models_recursive(model), {(n._meta.app_label, n._meta.model_name) for n in needle}, ) def test_unrelated(self): A = self.create_model("A") B = self.create_model("B") self.assertRelated(A, []) self.assertRelated(B, []) def test_direct_fk(self): A = self.create_model( "A", foreign_keys=[models.ForeignKey("B", models.CASCADE)] ) B = self.create_model("B") self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_direct_hidden_fk(self): A = self.create_model( "A", foreign_keys=[models.ForeignKey("B", models.CASCADE, related_name="+")] ) B = self.create_model("B") self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_fk_through_proxy(self): A = self.create_model("A") B = self.create_model("B", bases=(A,), proxy=True) C = self.create_model("C", bases=(B,), proxy=True) D = self.create_model( "D", foreign_keys=[models.ForeignKey("C", models.CASCADE)] ) self.assertRelated(A, [B, C, D]) self.assertRelated(B, 
[A, C, D]) self.assertRelated(C, [A, B, D]) self.assertRelated(D, [A, B, C]) def test_nested_fk(self): A = self.create_model( "A", foreign_keys=[models.ForeignKey("B", models.CASCADE)] ) B = self.create_model( "B", foreign_keys=[models.ForeignKey("C", models.CASCADE)] ) C = self.create_model("C") self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_two_sided(self): A = self.create_model( "A", foreign_keys=[models.ForeignKey("B", models.CASCADE)] ) B = self.create_model( "B", foreign_keys=[models.ForeignKey("A", models.CASCADE)] ) self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_circle(self): A = self.create_model( "A", foreign_keys=[models.ForeignKey("B", models.CASCADE)] ) B = self.create_model( "B", foreign_keys=[models.ForeignKey("C", models.CASCADE)] ) C = self.create_model( "C", foreign_keys=[models.ForeignKey("A", models.CASCADE)] ) self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_base(self): A = self.create_model("A") B = self.create_model("B", bases=(A,)) self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_nested_base(self): A = self.create_model("A") B = self.create_model("B", bases=(A,)) C = self.create_model("C", bases=(B,)) self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_multiple_bases(self): A = self.create_model("A") B = self.create_model("B") C = self.create_model( "C", bases=( A, B, ), ) self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_multiple_nested_bases(self): A = self.create_model("A") B = self.create_model("B") C = self.create_model( "C", bases=( A, B, ), ) D = self.create_model("D") E = self.create_model("E", bases=(D,)) F = self.create_model( "F", bases=( C, E, ), ) Y = self.create_model("Y") Z = self.create_model("Z", bases=(Y,)) self.assertRelated(A, [B, C, D, E, F]) self.assertRelated(B, [A, C, D, E, F]) self.assertRelated(C, [A, B, D, E, F]) self.assertRelated(D, [A, B, C, E, F]) self.assertRelated(E, [A, B, C, D, F]) self.assertRelated(F, [A, B, C, D, E]) self.assertRelated(Y, [Z]) self.assertRelated(Z, [Y]) def test_base_to_base_fk(self): A = self.create_model( "A", foreign_keys=[models.ForeignKey("Y", models.CASCADE)] ) B = self.create_model("B", bases=(A,)) Y = self.create_model("Y") Z = self.create_model("Z", bases=(Y,)) self.assertRelated(A, [B, Y, Z]) self.assertRelated(B, [A, Y, Z]) self.assertRelated(Y, [A, B, Z]) self.assertRelated(Z, [A, B, Y]) def test_base_to_subclass_fk(self): A = self.create_model( "A", foreign_keys=[models.ForeignKey("Z", models.CASCADE)] ) B = self.create_model("B", bases=(A,)) Y = self.create_model("Y") Z = self.create_model("Z", bases=(Y,)) self.assertRelated(A, [B, Y, Z]) self.assertRelated(B, [A, Y, Z]) self.assertRelated(Y, [A, B, Z]) self.assertRelated(Z, [A, B, Y]) def test_direct_m2m(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField("B")]) B = self.create_model("B") self.assertRelated(A, [A.a_1.rel.through, B]) self.assertRelated(B, [A, A.a_1.rel.through]) def test_direct_m2m_self(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField("A")]) self.assertRelated(A, [A.a_1.rel.through]) def test_intermediate_m2m_self(self): A = self.create_model( "A", foreign_keys=[models.ManyToManyField("A", through="T")] ) T = self.create_model( "T", foreign_keys=[ models.ForeignKey("A", models.CASCADE), models.ForeignKey("A", models.CASCADE), ], ) self.assertRelated(A, [T]) 
self.assertRelated(T, [A]) def test_intermediate_m2m(self): A = self.create_model( "A", foreign_keys=[models.ManyToManyField("B", through="T")] ) B = self.create_model("B") T = self.create_model( "T", foreign_keys=[ models.ForeignKey("A", models.CASCADE), models.ForeignKey("B", models.CASCADE), ], ) self.assertRelated(A, [B, T]) self.assertRelated(B, [A, T]) self.assertRelated(T, [A, B]) def test_intermediate_m2m_extern_fk(self): A = self.create_model( "A", foreign_keys=[models.ManyToManyField("B", through="T")] ) B = self.create_model("B") Z = self.create_model("Z") T = self.create_model( "T", foreign_keys=[ models.ForeignKey("A", models.CASCADE), models.ForeignKey("B", models.CASCADE), models.ForeignKey("Z", models.CASCADE), ], ) self.assertRelated(A, [B, T, Z]) self.assertRelated(B, [A, T, Z]) self.assertRelated(T, [A, B, Z]) self.assertRelated(Z, [A, B, T]) def test_intermediate_m2m_base(self): A = self.create_model( "A", foreign_keys=[models.ManyToManyField("B", through="T")] ) B = self.create_model("B") S = self.create_model("S") T = self.create_model( "T", foreign_keys=[ models.ForeignKey("A", models.CASCADE), models.ForeignKey("B", models.CASCADE), ], bases=(S,), ) self.assertRelated(A, [B, S, T]) self.assertRelated(B, [A, S, T]) self.assertRelated(S, [A, B, T]) self.assertRelated(T, [A, B, S]) def test_generic_fk(self): A = self.create_model( "A", foreign_keys=[ models.ForeignKey("B", models.CASCADE), GenericForeignKey(), ], ) B = self.create_model( "B", foreign_keys=[ models.ForeignKey("C", models.CASCADE), ], ) self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_abstract_base(self): A = self.create_model("A", abstract=True) B = self.create_model("B", bases=(A,)) self.assertRelated(A, [B]) self.assertRelated(B, []) def test_nested_abstract_base(self): A = self.create_model("A", abstract=True) B = self.create_model("B", bases=(A,), abstract=True) C = self.create_model("C", bases=(B,)) self.assertRelated(A, [B, C]) self.assertRelated(B, [C]) self.assertRelated(C, []) def test_proxy_base(self): A = self.create_model("A") B = self.create_model("B", bases=(A,), proxy=True) self.assertRelated(A, [B]) self.assertRelated(B, []) def test_nested_proxy_base(self): A = self.create_model("A") B = self.create_model("B", bases=(A,), proxy=True) C = self.create_model("C", bases=(B,), proxy=True) self.assertRelated(A, [B, C]) self.assertRelated(B, [C]) self.assertRelated(C, []) def test_multiple_mixed_bases(self): A = self.create_model("A", abstract=True) M = self.create_model("M") P = self.create_model("P") Q = self.create_model("Q", bases=(P,), proxy=True) Z = self.create_model("Z", bases=(A, M, Q)) # M has a pointer O2O field p_ptr to P self.assertRelated(A, [M, P, Q, Z]) self.assertRelated(M, [P, Q, Z]) self.assertRelated(P, [M, Q, Z]) self.assertRelated(Q, [M, P, Z]) self.assertRelated(Z, [M, P, Q])
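
# --- Illustrative sketch, not part of the upstream test module ---
# A minimal, self-contained use of the ModelState/ProjectState API exercised
# by the tests above. The "illustration" app label and the Tag model are
# hypothetical; the helper is never called by the test suite and assumes a
# configured Django environment when it is.
def _projectstate_usage_sketch():
    # Local imports keep the sketch self-contained; they mirror what this
    # module already imports.
    from django.db import models
    from django.db.migrations.state import ModelState, ProjectState

    state = ProjectState()
    state.add_model(
        ModelState(
            "illustration",
            "Tag",
            [("id", models.AutoField(primary_key=True))],
        )
    )
    # Historical model classes are rendered on demand when .apps is accessed.
    return state.apps.get_model("illustration", "Tag")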
137ba40d94b3d6fb6cb06b26bd5e882960dc65cf507187bf27abb89f3ccb8310
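# Illustrative sketch, not part of the upstream file: the two ORM patterns
# exercised throughout these tests, shown only as comments because running
# queries at import time would require a database.
#
#   totals = Book.objects.aggregate(avg_rating=Avg("rating"))
#   # aggregate() collapses the queryset into a dict, e.g. {"avg_rating": ...}.
#   annotated = Book.objects.annotate(num_authors=Count("authors"))
#   # annotate() keeps a queryset and adds .num_authors to each Book instance.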
import datetime import math import re from decimal import Decimal from django.core.exceptions import FieldError from django.db import connection from django.db.models import ( Avg, Case, Count, DateField, DateTimeField, DecimalField, DurationField, Exists, F, FloatField, IntegerField, Max, Min, OuterRef, Q, StdDev, Subquery, Sum, TimeField, Value, Variance, When, ) from django.db.models.expressions import Func, RawSQL from django.db.models.functions import ( Cast, Coalesce, Greatest, Now, Pi, TruncDate, TruncHour, ) from django.test import TestCase from django.test.testcases import skipUnlessDBFeature from django.test.utils import Approximate, CaptureQueriesContext from django.utils import timezone from .models import Author, Book, Publisher, Store class NowUTC(Now): template = "CURRENT_TIMESTAMP" output_field = DateTimeField() def as_sql(self, compiler, connection, **extra_context): if connection.features.test_now_utc_template: extra_context["template"] = connection.features.test_now_utc_template return super().as_sql(compiler, connection, **extra_context) class AggregateTestCase(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34) cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35) cls.a3 = Author.objects.create(name="Brad Dayley", age=45) cls.a4 = Author.objects.create(name="James Bennett", age=29) cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37) cls.a6 = Author.objects.create(name="Paul Bissex", age=29) cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25) cls.a8 = Author.objects.create(name="Peter Norvig", age=57) cls.a9 = Author.objects.create(name="Stuart Russell", age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create( name="Apress", num_awards=3, duration=datetime.timedelta(days=1) ) cls.p2 = Publisher.objects.create( name="Sams", num_awards=1, duration=datetime.timedelta(days=2) ) cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7) cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn="159059725", name="The Definitive Guide to Django: Web Development Done Right", pages=447, rating=4.5, price=Decimal("30.00"), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6), ) cls.b2 = Book.objects.create( isbn="067232959", name="Sams Teach Yourself Django in 24 Hours", pages=528, rating=3.0, price=Decimal("23.09"), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3), ) cls.b3 = Book.objects.create( isbn="159059996", name="Practical Django Projects", pages=300, rating=4.0, price=Decimal("29.69"), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23), ) cls.b4 = Book.objects.create( isbn="013235613", name="Python Web Development with Django", pages=350, rating=4.0, price=Decimal("29.69"), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3), ) cls.b5 = Book.objects.create( isbn="013790395", name="Artificial Intelligence: A Modern Approach", pages=1132, rating=4.0, price=Decimal("82.80"), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15), ) cls.b6 = Book.objects.create( isbn="155860191", name=( "Paradigms of Artificial 
Intelligence Programming: Case Studies in " "Common Lisp" ), pages=946, rating=5.0, price=Decimal("75.00"), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15), ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name="Amazon.com", original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), friday_night_closing=datetime.time(23, 59, 59), ) s2 = Store.objects.create( name="Books.com", original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59), ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30), ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def test_empty_aggregate(self): self.assertEqual(Author.objects.aggregate(), {}) def test_aggregate_in_order_by(self): msg = ( "Using an aggregate in order_by() without also including it in " "annotate() is not allowed: Avg(F(book__rating)" ) with self.assertRaisesMessage(FieldError, msg): Author.objects.values("age").order_by(Avg("book__rating")) def test_single_aggregate(self): vals = Author.objects.aggregate(Avg("age")) self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)}) def test_multiple_aggregates(self): vals = Author.objects.aggregate(Sum("age"), Avg("age")) self.assertEqual( vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)} ) def test_filter_aggregate(self): vals = Author.objects.filter(age__gt=29).aggregate(Sum("age")) self.assertEqual(vals, {"age__sum": 254}) def test_related_aggregate(self): vals = Author.objects.aggregate(Avg("friends__age")) self.assertEqual(vals, {"friends__age__avg": Approximate(34.07, places=2)}) vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age")) self.assertEqual(vals, {"authors__age__avg": Approximate(38.2857, places=2)}) vals = Author.objects.filter(name__contains="a").aggregate(Avg("book__rating")) self.assertEqual(vals, {"book__rating__avg": 4.0}) vals = Book.objects.aggregate(Sum("publisher__num_awards")) self.assertEqual(vals, {"publisher__num_awards__sum": 30}) vals = Publisher.objects.aggregate(Sum("book__price")) self.assertEqual(vals, {"book__price__sum": Decimal("270.27")}) def test_aggregate_multi_join(self): vals = Store.objects.aggregate(Max("books__authors__age")) self.assertEqual(vals, {"books__authors__age__max": 57}) vals = Author.objects.aggregate(Min("book__publisher__num_awards")) self.assertEqual(vals, {"book__publisher__num_awards__min": 1}) def test_aggregate_alias(self): vals = Store.objects.filter(name="Amazon.com").aggregate( amazon_mean=Avg("books__rating") ) self.assertEqual(vals, {"amazon_mean": Approximate(4.08, places=2)}) def test_aggregate_transform(self): vals = Store.objects.aggregate(min_month=Min("original_opening__month")) self.assertEqual(vals, {"min_month": 3}) def test_aggregate_join_transform(self): vals = Publisher.objects.aggregate(min_year=Min("book__pubdate__year")) self.assertEqual(vals, {"min_year": 1991}) def test_annotate_basic(self): self.assertQuerysetEqual( Book.objects.annotate().order_by("pk"), [ "The Definitive Guide to Django: Web Development Done Right", "Sams Teach Yourself Django in 24 Hours", "Practical Django Projects", "Python Web Development with Django", "Artificial 
Intelligence: A Modern Approach", "Paradigms of Artificial Intelligence Programming: Case Studies in " "Common Lisp", ], lambda b: b.name, ) books = Book.objects.annotate(mean_age=Avg("authors__age")) b = books.get(pk=self.b1.pk) self.assertEqual( b.name, "The Definitive Guide to Django: Web Development Done Right" ) self.assertEqual(b.mean_age, 34.5) def test_annotate_defer(self): qs = ( Book.objects.annotate(page_sum=Sum("pages")) .defer("name") .filter(pk=self.b1.pk) ) rows = [ ( self.b1.id, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right", ) ] self.assertQuerysetEqual( qs.order_by("pk"), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name) ) def test_annotate_defer_select_related(self): qs = ( Book.objects.select_related("contact") .annotate(page_sum=Sum("pages")) .defer("name") .filter(pk=self.b1.pk) ) rows = [ ( self.b1.id, "159059725", 447, "Adrian Holovaty", "The Definitive Guide to Django: Web Development Done Right", ) ] self.assertQuerysetEqual( qs.order_by("pk"), rows, lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name), ) def test_annotate_m2m(self): books = ( Book.objects.filter(rating__lt=4.5) .annotate(Avg("authors__age")) .order_by("name") ) self.assertQuerysetEqual( books, [ ("Artificial Intelligence: A Modern Approach", 51.5), ("Practical Django Projects", 29.0), ("Python Web Development with Django", Approximate(30.3, places=1)), ("Sams Teach Yourself Django in 24 Hours", 45.0), ], lambda b: (b.name, b.authors__age__avg), ) books = Book.objects.annotate(num_authors=Count("authors")).order_by("name") self.assertQuerysetEqual( books, [ ("Artificial Intelligence: A Modern Approach", 2), ( "Paradigms of Artificial Intelligence Programming: Case Studies in " "Common Lisp", 1, ), ("Practical Django Projects", 1), ("Python Web Development with Django", 3), ("Sams Teach Yourself Django in 24 Hours", 1), ("The Definitive Guide to Django: Web Development Done Right", 2), ], lambda b: (b.name, b.num_authors), ) def test_backwards_m2m_annotate(self): authors = ( Author.objects.filter(name__contains="a") .annotate(Avg("book__rating")) .order_by("name") ) self.assertQuerysetEqual( authors, [ ("Adrian Holovaty", 4.5), ("Brad Dayley", 3.0), ("Jacob Kaplan-Moss", 4.5), ("James Bennett", 4.0), ("Paul Bissex", 4.0), ("Stuart Russell", 4.0), ], lambda a: (a.name, a.book__rating__avg), ) authors = Author.objects.annotate(num_books=Count("book")).order_by("name") self.assertQuerysetEqual( authors, [ ("Adrian Holovaty", 1), ("Brad Dayley", 1), ("Jacob Kaplan-Moss", 1), ("James Bennett", 1), ("Jeffrey Forcier", 1), ("Paul Bissex", 1), ("Peter Norvig", 2), ("Stuart Russell", 1), ("Wesley J. 
Chun", 1), ], lambda a: (a.name, a.num_books), ) def test_reverse_fkey_annotate(self): books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name") self.assertQuerysetEqual( books, [ ("Artificial Intelligence: A Modern Approach", 7), ( "Paradigms of Artificial Intelligence Programming: Case Studies in " "Common Lisp", 9, ), ("Practical Django Projects", 3), ("Python Web Development with Django", 7), ("Sams Teach Yourself Django in 24 Hours", 1), ("The Definitive Guide to Django: Web Development Done Right", 3), ], lambda b: (b.name, b.publisher__num_awards__sum), ) publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name") self.assertQuerysetEqual( publishers, [ ("Apress", Decimal("59.69")), ("Jonno's House of Books", None), ("Morgan Kaufmann", Decimal("75.00")), ("Prentice Hall", Decimal("112.49")), ("Sams", Decimal("23.09")), ], lambda p: (p.name, p.book__price__sum), ) def test_annotate_values(self): books = list( Book.objects.filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values() ) self.assertEqual( books, [ { "contact_id": self.a1.id, "id": self.b1.id, "isbn": "159059725", "mean_age": 34.5, "name": ( "The Definitive Guide to Django: Web Development Done Right" ), "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": self.p1.id, "rating": 4.5, } ], ) books = ( Book.objects.filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values("pk", "isbn", "mean_age") ) self.assertEqual( list(books), [ { "pk": self.b1.pk, "isbn": "159059725", "mean_age": 34.5, } ], ) books = ( Book.objects.filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values("name") ) self.assertEqual( list(books), [{"name": "The Definitive Guide to Django: Web Development Done Right"}], ) books = ( Book.objects.filter(pk=self.b1.pk) .values() .annotate(mean_age=Avg("authors__age")) ) self.assertEqual( list(books), [ { "contact_id": self.a1.id, "id": self.b1.id, "isbn": "159059725", "mean_age": 34.5, "name": ( "The Definitive Guide to Django: Web Development Done Right" ), "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": self.p1.id, "rating": 4.5, } ], ) books = ( Book.objects.values("rating") .annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")) .order_by("rating") ) self.assertEqual( list(books), [ { "rating": 3.0, "n_authors": 1, "mean_age": 45.0, }, { "rating": 4.0, "n_authors": 6, "mean_age": Approximate(37.16, places=1), }, { "rating": 4.5, "n_authors": 2, "mean_age": 34.5, }, { "rating": 5.0, "n_authors": 1, "mean_age": 57.0, }, ], ) authors = Author.objects.annotate(Avg("friends__age")).order_by("name") self.assertQuerysetEqual( authors, [ ("Adrian Holovaty", 32.0), ("Brad Dayley", None), ("Jacob Kaplan-Moss", 29.5), ("James Bennett", 34.0), ("Jeffrey Forcier", 27.0), ("Paul Bissex", 31.0), ("Peter Norvig", 46.0), ("Stuart Russell", 57.0), ("Wesley J. 
Chun", Approximate(33.66, places=1)), ], lambda a: (a.name, a.friends__age__avg), ) def test_count(self): vals = Book.objects.aggregate(Count("rating")) self.assertEqual(vals, {"rating__count": 6}) def test_count_star(self): with self.assertNumQueries(1) as ctx: Book.objects.aggregate(n=Count("*")) sql = ctx.captured_queries[0]["sql"] self.assertIn("SELECT COUNT(*) ", sql) def test_count_distinct_expression(self): aggs = Book.objects.aggregate( distinct_ratings=Count( Case(When(pages__gt=300, then="rating")), distinct=True ), ) self.assertEqual(aggs["distinct_ratings"], 4) def test_distinct_on_aggregate(self): for aggregate, expected_result in ( (Avg, 4.125), (Count, 4), (Sum, 16.5), ): with self.subTest(aggregate=aggregate.__name__): books = Book.objects.aggregate( ratings=aggregate("rating", distinct=True) ) self.assertEqual(books["ratings"], expected_result) def test_non_grouped_annotation_not_in_group_by(self): """ An annotation not included in values() before an aggregate should be excluded from the group by clause. """ qs = ( Book.objects.annotate(xprice=F("price")) .filter(rating=4.0) .values("rating") .annotate(count=Count("publisher_id", distinct=True)) .values("count", "rating") .order_by("count") ) self.assertEqual(list(qs), [{"rating": 4.0, "count": 2}]) def test_grouped_annotation_in_group_by(self): """ An annotation included in values() before an aggregate should be included in the group by clause. """ qs = ( Book.objects.annotate(xprice=F("price")) .filter(rating=4.0) .values("rating", "xprice") .annotate(count=Count("publisher_id", distinct=True)) .values("count", "rating") .order_by("count") ) self.assertEqual( list(qs), [ {"rating": 4.0, "count": 1}, {"rating": 4.0, "count": 2}, ], ) def test_fkey_aggregate(self): explicit = list(Author.objects.annotate(Count("book__id"))) implicit = list(Author.objects.annotate(Count("book"))) self.assertCountEqual(explicit, implicit) def test_annotate_ordering(self): books = ( Book.objects.values("rating") .annotate(oldest=Max("authors__age")) .order_by("oldest", "rating") ) self.assertEqual( list(books), [ {"rating": 4.5, "oldest": 35}, {"rating": 3.0, "oldest": 45}, {"rating": 4.0, "oldest": 57}, {"rating": 5.0, "oldest": 57}, ], ) books = ( Book.objects.values("rating") .annotate(oldest=Max("authors__age")) .order_by("-oldest", "-rating") ) self.assertEqual( list(books), [ {"rating": 5.0, "oldest": 57}, {"rating": 4.0, "oldest": 57}, {"rating": 3.0, "oldest": 45}, {"rating": 4.5, "oldest": 35}, ], ) def test_aggregate_annotation(self): vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate( Avg("num_authors") ) self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)}) def test_avg_duration_field(self): # Explicit `output_field`. self.assertEqual( Publisher.objects.aggregate(Avg("duration", output_field=DurationField())), {"duration__avg": datetime.timedelta(days=1, hours=12)}, ) # Implicit `output_field`. self.assertEqual( Publisher.objects.aggregate(Avg("duration")), {"duration__avg": datetime.timedelta(days=1, hours=12)}, ) def test_sum_duration_field(self): self.assertEqual( Publisher.objects.aggregate(Sum("duration", output_field=DurationField())), {"duration__sum": datetime.timedelta(days=3)}, ) def test_sum_distinct_aggregate(self): """ Sum on a distinct() QuerySet should aggregate only the distinct items. 
""" authors = Author.objects.filter(book__in=[self.b5, self.b6]) self.assertEqual(authors.count(), 3) distinct_authors = authors.distinct() self.assertEqual(distinct_authors.count(), 2) # Selected author ages are 57 and 46 age_sum = distinct_authors.aggregate(Sum("age")) self.assertEqual(age_sum["age__sum"], 103) def test_filtering(self): p = Publisher.objects.create(name="Expensive Publisher", num_awards=0) Book.objects.create( name="ExpensiveBook1", pages=1, isbn="111", rating=3.5, price=Decimal("1000"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 1), ) Book.objects.create( name="ExpensiveBook2", pages=1, isbn="222", rating=4.0, price=Decimal("1000"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 2), ) Book.objects.create( name="ExpensiveBook3", pages=1, isbn="333", rating=4.5, price=Decimal("35"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 3), ) publishers = ( Publisher.objects.annotate(num_books=Count("book__id")) .filter(num_books__gt=1) .order_by("pk") ) self.assertQuerysetEqual( publishers, ["Apress", "Prentice Hall", "Expensive Publisher"], lambda p: p.name, ) publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by( "pk" ) self.assertQuerysetEqual( publishers, [ "Apress", "Apress", "Sams", "Prentice Hall", "Expensive Publisher", ], lambda p: p.name, ) publishers = ( Publisher.objects.annotate(num_books=Count("book__id")) .filter(num_books__gt=1, book__price__lt=Decimal("40.0")) .order_by("pk") ) self.assertQuerysetEqual( publishers, ["Apress", "Prentice Hall", "Expensive Publisher"], lambda p: p.name, ) publishers = ( Publisher.objects.filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) .order_by("pk") ) self.assertQuerysetEqual(publishers, ["Apress"], lambda p: p.name) publishers = ( Publisher.objects.annotate(num_books=Count("book")) .filter(num_books__range=[1, 3]) .order_by("pk") ) self.assertQuerysetEqual( publishers, [ "Apress", "Sams", "Prentice Hall", "Morgan Kaufmann", "Expensive Publisher", ], lambda p: p.name, ) publishers = ( Publisher.objects.annotate(num_books=Count("book")) .filter(num_books__range=[1, 2]) .order_by("pk") ) self.assertQuerysetEqual( publishers, ["Apress", "Sams", "Prentice Hall", "Morgan Kaufmann"], lambda p: p.name, ) publishers = ( Publisher.objects.annotate(num_books=Count("book")) .filter(num_books__in=[1, 3]) .order_by("pk") ) self.assertQuerysetEqual( publishers, ["Sams", "Morgan Kaufmann", "Expensive Publisher"], lambda p: p.name, ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter( num_books__isnull=True ) self.assertEqual(len(publishers), 0) def test_annotation(self): vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id")) self.assertEqual(vals, {"friends__id__count": 2}) books = ( Book.objects.annotate(num_authors=Count("authors__name")) .filter(num_authors__exact=2) .order_by("pk") ) self.assertQuerysetEqual( books, [ "The Definitive Guide to Django: Web Development Done Right", "Artificial Intelligence: A Modern Approach", ], lambda b: b.name, ) authors = ( Author.objects.annotate(num_friends=Count("friends__id", distinct=True)) .filter(num_friends=0) .order_by("pk") ) self.assertQuerysetEqual(authors, ["Brad Dayley"], lambda a: a.name) publishers = ( Publisher.objects.annotate(num_books=Count("book__id")) .filter(num_books__gt=1) .order_by("pk") ) self.assertQuerysetEqual( publishers, ["Apress", "Prentice Hall"], lambda p: p.name ) publishers = ( 
Publisher.objects.filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) ) self.assertQuerysetEqual(publishers, ["Apress"], lambda p: p.name) books = Book.objects.annotate(num_authors=Count("authors__id")).filter( authors__name__contains="Norvig", num_authors__gt=1 ) self.assertQuerysetEqual( books, ["Artificial Intelligence: A Modern Approach"], lambda b: b.name ) def test_more_aggregation(self): a = Author.objects.get(name__contains="Norvig") b = Book.objects.get(name__contains="Done Right") b.authors.add(a) b.save() vals = ( Book.objects.annotate(num_authors=Count("authors__id")) .filter(authors__name__contains="Norvig", num_authors__gt=1) .aggregate(Avg("rating")) ) self.assertEqual(vals, {"rating__avg": 4.25}) def test_even_more_aggregate(self): publishers = ( Publisher.objects.annotate( earliest_book=Min("book__pubdate"), ) .exclude(earliest_book=None) .order_by("earliest_book") .values( "earliest_book", "num_awards", "id", "name", ) ) self.assertEqual( list(publishers), [ { "earliest_book": datetime.date(1991, 10, 15), "num_awards": 9, "id": self.p4.id, "name": "Morgan Kaufmann", }, { "earliest_book": datetime.date(1995, 1, 15), "num_awards": 7, "id": self.p3.id, "name": "Prentice Hall", }, { "earliest_book": datetime.date(2007, 12, 6), "num_awards": 3, "id": self.p1.id, "name": "Apress", }, { "earliest_book": datetime.date(2008, 3, 3), "num_awards": 1, "id": self.p2.id, "name": "Sams", }, ], ) vals = Store.objects.aggregate( Max("friday_night_closing"), Min("original_opening") ) self.assertEqual( vals, { "friday_night_closing__max": datetime.time(23, 59, 59), "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14), }, ) def test_annotate_values_list(self): books = ( Book.objects.filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("pk", "isbn", "mean_age") ) self.assertEqual(list(books), [(self.b1.id, "159059725", 34.5)]) books = ( Book.objects.filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("isbn") ) self.assertEqual(list(books), [("159059725",)]) books = ( Book.objects.filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("mean_age") ) self.assertEqual(list(books), [(34.5,)]) books = ( Book.objects.filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("mean_age", flat=True) ) self.assertEqual(list(books), [34.5]) books = ( Book.objects.values_list("price") .annotate(count=Count("price")) .order_by("-count", "price") ) self.assertEqual( list(books), [ (Decimal("29.69"), 2), (Decimal("23.09"), 1), (Decimal("30"), 1), (Decimal("75"), 1), (Decimal("82.8"), 1), ], ) def test_dates_with_aggregation(self): """ .dates() returns a distinct set of dates when applied to a QuerySet with aggregation. Refs #18056. Previously, .dates() would return distinct (date_kind, aggregation) sets, in this case (year, num_authors), so 2008 would be returned twice because there are books from 2008 with a different number of authors. 
""" dates = Book.objects.annotate(num_authors=Count("authors")).dates( "pubdate", "year" ) self.assertSequenceEqual( dates, [ datetime.date(1991, 1, 1), datetime.date(1995, 1, 1), datetime.date(2007, 1, 1), datetime.date(2008, 1, 1), ], ) def test_values_aggregation(self): # Refs #20782 max_rating = Book.objects.values("rating").aggregate(max_rating=Max("rating")) self.assertEqual(max_rating["max_rating"], 5) max_books_per_rating = ( Book.objects.values("rating") .annotate(books_per_rating=Count("id")) .aggregate(Max("books_per_rating")) ) self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3}) def test_ticket17424(self): """ Doing exclude() on a foreign model after annotate() doesn't crash. """ all_books = list(Book.objects.values_list("pk", flat=True).order_by("pk")) annotated_books = Book.objects.order_by("pk").annotate(one=Count("id")) # The value doesn't matter, we just need any negative # constraint on a related model that's a noop. excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__") # Try to generate query tree str(excluded_books.query) self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk) # Check internal state self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type) self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type) def test_ticket12886(self): """ Aggregation over sliced queryset works correctly. """ qs = Book.objects.order_by("-rating")[0:3] vals = qs.aggregate(average_top3_rating=Avg("rating"))["average_top3_rating"] self.assertAlmostEqual(vals, 4.5, places=2) def test_ticket11881(self): """ Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or select_related() stuff. """ qs = ( Book.objects.select_for_update() .order_by("pk") .select_related("publisher") .annotate(max_pk=Max("pk")) ) with CaptureQueriesContext(connection) as captured_queries: qs.aggregate(avg_pk=Avg("max_pk")) self.assertEqual(len(captured_queries), 1) qstr = captured_queries[0]["sql"].lower() self.assertNotIn("for update", qstr) forced_ordering = connection.ops.force_no_ordering() if forced_ordering: # If the backend needs to force an ordering we make sure it's # the only "ORDER BY" clause present in the query. 
self.assertEqual( re.findall(r"order by (\w+)", qstr), [", ".join(f[1][0] for f in forced_ordering).lower()], ) else: self.assertNotIn("order by", qstr) self.assertEqual(qstr.count(" join "), 0) def test_decimal_max_digits_has_no_effect(self): Book.objects.all().delete() a1 = Author.objects.first() p1 = Publisher.objects.first() thedate = timezone.now() for i in range(10): Book.objects.create( isbn="abcde{}".format(i), name="none", pages=10, rating=4.0, price=9999.98, contact=a1, publisher=p1, pubdate=thedate, ) book = Book.objects.aggregate(price_sum=Sum("price")) self.assertEqual(book["price_sum"], Decimal("99999.80")) def test_nonaggregate_aggregation_throws(self): with self.assertRaisesMessage(TypeError, "fail is not an aggregate expression"): Book.objects.aggregate(fail=F("price")) def test_nonfield_annotation(self): book = Book.objects.annotate(val=Max(Value(2))).first() self.assertEqual(book.val, 2) book = Book.objects.annotate( val=Max(Value(2), output_field=IntegerField()) ).first() self.assertEqual(book.val, 2) book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first() self.assertEqual(book.val, 2) def test_annotation_expressions(self): authors = Author.objects.annotate( combined_ages=Sum(F("age") + F("friends__age")) ).order_by("name") authors2 = Author.objects.annotate( combined_ages=Sum("age") + Sum("friends__age") ).order_by("name") for qs in (authors, authors2): self.assertQuerysetEqual( qs, [ ("Adrian Holovaty", 132), ("Brad Dayley", None), ("Jacob Kaplan-Moss", 129), ("James Bennett", 63), ("Jeffrey Forcier", 128), ("Paul Bissex", 120), ("Peter Norvig", 103), ("Stuart Russell", 103), ("Wesley J. Chun", 176), ], lambda a: (a.name, a.combined_ages), ) def test_aggregation_expressions(self): a1 = Author.objects.aggregate(av_age=Sum("age") / Count("*")) a2 = Author.objects.aggregate(av_age=Sum("age") / Count("age")) a3 = Author.objects.aggregate(av_age=Avg("age")) self.assertEqual(a1, {"av_age": 37}) self.assertEqual(a2, {"av_age": 37}) self.assertEqual(a3, {"av_age": Approximate(37.4, places=1)}) def test_avg_decimal_field(self): v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price")))[ "avg_price" ] self.assertIsInstance(v, Decimal) self.assertEqual(v, Approximate(Decimal("47.39"), places=2)) def test_order_of_precedence(self): p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price") + 2) * 3) self.assertEqual(p1, {"avg_price": Approximate(Decimal("148.18"), places=2)}) p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg("price") + 2 * 3) self.assertEqual(p2, {"avg_price": Approximate(Decimal("53.39"), places=2)}) def test_combine_different_types(self): msg = ( "Cannot infer type of '+' expression involving these types: FloatField, " "DecimalField. You must set output_field." 
) qs = Book.objects.annotate(sums=Sum("rating") + Sum("pages") + Sum("price")) with self.assertRaisesMessage(FieldError, msg): qs.first() with self.assertRaisesMessage(FieldError, msg): qs.first() b1 = Book.objects.annotate( sums=Sum(F("rating") + F("pages") + F("price"), output_field=IntegerField()) ).get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) b2 = Book.objects.annotate( sums=Sum(F("rating") + F("pages") + F("price"), output_field=FloatField()) ).get(pk=self.b4.pk) self.assertEqual(b2.sums, 383.69) b3 = Book.objects.annotate( sums=Sum(F("rating") + F("pages") + F("price"), output_field=DecimalField()) ).get(pk=self.b4.pk) self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2)) def test_complex_aggregations_require_kwarg(self): with self.assertRaisesMessage( TypeError, "Complex annotations require an alias" ): Author.objects.annotate(Sum(F("age") + F("friends__age"))) with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"): Author.objects.aggregate(Sum("age") / Count("age")) with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"): Author.objects.aggregate(Sum(1)) def test_aggregate_over_complex_annotation(self): qs = Author.objects.annotate(combined_ages=Sum(F("age") + F("friends__age"))) age = qs.aggregate(max_combined_age=Max("combined_ages")) self.assertEqual(age["max_combined_age"], 176) age = qs.aggregate(max_combined_age_doubled=Max("combined_ages") * 2) self.assertEqual(age["max_combined_age_doubled"], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max("combined_ages") + Max("combined_ages") ) self.assertEqual(age["max_combined_age_doubled"], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"), sum_combined_age=Sum("combined_ages"), ) self.assertEqual(age["max_combined_age_doubled"], 176 * 2) self.assertEqual(age["sum_combined_age"], 954) age = qs.aggregate( max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"), sum_combined_age_doubled=Sum("combined_ages") + Sum("combined_ages"), ) self.assertEqual(age["max_combined_age_doubled"], 176 * 2) self.assertEqual(age["sum_combined_age_doubled"], 954 * 2) def test_values_annotation_with_expression(self): # ensure the F() is promoted to the group by clause qs = Author.objects.values("name").annotate(another_age=Sum("age") + F("age")) a = qs.get(name="Adrian Holovaty") self.assertEqual(a["another_age"], 68) qs = qs.annotate(friend_count=Count("friends")) a = qs.get(name="Adrian Holovaty") self.assertEqual(a["friend_count"], 2) qs = ( qs.annotate(combined_age=Sum("age") + F("friends__age")) .filter(name="Adrian Holovaty") .order_by("-combined_age") ) self.assertEqual( list(qs), [ { "name": "Adrian Holovaty", "another_age": 68, "friend_count": 1, "combined_age": 69, }, { "name": "Adrian Holovaty", "another_age": 68, "friend_count": 1, "combined_age": 63, }, ], ) vals = qs.values("name", "combined_age") self.assertEqual( list(vals), [ {"name": "Adrian Holovaty", "combined_age": 69}, {"name": "Adrian Holovaty", "combined_age": 63}, ], ) def test_annotate_values_aggregate(self): alias_age = ( Author.objects.annotate(age_alias=F("age")) .values( "age_alias", ) .aggregate(sum_age=Sum("age_alias")) ) age = Author.objects.values("age").aggregate(sum_age=Sum("age")) self.assertEqual(alias_age["sum_age"], age["sum_age"]) def test_annotate_over_annotate(self): author = ( Author.objects.annotate(age_alias=F("age")) .annotate(sum_age=Sum("age_alias")) .get(name="Adrian Holovaty") ) other_author = 
Author.objects.annotate(sum_age=Sum("age")).get( name="Adrian Holovaty" ) self.assertEqual(author.sum_age, other_author.sum_age) def test_aggregate_over_aggregate(self): msg = "Cannot compute Avg('age'): 'age' is an aggregate" with self.assertRaisesMessage(FieldError, msg): Author.objects.annotate(age_alias=F("age"),).aggregate( age=Sum(F("age")), avg_age=Avg(F("age")), ) def test_annotated_aggregate_over_annotated_aggregate(self): with self.assertRaisesMessage( FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate" ): Book.objects.annotate(Max("id")).annotate(Sum("id__max")) class MyMax(Max): def as_sql(self, compiler, connection): self.set_source_expressions(self.get_source_expressions()[0:1]) return super().as_sql(compiler, connection) with self.assertRaisesMessage( FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate" ): Book.objects.annotate(Max("id")).annotate(my_max=MyMax("id__max", "price")) def test_multi_arg_aggregate(self): class MyMax(Max): output_field = DecimalField() def as_sql(self, compiler, connection): copy = self.copy() copy.set_source_expressions(copy.get_source_expressions()[0:1]) return super(MyMax, copy).as_sql(compiler, connection) with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"): Book.objects.aggregate(MyMax("pages", "price")) with self.assertRaisesMessage( TypeError, "Complex annotations require an alias" ): Book.objects.annotate(MyMax("pages", "price")) Book.objects.aggregate(max_field=MyMax("pages", "price")) def test_add_implementation(self): class MySum(Sum): pass # test completely changing how the output is rendered def lower_case_function_override(self, compiler, connection): sql, params = compiler.compile(self.source_expressions[0]) substitutions = { "function": self.function.lower(), "expressions": sql, "distinct": "", } substitutions.update(self.extra) return self.template % substitutions, params setattr(MySum, "as_" + connection.vendor, lower_case_function_override) qs = Book.objects.annotate( sums=MySum( F("rating") + F("pages") + F("price"), output_field=IntegerField() ) ) self.assertEqual(str(qs.query).count("sum("), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test changing the dict and delegating def lower_case_function_super(self, compiler, connection): self.extra["function"] = self.function.lower() return super(MySum, self).as_sql(compiler, connection) setattr(MySum, "as_" + connection.vendor, lower_case_function_super) qs = Book.objects.annotate( sums=MySum( F("rating") + F("pages") + F("price"), output_field=IntegerField() ) ) self.assertEqual(str(qs.query).count("sum("), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test overriding all parts of the template def be_evil(self, compiler, connection): substitutions = {"function": "MAX", "expressions": "2", "distinct": ""} substitutions.update(self.extra) return self.template % substitutions, () setattr(MySum, "as_" + connection.vendor, be_evil) qs = Book.objects.annotate( sums=MySum( F("rating") + F("pages") + F("price"), output_field=IntegerField() ) ) self.assertEqual(str(qs.query).count("MAX("), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 2) def test_complex_values_aggregation(self): max_rating = Book.objects.values("rating").aggregate( double_max_rating=Max("rating") + Max("rating") ) self.assertEqual(max_rating["double_max_rating"], 5 * 2) max_books_per_rating = ( Book.objects.values("rating") .annotate(books_per_rating=Count("id") + 5) .aggregate(Max("books_per_rating")) ) 
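        # Count("id") + 5 is evaluated per rating group, so the maximum is the
        # size of the largest group (3 books) plus five.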
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3 + 5}) def test_expression_on_aggregation(self): qs = ( Publisher.objects.annotate( price_or_median=Greatest( Avg("book__rating", output_field=DecimalField()), Avg("book__price") ) ) .filter(price_or_median__gte=F("num_awards")) .order_by("num_awards") ) self.assertQuerysetEqual(qs, [1, 3, 7, 9], lambda v: v.num_awards) qs2 = ( Publisher.objects.annotate( rating_or_num_awards=Greatest( Avg("book__rating"), F("num_awards"), output_field=FloatField() ) ) .filter(rating_or_num_awards__gt=F("num_awards")) .order_by("num_awards") ) self.assertQuerysetEqual(qs2, [1, 3], lambda v: v.num_awards) def test_arguments_must_be_expressions(self): msg = "QuerySet.aggregate() received non-expression(s): %s." with self.assertRaisesMessage(TypeError, msg % FloatField()): Book.objects.aggregate(FloatField()) with self.assertRaisesMessage(TypeError, msg % True): Book.objects.aggregate(is_book=True) with self.assertRaisesMessage( TypeError, msg % ", ".join([str(FloatField()), "True"]) ): Book.objects.aggregate(FloatField(), Avg("price"), is_book=True) def test_aggregation_subquery_annotation(self): """Subquery annotations are excluded from the GROUP BY if they are not explicitly grouped against.""" latest_book_pubdate_qs = ( Book.objects.filter(publisher=OuterRef("pk")) .order_by("-pubdate") .values("pubdate")[:1] ) publisher_qs = Publisher.objects.annotate( latest_book_pubdate=Subquery(latest_book_pubdate_qs), ).annotate(count=Count("book")) with self.assertNumQueries(1) as ctx: list(publisher_qs) self.assertEqual(ctx[0]["sql"].count("SELECT"), 2) # The GROUP BY should not be by alias either. self.assertEqual(ctx[0]["sql"].lower().count("latest_book_pubdate"), 1) def test_aggregation_subquery_annotation_exists(self): latest_book_pubdate_qs = ( Book.objects.filter(publisher=OuterRef("pk")) .order_by("-pubdate") .values("pubdate")[:1] ) publisher_qs = Publisher.objects.annotate( latest_book_pubdate=Subquery(latest_book_pubdate_qs), count=Count("book"), ) self.assertTrue(publisher_qs.exists()) def test_aggregation_filter_exists(self): publishers_having_more_than_one_book_qs = ( Book.objects.values("publisher") .annotate(cnt=Count("isbn")) .filter(cnt__gt=1) ) query = publishers_having_more_than_one_book_qs.query.exists( using=connection.alias ) _, _, group_by = query.get_compiler(connection=connection).pre_sql_setup() self.assertEqual(len(group_by), 1) def test_aggregation_exists_annotation(self): published_books = Book.objects.filter(publisher=OuterRef("pk")) publisher_qs = Publisher.objects.annotate( published_book=Exists(published_books), count=Count("book"), ).values_list("name", flat=True) self.assertCountEqual( list(publisher_qs), [ "Apress", "Morgan Kaufmann", "Jonno's House of Books", "Prentice Hall", "Sams", ], ) def test_aggregation_subquery_annotation_values(self): """ Subquery annotations and external aliases are excluded from the GROUP BY if they are not selected. 
""" books_qs = ( Book.objects.annotate( first_author_the_same_age=Subquery( Author.objects.filter( age=OuterRef("contact__friends__age"), ) .order_by("age") .values("id")[:1], ) ) .filter( publisher=self.p1, first_author_the_same_age__isnull=False, ) .annotate( min_age=Min("contact__friends__age"), ) .values("name", "min_age") .order_by("name") ) self.assertEqual( list(books_qs), [ {"name": "Practical Django Projects", "min_age": 34}, { "name": ( "The Definitive Guide to Django: Web Development Done Right" ), "min_age": 29, }, ], ) def test_aggregation_subquery_annotation_values_collision(self): books_rating_qs = Book.objects.filter( publisher=OuterRef("pk"), price=Decimal("29.69"), ).values("rating") publisher_qs = ( Publisher.objects.filter( book__contact__age__gt=20, name=self.p1.name, ) .annotate( rating=Subquery(books_rating_qs), contacts_count=Count("book__contact"), ) .values("rating") .annotate(total_count=Count("rating")) ) self.assertEqual( list(publisher_qs), [ {"rating": 4.0, "total_count": 2}, ], ) @skipUnlessDBFeature("supports_subqueries_in_group_by") def test_aggregation_subquery_annotation_multivalued(self): """ Subquery annotations must be included in the GROUP BY if they use potentially multivalued relations (contain the LOOKUP_SEP). """ subquery_qs = Author.objects.filter( pk=OuterRef("pk"), book__name=OuterRef("book__name"), ).values("pk") author_qs = Author.objects.annotate( subquery_id=Subquery(subquery_qs), ).annotate(count=Count("book")) self.assertEqual(author_qs.count(), Author.objects.count()) def test_aggregation_order_by_not_selected_annotation_values(self): result_asc = [ self.b4.pk, self.b3.pk, self.b1.pk, self.b2.pk, self.b5.pk, self.b6.pk, ] result_desc = result_asc[::-1] tests = [ ("min_related_age", result_asc), ("-min_related_age", result_desc), (F("min_related_age"), result_asc), (F("min_related_age").asc(), result_asc), (F("min_related_age").desc(), result_desc), ] for ordering, expected_result in tests: with self.subTest(ordering=ordering): books_qs = ( Book.objects.annotate( min_age=Min("authors__age"), ) .annotate( min_related_age=Coalesce("min_age", "contact__age"), ) .order_by(ordering) .values_list("pk", flat=True) ) self.assertEqual(list(books_qs), expected_result) @skipUnlessDBFeature("supports_subqueries_in_group_by") def test_group_by_subquery_annotation(self): """ Subquery annotations are included in the GROUP BY if they are grouped against. """ long_books_count_qs = ( Book.objects.filter( publisher=OuterRef("pk"), pages__gt=400, ) .values("publisher") .annotate(count=Count("pk")) .values("count") ) groups = [ Subquery(long_books_count_qs), long_books_count_qs, long_books_count_qs.query, ] for group in groups: with self.subTest(group=group.__class__.__name__): long_books_count_breakdown = Publisher.objects.values_list( group, ).annotate(total=Count("*")) self.assertEqual(dict(long_books_count_breakdown), {None: 1, 1: 4}) @skipUnlessDBFeature("supports_subqueries_in_group_by") def test_group_by_exists_annotation(self): """ Exists annotations are included in the GROUP BY if they are grouped against. 
""" long_books_qs = Book.objects.filter( publisher=OuterRef("pk"), pages__gt=800, ) has_long_books_breakdown = Publisher.objects.values_list( Exists(long_books_qs), ).annotate(total=Count("*")) self.assertEqual(dict(has_long_books_breakdown), {True: 2, False: 3}) @skipUnlessDBFeature("supports_subqueries_in_group_by") def test_aggregation_subquery_annotation_related_field(self): publisher = Publisher.objects.create(name=self.a9.name, num_awards=2) book = Book.objects.create( isbn="159059999", name="Test book.", pages=819, rating=2.5, price=Decimal("14.44"), contact=self.a9, publisher=publisher, pubdate=datetime.date(2019, 12, 6), ) book.authors.add(self.a5, self.a6, self.a7) books_qs = ( Book.objects.annotate( contact_publisher=Subquery( Publisher.objects.filter( pk=OuterRef("publisher"), name=OuterRef("contact__name"), ).values("name")[:1], ) ) .filter( contact_publisher__isnull=False, ) .annotate(count=Count("authors")) ) self.assertSequenceEqual(books_qs, [book]) # FIXME: GROUP BY doesn't need to include a subquery with # non-multivalued JOINs, see Col.possibly_multivalued (refs #31150): # with self.assertNumQueries(1) as ctx: # self.assertSequenceEqual(books_qs, [book]) # self.assertEqual(ctx[0]['sql'].count('SELECT'), 2) @skipUnlessDBFeature("supports_subqueries_in_group_by") def test_aggregation_nested_subquery_outerref(self): publisher_with_same_name = Publisher.objects.filter( id__in=Subquery( Publisher.objects.filter( name=OuterRef(OuterRef("publisher__name")), ).values("id"), ), ).values(publisher_count=Count("id"))[:1] books_breakdown = Book.objects.annotate( publisher_count=Subquery(publisher_with_same_name), authors_count=Count("authors"), ).values_list("publisher_count", flat=True) self.assertSequenceEqual(books_breakdown, [1] * 6) def test_filter_in_subquery_or_aggregation(self): """ Filtering against an aggregate requires the usage of the HAVING clause. If such a filter is unionized to a non-aggregate one the latter will also need to be moved to the HAVING clause and have its grouping columns used in the GROUP BY. When this is done with a subquery the specialized logic in charge of using outer reference columns to group should be used instead of the subquery itself as the latter might return multiple rows. """ authors = Author.objects.annotate( Count("book"), ).filter(Q(book__count__gt=0) | Q(pk__in=Book.objects.values("authors"))) self.assertQuerysetEqual(authors, Author.objects.all(), ordered=False) def test_aggregation_random_ordering(self): """Random() is not included in the GROUP BY when used for ordering.""" authors = Author.objects.annotate(contact_count=Count("book")).order_by("?") self.assertQuerysetEqual( authors, [ ("Adrian Holovaty", 1), ("Jacob Kaplan-Moss", 1), ("Brad Dayley", 1), ("James Bennett", 1), ("Jeffrey Forcier", 1), ("Paul Bissex", 1), ("Wesley J. Chun", 1), ("Stuart Russell", 1), ("Peter Norvig", 2), ], lambda a: (a.name, a.contact_count), ordered=False, ) def test_empty_result_optimization(self): with self.assertNumQueries(0): self.assertEqual( Publisher.objects.none().aggregate( sum_awards=Sum("num_awards"), books_count=Count("book"), ), { "sum_awards": None, "books_count": 0, }, ) # Expression without empty_result_set_value forces queries to be # executed even if they would return an empty result set. 
raw_books_count = Func("book", function="COUNT") raw_books_count.contains_aggregate = True with self.assertNumQueries(1): self.assertEqual( Publisher.objects.none().aggregate( sum_awards=Sum("num_awards"), books_count=raw_books_count, ), { "sum_awards": None, "books_count": 0, }, ) def test_coalesced_empty_result_set(self): with self.assertNumQueries(0): self.assertEqual( Publisher.objects.none().aggregate( sum_awards=Coalesce(Sum("num_awards"), 0), )["sum_awards"], 0, ) # Multiple expressions. with self.assertNumQueries(0): self.assertEqual( Publisher.objects.none().aggregate( sum_awards=Coalesce(Sum("num_awards"), None, 0), )["sum_awards"], 0, ) # Nested coalesce. with self.assertNumQueries(0): self.assertEqual( Publisher.objects.none().aggregate( sum_awards=Coalesce(Coalesce(Sum("num_awards"), None), 0), )["sum_awards"], 0, ) # Expression coalesce. with self.assertNumQueries(1): self.assertIsInstance( Store.objects.none().aggregate( latest_opening=Coalesce( Max("original_opening"), RawSQL("CURRENT_TIMESTAMP", []), ), )["latest_opening"], datetime.datetime, ) def test_aggregation_default_unsupported_by_count(self): msg = "Count does not allow default." with self.assertRaisesMessage(TypeError, msg): Count("age", default=0) def test_aggregation_default_unset(self): for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]: with self.subTest(Aggregate): result = Author.objects.filter(age__gt=100).aggregate( value=Aggregate("age"), ) self.assertIsNone(result["value"]) def test_aggregation_default_zero(self): for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]: with self.subTest(Aggregate): result = Author.objects.filter(age__gt=100).aggregate( value=Aggregate("age", default=0), ) self.assertEqual(result["value"], 0) def test_aggregation_default_integer(self): for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]: with self.subTest(Aggregate): result = Author.objects.filter(age__gt=100).aggregate( value=Aggregate("age", default=21), ) self.assertEqual(result["value"], 21) def test_aggregation_default_expression(self): for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]: with self.subTest(Aggregate): result = Author.objects.filter(age__gt=100).aggregate( value=Aggregate("age", default=Value(5) * Value(7)), ) self.assertEqual(result["value"], 35) def test_aggregation_default_group_by(self): qs = ( Publisher.objects.values("name") .annotate( books=Count("book"), pages=Sum("book__pages", default=0), ) .filter(books=0) ) self.assertSequenceEqual( qs, [{"name": "Jonno's House of Books", "books": 0, "pages": 0}], ) def test_aggregation_default_compound_expression(self): # Scale rating to a percentage; default to 50% if no books published. formula = Avg("book__rating", default=2.5) * 20.0 queryset = Publisher.objects.annotate(rating=formula).order_by("name") self.assertSequenceEqual( queryset.values("name", "rating"), [ {"name": "Apress", "rating": 85.0}, {"name": "Jonno's House of Books", "rating": 50.0}, {"name": "Morgan Kaufmann", "rating": 100.0}, {"name": "Prentice Hall", "rating": 80.0}, {"name": "Sams", "rating": 60.0}, ], ) def test_aggregation_default_using_time_from_python(self): expr = Min( "store__friday_night_closing", filter=~Q(store__name="Amazon.com"), default=datetime.time(17), ) if connection.vendor == "mysql": # Workaround for #30224 for MySQL & MariaDB. 
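            # An explicit Cast keeps the plain Python default typed as TIME
            # when it is combined with the aggregate on these backends.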
expr.default = Cast(expr.default, TimeField()) queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn") self.assertSequenceEqual( queryset.values("isbn", "oldest_store_opening"), [ {"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)}, { "isbn": "013790395", "oldest_store_opening": datetime.time(23, 59, 59), }, {"isbn": "067232959", "oldest_store_opening": datetime.time(17)}, {"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)}, { "isbn": "159059725", "oldest_store_opening": datetime.time(23, 59, 59), }, {"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)}, ], ) def test_aggregation_default_using_time_from_database(self): now = timezone.now().astimezone(datetime.timezone.utc) expr = Min( "store__friday_night_closing", filter=~Q(store__name="Amazon.com"), default=TruncHour(NowUTC(), output_field=TimeField()), ) queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn") self.assertSequenceEqual( queryset.values("isbn", "oldest_store_opening"), [ {"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)}, { "isbn": "013790395", "oldest_store_opening": datetime.time(23, 59, 59), }, {"isbn": "067232959", "oldest_store_opening": datetime.time(now.hour)}, {"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)}, { "isbn": "159059725", "oldest_store_opening": datetime.time(23, 59, 59), }, {"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)}, ], ) def test_aggregation_default_using_date_from_python(self): expr = Min("book__pubdate", default=datetime.date(1970, 1, 1)) if connection.vendor == "mysql": # Workaround for #30224 for MySQL & MariaDB. expr.default = Cast(expr.default, DateField()) queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name") self.assertSequenceEqual( queryset.values("name", "earliest_pubdate"), [ {"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)}, { "name": "Jonno's House of Books", "earliest_pubdate": datetime.date(1970, 1, 1), }, { "name": "Morgan Kaufmann", "earliest_pubdate": datetime.date(1991, 10, 15), }, { "name": "Prentice Hall", "earliest_pubdate": datetime.date(1995, 1, 15), }, {"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)}, ], ) def test_aggregation_default_using_date_from_database(self): now = timezone.now().astimezone(datetime.timezone.utc) expr = Min("book__pubdate", default=TruncDate(NowUTC())) queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name") self.assertSequenceEqual( queryset.values("name", "earliest_pubdate"), [ {"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)}, {"name": "Jonno's House of Books", "earliest_pubdate": now.date()}, { "name": "Morgan Kaufmann", "earliest_pubdate": datetime.date(1991, 10, 15), }, { "name": "Prentice Hall", "earliest_pubdate": datetime.date(1995, 1, 15), }, {"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)}, ], ) def test_aggregation_default_using_datetime_from_python(self): expr = Min( "store__original_opening", filter=~Q(store__name="Amazon.com"), default=datetime.datetime(1970, 1, 1), ) if connection.vendor == "mysql": # Workaround for #30224 for MySQL & MariaDB. 
expr.default = Cast(expr.default, DateTimeField()) queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn") self.assertSequenceEqual( queryset.values("isbn", "oldest_store_opening"), [ { "isbn": "013235613", "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14), }, { "isbn": "013790395", "oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37), }, { "isbn": "067232959", "oldest_store_opening": datetime.datetime(1970, 1, 1), }, { "isbn": "155860191", "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14), }, { "isbn": "159059725", "oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37), }, { "isbn": "159059996", "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14), }, ], ) def test_aggregation_default_using_datetime_from_database(self): now = timezone.now().astimezone(datetime.timezone.utc) expr = Min( "store__original_opening", filter=~Q(store__name="Amazon.com"), default=TruncHour(NowUTC(), output_field=DateTimeField()), ) queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn") self.assertSequenceEqual( queryset.values("isbn", "oldest_store_opening"), [ { "isbn": "013235613", "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14), }, { "isbn": "013790395", "oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37), }, { "isbn": "067232959", "oldest_store_opening": now.replace( minute=0, second=0, microsecond=0, tzinfo=None ), }, { "isbn": "155860191", "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14), }, { "isbn": "159059725", "oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37), }, { "isbn": "159059996", "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14), }, ], ) def test_aggregation_default_using_duration_from_python(self): result = Publisher.objects.filter(num_awards__gt=3).aggregate( value=Sum("duration", default=datetime.timedelta(0)), ) self.assertEqual(result["value"], datetime.timedelta(0)) def test_aggregation_default_using_duration_from_database(self): result = Publisher.objects.filter(num_awards__gt=3).aggregate( value=Sum("duration", default=Now() - Now()), ) self.assertEqual(result["value"], datetime.timedelta(0)) def test_aggregation_default_using_decimal_from_python(self): result = Book.objects.filter(rating__lt=3.0).aggregate( value=Sum("price", default=Decimal("0.00")), ) self.assertEqual(result["value"], Decimal("0.00")) def test_aggregation_default_using_decimal_from_database(self): result = Book.objects.filter(rating__lt=3.0).aggregate( value=Sum("price", default=Pi()), ) self.assertAlmostEqual(result["value"], Decimal.from_float(math.pi), places=6) def test_aggregation_default_passed_another_aggregate(self): result = Book.objects.aggregate( value=Sum("price", filter=Q(rating__lt=3.0), default=Avg("pages") / 10.0), ) self.assertAlmostEqual(result["value"], Decimal("61.72"), places=2) def test_aggregation_default_after_annotation(self): result = Publisher.objects.annotate( double_num_awards=F("num_awards") * 2, ).aggregate(value=Sum("double_num_awards", default=0)) self.assertEqual(result["value"], 40) def test_aggregation_default_not_in_aggregate(self): result = Publisher.objects.annotate( avg_rating=Avg("book__rating", default=2.5), ).aggregate(Sum("num_awards")) self.assertEqual(result["num_awards__sum"], 20) def test_exists_none_with_aggregate(self): qs = Book.objects.annotate( count=Count("id"), exists=Exists(Author.objects.none()), ) self.assertEqual(len(qs), 6) def test_alias_sql_injection(self): 
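        # The alias embeds a quotation mark, a semicolon, and an SQL comment;
        # aggregate() must refuse it rather than interpolate it into SQL.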
crafted_alias = """injected_name" from "aggregation_author"; --""" msg = ( "Column aliases cannot contain whitespace characters, quotation marks, " "semicolons, or SQL comments." ) with self.assertRaisesMessage(ValueError, msg): Author.objects.aggregate(**{crafted_alias: Avg("age")}) def test_exists_extra_where_with_aggregate(self): qs = Book.objects.annotate( count=Count("id"), exists=Exists(Author.objects.extra(where=["1=0"])), ) self.assertEqual(len(qs), 6)
a20219b912e8f09a2c401281b94e36c83eaf7e6d6c53bc5cc956d7cd6e48d514
import datetime from unittest import mock from django.contrib.postgres.indexes import OpClass from django.core.exceptions import ValidationError from django.db import IntegrityError, NotSupportedError, connection, transaction from django.db.models import ( CheckConstraint, Deferrable, F, Func, IntegerField, Q, UniqueConstraint, ) from django.db.models.fields.json import KeyTextTransform from django.db.models.functions import Cast, Left, Lower from django.test import ignore_warnings, modify_settings, skipUnlessDBFeature from django.utils import timezone from django.utils.deprecation import RemovedInDjango50Warning from . import PostgreSQLTestCase from .models import HotelReservation, IntegerArrayModel, RangesModel, Room, Scene try: from psycopg2.extras import DateRange, NumericRange from django.contrib.postgres.constraints import ExclusionConstraint from django.contrib.postgres.fields import ( DateTimeRangeField, RangeBoundary, RangeOperators, ) except ImportError: pass @modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"}) class SchemaTests(PostgreSQLTestCase): get_opclass_query = """ SELECT opcname, c.relname FROM pg_opclass AS oc JOIN pg_index as i on oc.oid = ANY(i.indclass) JOIN pg_class as c on c.oid = i.indexrelid WHERE c.relname = %s """ def get_constraints(self, table): """Get the constraints on the table using a new cursor.""" with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def test_check_constraint_range_value(self): constraint_name = "ints_between" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = CheckConstraint( check=Q(ints__contained_by=NumericRange(10, 30)), name=constraint_name, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(20, 50)) RangesModel.objects.create(ints=(10, 30)) def test_check_constraint_daterange_contains(self): constraint_name = "dates_contains" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = CheckConstraint( check=Q(dates__contains=F("dates_inner")), name=constraint_name, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) date_1 = datetime.date(2016, 1, 1) date_2 = datetime.date(2016, 1, 4) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create( dates=(date_1, date_2), dates_inner=(date_1, date_2.replace(day=5)), ) RangesModel.objects.create( dates=(date_1, date_2), dates_inner=(date_1, date_2), ) def test_check_constraint_datetimerange_contains(self): constraint_name = "timestamps_contains" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = CheckConstraint( check=Q(timestamps__contains=F("timestamps_inner")), name=constraint_name, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) datetime_1 = datetime.datetime(2016, 1, 1) datetime_2 = datetime.datetime(2016, 1, 2, 12) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create( timestamps=(datetime_1, datetime_2), timestamps_inner=(datetime_1, datetime_2.replace(hour=13)), ) 
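        # An inner range equal to the outer one satisfies __contains, so this
        # row is accepted.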
RangesModel.objects.create( timestamps=(datetime_1, datetime_2), timestamps_inner=(datetime_1, datetime_2), ) def test_opclass(self): constraint = UniqueConstraint( name="test_opclass", fields=["scene"], opclasses=["varchar_pattern_ops"], ) with connection.schema_editor() as editor: editor.add_constraint(Scene, constraint) self.assertIn(constraint.name, self.get_constraints(Scene._meta.db_table)) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query, [constraint.name]) self.assertEqual( cursor.fetchall(), [("varchar_pattern_ops", constraint.name)], ) # Drop the constraint. with connection.schema_editor() as editor: editor.remove_constraint(Scene, constraint) self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table)) def test_opclass_multiple_columns(self): constraint = UniqueConstraint( name="test_opclass_multiple", fields=["scene", "setting"], opclasses=["varchar_pattern_ops", "text_pattern_ops"], ) with connection.schema_editor() as editor: editor.add_constraint(Scene, constraint) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query, [constraint.name]) expected_opclasses = ( ("varchar_pattern_ops", constraint.name), ("text_pattern_ops", constraint.name), ) self.assertCountEqual(cursor.fetchall(), expected_opclasses) def test_opclass_partial(self): constraint = UniqueConstraint( name="test_opclass_partial", fields=["scene"], opclasses=["varchar_pattern_ops"], condition=Q(setting__contains="Sir Bedemir's Castle"), ) with connection.schema_editor() as editor: editor.add_constraint(Scene, constraint) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query, [constraint.name]) self.assertCountEqual( cursor.fetchall(), [("varchar_pattern_ops", constraint.name)], ) @skipUnlessDBFeature("supports_covering_indexes") def test_opclass_include(self): constraint = UniqueConstraint( name="test_opclass_include", fields=["scene"], opclasses=["varchar_pattern_ops"], include=["setting"], ) with connection.schema_editor() as editor: editor.add_constraint(Scene, constraint) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query, [constraint.name]) self.assertCountEqual( cursor.fetchall(), [("varchar_pattern_ops", constraint.name)], ) @skipUnlessDBFeature("supports_expression_indexes") def test_opclass_func(self): constraint = UniqueConstraint( OpClass(Lower("scene"), name="text_pattern_ops"), name="test_opclass_func", ) with connection.schema_editor() as editor: editor.add_constraint(Scene, constraint) constraints = self.get_constraints(Scene._meta.db_table) self.assertIs(constraints[constraint.name]["unique"], True) self.assertIn(constraint.name, constraints) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query, [constraint.name]) self.assertEqual( cursor.fetchall(), [("text_pattern_ops", constraint.name)], ) Scene.objects.create(scene="Scene 10", setting="The dark forest of Ewing") with self.assertRaises(IntegrityError), transaction.atomic(): Scene.objects.create(scene="ScEnE 10", setting="Sir Bedemir's Castle") Scene.objects.create(scene="Scene 5", setting="Sir Bedemir's Castle") # Drop the constraint. 
with connection.schema_editor() as editor: editor.remove_constraint(Scene, constraint) self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table)) Scene.objects.create(scene="ScEnE 10", setting="Sir Bedemir's Castle") @modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"}) class ExclusionConstraintTests(PostgreSQLTestCase): def get_constraints(self, table): """Get the constraints on the table using a new cursor.""" with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def test_invalid_condition(self): msg = "ExclusionConstraint.condition must be a Q instance." with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( index_type="GIST", name="exclude_invalid_condition", expressions=[(F("datespan"), RangeOperators.OVERLAPS)], condition=F("invalid"), ) def test_invalid_index_type(self): msg = "Exclusion constraints only support GiST or SP-GiST indexes." with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( index_type="gin", name="exclude_invalid_index_type", expressions=[(F("datespan"), RangeOperators.OVERLAPS)], ) def test_invalid_expressions(self): msg = "The expressions must be a list of 2-tuples." for expressions in (["foo"], [("foo")], [("foo_1", "foo_2", "foo_3")]): with self.subTest(expressions), self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( index_type="GIST", name="exclude_invalid_expressions", expressions=expressions, ) def test_empty_expressions(self): msg = "At least one expression is required to define an exclusion constraint." for empty_expressions in (None, []): with self.subTest(empty_expressions), self.assertRaisesMessage( ValueError, msg ): ExclusionConstraint( index_type="GIST", name="exclude_empty_expressions", expressions=empty_expressions, ) def test_invalid_deferrable(self): msg = "ExclusionConstraint.deferrable must be a Deferrable instance." with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( name="exclude_invalid_deferrable", expressions=[(F("datespan"), RangeOperators.OVERLAPS)], deferrable="invalid", ) def test_deferrable_with_condition(self): msg = "ExclusionConstraint with conditions cannot be deferred." with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( name="exclude_invalid_condition", expressions=[(F("datespan"), RangeOperators.OVERLAPS)], condition=Q(cancelled=False), deferrable=Deferrable.DEFERRED, ) def test_invalid_include_type(self): msg = "ExclusionConstraint.include must be a list or tuple." with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( name="exclude_invalid_include", expressions=[(F("datespan"), RangeOperators.OVERLAPS)], include="invalid", ) @ignore_warnings(category=RemovedInDjango50Warning) def test_invalid_opclasses_type(self): msg = "ExclusionConstraint.opclasses must be a list or tuple." with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( name="exclude_invalid_opclasses", expressions=[(F("datespan"), RangeOperators.OVERLAPS)], opclasses="invalid", ) @ignore_warnings(category=RemovedInDjango50Warning) def test_opclasses_and_expressions_same_length(self): msg = ( "ExclusionConstraint.expressions and " "ExclusionConstraint.opclasses must have the same number of " "elements." 
) with self.assertRaisesMessage(ValueError, msg): ExclusionConstraint( name="exclude_invalid_expressions_opclasses_length", expressions=[(F("datespan"), RangeOperators.OVERLAPS)], opclasses=["foo", "bar"], ) def test_repr(self): constraint = ExclusionConstraint( name="exclude_overlapping", expressions=[ (F("datespan"), RangeOperators.OVERLAPS), (F("room"), RangeOperators.EQUAL), ], ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type='GIST' expressions=[" "(F(datespan), '&&'), (F(room), '=')] name='exclude_overlapping'>", ) constraint = ExclusionConstraint( name="exclude_overlapping", expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)], condition=Q(cancelled=False), index_type="SPGiST", ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type='SPGiST' expressions=[" "(F(datespan), '-|-')] name='exclude_overlapping' " "condition=(AND: ('cancelled', False))>", ) constraint = ExclusionConstraint( name="exclude_overlapping", expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)], deferrable=Deferrable.IMMEDIATE, ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type='GIST' expressions=[" "(F(datespan), '-|-')] name='exclude_overlapping' " "deferrable=Deferrable.IMMEDIATE>", ) constraint = ExclusionConstraint( name="exclude_overlapping", expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)], include=["cancelled", "room"], ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type='GIST' expressions=[" "(F(datespan), '-|-')] name='exclude_overlapping' " "include=('cancelled', 'room')>", ) constraint = ExclusionConstraint( name="exclude_overlapping", expressions=[ (OpClass("datespan", name="range_ops"), RangeOperators.ADJACENT_TO), ], ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type='GIST' expressions=[" "(OpClass(F(datespan), name=range_ops), '-|-')] " "name='exclude_overlapping'>", ) def test_eq(self): constraint_1 = ExclusionConstraint( name="exclude_overlapping", expressions=[ (F("datespan"), RangeOperators.OVERLAPS), (F("room"), RangeOperators.EQUAL), ], condition=Q(cancelled=False), ) constraint_2 = ExclusionConstraint( name="exclude_overlapping", expressions=[ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], ) constraint_3 = ExclusionConstraint( name="exclude_overlapping", expressions=[("datespan", RangeOperators.OVERLAPS)], condition=Q(cancelled=False), ) constraint_4 = ExclusionConstraint( name="exclude_overlapping", expressions=[ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], deferrable=Deferrable.DEFERRED, ) constraint_5 = ExclusionConstraint( name="exclude_overlapping", expressions=[ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], deferrable=Deferrable.IMMEDIATE, ) constraint_6 = ExclusionConstraint( name="exclude_overlapping", expressions=[ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], deferrable=Deferrable.IMMEDIATE, include=["cancelled"], ) constraint_7 = ExclusionConstraint( name="exclude_overlapping", expressions=[ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], include=["cancelled"], ) with ignore_warnings(category=RemovedInDjango50Warning): constraint_8 = ExclusionConstraint( name="exclude_overlapping", expressions=[ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], include=["cancelled"], opclasses=["range_ops", "range_ops"], ) constraint_9 = ExclusionConstraint( name="exclude_overlapping", expressions=[ ("datespan", 
RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], opclasses=["range_ops", "range_ops"], ) self.assertNotEqual(constraint_2, constraint_9) self.assertNotEqual(constraint_7, constraint_8) constraint_10 = ExclusionConstraint( name="exclude_overlapping", expressions=[ (F("datespan"), RangeOperators.OVERLAPS), (F("room"), RangeOperators.EQUAL), ], condition=Q(cancelled=False), violation_error_message="custom error", ) constraint_11 = ExclusionConstraint( name="exclude_overlapping", expressions=[ (F("datespan"), RangeOperators.OVERLAPS), (F("room"), RangeOperators.EQUAL), ], condition=Q(cancelled=False), violation_error_message="other custom error", ) self.assertEqual(constraint_1, constraint_1) self.assertEqual(constraint_1, mock.ANY) self.assertNotEqual(constraint_1, constraint_2) self.assertNotEqual(constraint_1, constraint_3) self.assertNotEqual(constraint_1, constraint_4) self.assertNotEqual(constraint_1, constraint_10) self.assertNotEqual(constraint_2, constraint_3) self.assertNotEqual(constraint_2, constraint_4) self.assertNotEqual(constraint_2, constraint_7) self.assertNotEqual(constraint_4, constraint_5) self.assertNotEqual(constraint_5, constraint_6) self.assertNotEqual(constraint_1, object()) self.assertNotEqual(constraint_10, constraint_11) self.assertEqual(constraint_10, constraint_10) def test_deconstruct(self): constraint = ExclusionConstraint( name="exclude_overlapping", expressions=[ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], ) path, args, kwargs = constraint.deconstruct() self.assertEqual( path, "django.contrib.postgres.constraints.ExclusionConstraint" ) self.assertEqual(args, ()) self.assertEqual( kwargs, { "name": "exclude_overlapping", "expressions": [ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], }, ) def test_deconstruct_index_type(self): constraint = ExclusionConstraint( name="exclude_overlapping", index_type="SPGIST", expressions=[ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], ) path, args, kwargs = constraint.deconstruct() self.assertEqual( path, "django.contrib.postgres.constraints.ExclusionConstraint" ) self.assertEqual(args, ()) self.assertEqual( kwargs, { "name": "exclude_overlapping", "index_type": "SPGIST", "expressions": [ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], }, ) def test_deconstruct_condition(self): constraint = ExclusionConstraint( name="exclude_overlapping", expressions=[ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], condition=Q(cancelled=False), ) path, args, kwargs = constraint.deconstruct() self.assertEqual( path, "django.contrib.postgres.constraints.ExclusionConstraint" ) self.assertEqual(args, ()) self.assertEqual( kwargs, { "name": "exclude_overlapping", "expressions": [ ("datespan", RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], "condition": Q(cancelled=False), }, ) def test_deconstruct_deferrable(self): constraint = ExclusionConstraint( name="exclude_overlapping", expressions=[("datespan", RangeOperators.OVERLAPS)], deferrable=Deferrable.DEFERRED, ) path, args, kwargs = constraint.deconstruct() self.assertEqual( path, "django.contrib.postgres.constraints.ExclusionConstraint" ) self.assertEqual(args, ()) self.assertEqual( kwargs, { "name": "exclude_overlapping", "expressions": [("datespan", RangeOperators.OVERLAPS)], "deferrable": Deferrable.DEFERRED, }, ) def test_deconstruct_include(self): constraint = ExclusionConstraint( name="exclude_overlapping", expressions=[("datespan", 
RangeOperators.OVERLAPS)], include=["cancelled", "room"], ) path, args, kwargs = constraint.deconstruct() self.assertEqual( path, "django.contrib.postgres.constraints.ExclusionConstraint" ) self.assertEqual(args, ()) self.assertEqual( kwargs, { "name": "exclude_overlapping", "expressions": [("datespan", RangeOperators.OVERLAPS)], "include": ("cancelled", "room"), }, ) @ignore_warnings(category=RemovedInDjango50Warning) def test_deconstruct_opclasses(self): constraint = ExclusionConstraint( name="exclude_overlapping", expressions=[("datespan", RangeOperators.OVERLAPS)], opclasses=["range_ops"], ) path, args, kwargs = constraint.deconstruct() self.assertEqual( path, "django.contrib.postgres.constraints.ExclusionConstraint" ) self.assertEqual(args, ()) self.assertEqual( kwargs, { "name": "exclude_overlapping", "expressions": [("datespan", RangeOperators.OVERLAPS)], "opclasses": ["range_ops"], }, ) def _test_range_overlaps(self, constraint): # Create exclusion constraint. self.assertNotIn( constraint.name, self.get_constraints(HotelReservation._meta.db_table) ) with connection.schema_editor() as editor: editor.add_constraint(HotelReservation, constraint) self.assertIn( constraint.name, self.get_constraints(HotelReservation._meta.db_table) ) # Add initial reservations. room101 = Room.objects.create(number=101) room102 = Room.objects.create(number=102) datetimes = [ timezone.datetime(2018, 6, 20), timezone.datetime(2018, 6, 24), timezone.datetime(2018, 6, 26), timezone.datetime(2018, 6, 28), timezone.datetime(2018, 6, 29), ] reservation = HotelReservation.objects.create( datespan=DateRange(datetimes[0].date(), datetimes[1].date()), start=datetimes[0], end=datetimes[1], room=room102, ) constraint.validate(HotelReservation, reservation) HotelReservation.objects.create( datespan=DateRange(datetimes[1].date(), datetimes[3].date()), start=datetimes[1], end=datetimes[3], room=room102, ) HotelReservation.objects.create( datespan=DateRange(datetimes[3].date(), datetimes[4].date()), start=datetimes[3], end=datetimes[4], room=room102, cancelled=True, ) # Overlap dates. with self.assertRaises(IntegrityError), transaction.atomic(): reservation = HotelReservation( datespan=(datetimes[1].date(), datetimes[2].date()), start=datetimes[1], end=datetimes[2], room=room102, ) msg = f"Constraint “{constraint.name}” is violated." with self.assertRaisesMessage(ValidationError, msg): constraint.validate(HotelReservation, reservation) reservation.save() # Valid range. other_valid_reservations = [ # Other room. HotelReservation( datespan=(datetimes[1].date(), datetimes[2].date()), start=datetimes[1], end=datetimes[2], room=room101, ), # Cancelled reservation. HotelReservation( datespan=(datetimes[1].date(), datetimes[1].date()), start=datetimes[1], end=datetimes[2], room=room102, cancelled=True, ), # Other adjacent dates. HotelReservation( datespan=(datetimes[3].date(), datetimes[4].date()), start=datetimes[3], end=datetimes[4], room=room102, ), ] for reservation in other_valid_reservations: constraint.validate(HotelReservation, reservation) HotelReservation.objects.bulk_create(other_valid_reservations) # Excluded fields. 
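        # Fields named in exclude are skipped during validation, so these
        # otherwise conflicting reservations do not raise.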
constraint.validate( HotelReservation, HotelReservation( datespan=(datetimes[1].date(), datetimes[2].date()), start=datetimes[1], end=datetimes[2], room=room102, ), exclude={"room"}, ) constraint.validate( HotelReservation, HotelReservation( datespan=(datetimes[1].date(), datetimes[2].date()), start=datetimes[1], end=datetimes[2], room=room102, ), exclude={"datespan", "start", "end", "room"}, ) @ignore_warnings(category=RemovedInDjango50Warning) def test_range_overlaps_custom_opclasses(self): class TsTzRange(Func): function = "TSTZRANGE" output_field = DateTimeRangeField() constraint = ExclusionConstraint( name="exclude_overlapping_reservations_custom", expressions=[ (TsTzRange("start", "end", RangeBoundary()), RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], condition=Q(cancelled=False), opclasses=["range_ops", "gist_int4_ops"], ) self._test_range_overlaps(constraint) def test_range_overlaps_custom(self): class TsTzRange(Func): function = "TSTZRANGE" output_field = DateTimeRangeField() constraint = ExclusionConstraint( name="exclude_overlapping_reservations_custom_opclass", expressions=[ ( OpClass(TsTzRange("start", "end", RangeBoundary()), "range_ops"), RangeOperators.OVERLAPS, ), (OpClass("room", "gist_int4_ops"), RangeOperators.EQUAL), ], condition=Q(cancelled=False), ) self._test_range_overlaps(constraint) def test_range_overlaps(self): constraint = ExclusionConstraint( name="exclude_overlapping_reservations", expressions=[ (F("datespan"), RangeOperators.OVERLAPS), ("room", RangeOperators.EQUAL), ], condition=Q(cancelled=False), ) self._test_range_overlaps(constraint) def test_range_adjacent(self): constraint_name = "ints_adjacent" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) RangesModel.objects.create(ints=(20, 50)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(10, 20)) RangesModel.objects.create(ints=(10, 19)) RangesModel.objects.create(ints=(51, 60)) # Drop the constraint. with connection.schema_editor() as editor: editor.remove_constraint(RangesModel, constraint) self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) def test_validate_range_adjacent(self): constraint = ExclusionConstraint( name="ints_adjacent", expressions=[("ints", RangeOperators.ADJACENT_TO)], violation_error_message="Custom error message.", ) range_obj = RangesModel.objects.create(ints=(20, 50)) constraint.validate(RangesModel, range_obj) msg = "Custom error message." 
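        # validate() reports violations using the constraint's custom
        # violation_error_message.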
with self.assertRaisesMessage(ValidationError, msg): constraint.validate(RangesModel, RangesModel(ints=(10, 20))) constraint.validate(RangesModel, RangesModel(ints=(10, 19))) constraint.validate(RangesModel, RangesModel(ints=(51, 60))) constraint.validate(RangesModel, RangesModel(ints=(10, 20)), exclude={"ints"}) def test_expressions_with_params(self): constraint_name = "scene_left_equal" self.assertNotIn(constraint_name, self.get_constraints(Scene._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[(Left("scene", 4), RangeOperators.EQUAL)], ) with connection.schema_editor() as editor: editor.add_constraint(Scene, constraint) self.assertIn(constraint_name, self.get_constraints(Scene._meta.db_table)) def test_expressions_with_key_transform(self): constraint_name = "exclude_overlapping_reservations_smoking" constraint = ExclusionConstraint( name=constraint_name, expressions=[ (F("datespan"), RangeOperators.OVERLAPS), (KeyTextTransform("smoking", "requirements"), RangeOperators.EQUAL), ], ) with connection.schema_editor() as editor: editor.add_constraint(HotelReservation, constraint) self.assertIn( constraint_name, self.get_constraints(HotelReservation._meta.db_table), ) def test_index_transform(self): constraint_name = "first_index_equal" constraint = ExclusionConstraint( name=constraint_name, expressions=[("field__0", RangeOperators.EQUAL)], ) with connection.schema_editor() as editor: editor.add_constraint(IntegerArrayModel, constraint) self.assertIn( constraint_name, self.get_constraints(IntegerArrayModel._meta.db_table), ) def test_range_adjacent_initially_deferred(self): constraint_name = "ints_adjacent_deferred" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], deferrable=Deferrable.DEFERRED, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) RangesModel.objects.create(ints=(20, 50)) adjacent_range = RangesModel.objects.create(ints=(10, 20)) # Constraint behavior can be changed with SET CONSTRAINTS. with self.assertRaises(IntegrityError): with transaction.atomic(), connection.cursor() as cursor: quoted_name = connection.ops.quote_name(constraint_name) cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name) # Remove adjacent range before the end of transaction. 
adjacent_range.delete() RangesModel.objects.create(ints=(10, 19)) RangesModel.objects.create(ints=(51, 60)) def test_range_adjacent_gist_include(self): constraint_name = "ints_adjacent_gist_include" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], index_type="gist", include=["decimals", "ints"], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) RangesModel.objects.create(ints=(20, 50)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(10, 20)) RangesModel.objects.create(ints=(10, 19)) RangesModel.objects.create(ints=(51, 60)) @skipUnlessDBFeature("supports_covering_spgist_indexes") def test_range_adjacent_spgist_include(self): constraint_name = "ints_adjacent_spgist_include" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], index_type="spgist", include=["decimals", "ints"], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) RangesModel.objects.create(ints=(20, 50)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(10, 20)) RangesModel.objects.create(ints=(10, 19)) RangesModel.objects.create(ints=(51, 60)) def test_range_adjacent_gist_include_condition(self): constraint_name = "ints_adjacent_gist_include_condition" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], index_type="gist", include=["decimals"], condition=Q(id__gte=100), ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) @skipUnlessDBFeature("supports_covering_spgist_indexes") def test_range_adjacent_spgist_include_condition(self): constraint_name = "ints_adjacent_spgist_include_condition" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], index_type="spgist", include=["decimals"], condition=Q(id__gte=100), ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) def test_range_adjacent_gist_include_deferrable(self): constraint_name = "ints_adjacent_gist_include_deferrable" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], index_type="gist", include=["decimals"], deferrable=Deferrable.DEFERRED, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) @skipUnlessDBFeature("supports_covering_spgist_indexes") def test_range_adjacent_spgist_include_deferrable(self): constraint_name = "ints_adjacent_spgist_include_deferrable" self.assertNotIn( 
constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], index_type="spgist", include=["decimals"], deferrable=Deferrable.DEFERRED, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) def test_spgist_include_not_supported(self): constraint_name = "ints_adjacent_spgist_include_not_supported" constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], index_type="spgist", include=["id"], ) msg = ( "Covering exclusion constraints using an SP-GiST index require " "PostgreSQL 14+." ) with connection.schema_editor() as editor: with mock.patch( "django.db.backends.postgresql.features.DatabaseFeatures." "supports_covering_spgist_indexes", False, ): with self.assertRaisesMessage(NotSupportedError, msg): editor.add_constraint(RangesModel, constraint) def test_range_adjacent_opclass(self): constraint_name = "ints_adjacent_opclass" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[ (OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO), ], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) constraints = self.get_constraints(RangesModel._meta.db_table) self.assertIn(constraint_name, constraints) with editor.connection.cursor() as cursor: cursor.execute(SchemaTests.get_opclass_query, [constraint_name]) self.assertEqual( cursor.fetchall(), [("range_ops", constraint_name)], ) RangesModel.objects.create(ints=(20, 50)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(10, 20)) RangesModel.objects.create(ints=(10, 19)) RangesModel.objects.create(ints=(51, 60)) # Drop the constraint. 
with connection.schema_editor() as editor: editor.remove_constraint(RangesModel, constraint) self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) def test_range_adjacent_opclass_condition(self): constraint_name = "ints_adjacent_opclass_condition" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[ (OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO), ], condition=Q(id__gte=100), ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) def test_range_adjacent_opclass_deferrable(self): constraint_name = "ints_adjacent_opclass_deferrable" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[ (OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO), ], deferrable=Deferrable.DEFERRED, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) def test_range_adjacent_gist_opclass_include(self): constraint_name = "ints_adjacent_gist_opclass_include" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[ (OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO), ], index_type="gist", include=["decimals"], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) @skipUnlessDBFeature("supports_covering_spgist_indexes") def test_range_adjacent_spgist_opclass_include(self): constraint_name = "ints_adjacent_spgist_opclass_include" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[ (OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO), ], index_type="spgist", include=["decimals"], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) def test_range_equal_cast(self): constraint_name = "exclusion_equal_room_cast" self.assertNotIn(constraint_name, self.get_constraints(Room._meta.db_table)) constraint = ExclusionConstraint( name=constraint_name, expressions=[(Cast("number", IntegerField()), RangeOperators.EQUAL)], ) with connection.schema_editor() as editor: editor.add_constraint(Room, constraint) self.assertIn(constraint_name, self.get_constraints(Room._meta.db_table)) @modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"}) class ExclusionConstraintOpclassesDepracationTests(PostgreSQLTestCase): def get_constraints(self, table): """Get the constraints on the table using a new cursor.""" with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def test_warning(self): msg = ( "The opclasses argument is deprecated in favor of using " "django.contrib.postgres.indexes.OpClass in " "ExclusionConstraint.expressions." 
) with self.assertWarnsMessage(RemovedInDjango50Warning, msg): ExclusionConstraint( name="exclude_overlapping", expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)], opclasses=["range_ops"], ) @ignore_warnings(category=RemovedInDjango50Warning) def test_repr(self): constraint = ExclusionConstraint( name="exclude_overlapping", expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)], opclasses=["range_ops"], ) self.assertEqual( repr(constraint), "<ExclusionConstraint: index_type='GIST' expressions=[" "(F(datespan), '-|-')] name='exclude_overlapping' " "opclasses=['range_ops']>", ) @ignore_warnings(category=RemovedInDjango50Warning) def test_range_adjacent_opclasses(self): constraint_name = "ints_adjacent_opclasses" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], opclasses=["range_ops"], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) constraints = self.get_constraints(RangesModel._meta.db_table) self.assertIn(constraint_name, constraints) with editor.connection.cursor() as cursor: cursor.execute(SchemaTests.get_opclass_query, [constraint.name]) self.assertEqual( cursor.fetchall(), [("range_ops", constraint.name)], ) RangesModel.objects.create(ints=(20, 50)) with self.assertRaises(IntegrityError), transaction.atomic(): RangesModel.objects.create(ints=(10, 20)) RangesModel.objects.create(ints=(10, 19)) RangesModel.objects.create(ints=(51, 60)) # Drop the constraint. with connection.schema_editor() as editor: editor.remove_constraint(RangesModel, constraint) self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) @ignore_warnings(category=RemovedInDjango50Warning) def test_range_adjacent_opclasses_condition(self): constraint_name = "ints_adjacent_opclasses_condition" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], opclasses=["range_ops"], condition=Q(id__gte=100), ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) @ignore_warnings(category=RemovedInDjango50Warning) def test_range_adjacent_opclasses_deferrable(self): constraint_name = "ints_adjacent_opclasses_deferrable" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], opclasses=["range_ops"], deferrable=Deferrable.DEFERRED, ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) @ignore_warnings(category=RemovedInDjango50Warning) def test_range_adjacent_gist_opclasses_include(self): constraint_name = "ints_adjacent_gist_opclasses_include" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], index_type="gist", opclasses=["range_ops"], include=["decimals"], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table)) 
@ignore_warnings(category=RemovedInDjango50Warning) @skipUnlessDBFeature("supports_covering_spgist_indexes") def test_range_adjacent_spgist_opclasses_include(self): constraint_name = "ints_adjacent_spgist_opclasses_include" self.assertNotIn( constraint_name, self.get_constraints(RangesModel._meta.db_table) ) constraint = ExclusionConstraint( name=constraint_name, expressions=[("ints", RangeOperators.ADJACENT_TO)], index_type="spgist", opclasses=["range_ops"], include=["decimals"], ) with connection.schema_editor() as editor: editor.add_constraint(RangesModel, constraint) self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
import os import re from io import StringIO from unittest import mock, skipUnless from django.core.management import call_command from django.db import connection from django.db.backends.base.introspection import TableInfo from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature from .models import PeopleMoreData, test_collation def inspectdb_tables_only(table_name): """ Limit introspection to tables created for models of this app. Some databases such as Oracle are extremely slow at introspection. """ return table_name.startswith("inspectdb_") def inspectdb_views_only(table_name): return table_name.startswith("inspectdb_") and table_name.endswith( ("_materialized", "_view") ) def special_table_only(table_name): return table_name.startswith("inspectdb_special") class InspectDBTestCase(TestCase): unique_re = re.compile(r".*unique_together = \((.+),\).*") def test_stealth_table_name_filter_option(self): out = StringIO() call_command("inspectdb", table_name_filter=inspectdb_tables_only, stdout=out) error_message = ( "inspectdb has examined a table that should have been filtered out." ) # contrib.contenttypes is one of the apps always installed when running # the Django test suite, check that one of its tables hasn't been # inspected self.assertNotIn( "class DjangoContentType(models.Model):", out.getvalue(), msg=error_message ) def test_table_option(self): """ inspectdb can inspect a subset of tables by passing the table names as arguments. """ out = StringIO() call_command("inspectdb", "inspectdb_people", stdout=out) output = out.getvalue() self.assertIn("class InspectdbPeople(models.Model):", output) self.assertNotIn("InspectdbPeopledata", output) def make_field_type_asserter(self): """ Call inspectdb and return a function to validate a field type in its output. """ out = StringIO() call_command("inspectdb", "inspectdb_columntypes", stdout=out) output = out.getvalue() def assertFieldType(name, definition): out_def = re.search(r"^\s*%s = (models.*)$" % name, output, re.MULTILINE)[1] self.assertEqual(definition, out_def) return assertFieldType def test_field_types(self): """Test introspection of various Django field types""" assertFieldType = self.make_field_type_asserter() introspected_field_types = connection.features.introspected_field_types char_field_type = introspected_field_types["CharField"] # Inspecting Oracle DB doesn't produce correct results (#19884): # - it reports fields as blank=True when they aren't. 
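        # Hence the assertions below are guarded by the backend's introspection
        # features rather than applied unconditionally.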
        if (
            not connection.features.interprets_empty_strings_as_nulls
            and char_field_type == "CharField"
        ):
            assertFieldType("char_field", "models.CharField(max_length=10)")
            assertFieldType(
                "null_char_field",
                "models.CharField(max_length=10, blank=True, null=True)",
            )
            assertFieldType("email_field", "models.CharField(max_length=254)")
            assertFieldType("file_field", "models.CharField(max_length=100)")
            assertFieldType("file_path_field", "models.CharField(max_length=100)")
            assertFieldType("slug_field", "models.CharField(max_length=50)")
            assertFieldType("text_field", "models.TextField()")
            assertFieldType("url_field", "models.CharField(max_length=200)")
        if char_field_type == "TextField":
            assertFieldType("char_field", "models.TextField()")
            assertFieldType(
                "null_char_field", "models.TextField(blank=True, null=True)"
            )
            assertFieldType("email_field", "models.TextField()")
            assertFieldType("file_field", "models.TextField()")
            assertFieldType("file_path_field", "models.TextField()")
            assertFieldType("slug_field", "models.TextField()")
            assertFieldType("text_field", "models.TextField()")
            assertFieldType("url_field", "models.TextField()")
        assertFieldType("date_field", "models.DateField()")
        assertFieldType("date_time_field", "models.DateTimeField()")
        if introspected_field_types["GenericIPAddressField"] == "GenericIPAddressField":
            assertFieldType("gen_ip_address_field", "models.GenericIPAddressField()")
        elif not connection.features.interprets_empty_strings_as_nulls:
            assertFieldType("gen_ip_address_field", "models.CharField(max_length=39)")
        assertFieldType(
            "time_field", "models.%s()" % introspected_field_types["TimeField"]
        )
        if connection.features.has_native_uuid_field:
            assertFieldType("uuid_field", "models.UUIDField()")
        elif not connection.features.interprets_empty_strings_as_nulls:
            assertFieldType("uuid_field", "models.CharField(max_length=32)")

    @skipUnlessDBFeature("can_introspect_json_field", "supports_json_field")
    def test_json_field(self):
        out = StringIO()
        call_command("inspectdb", "inspectdb_jsonfieldcolumntype", stdout=out)
        output = out.getvalue()
        if not connection.features.interprets_empty_strings_as_nulls:
            self.assertIn("json_field = models.JSONField()", output)
            self.assertIn(
                "null_json_field = models.JSONField(blank=True, null=True)", output
            )

    @skipUnlessDBFeature("supports_collation_on_charfield")
    @skipUnless(test_collation, "Language collations are not supported.")
    def test_char_field_db_collation(self):
        out = StringIO()
        call_command("inspectdb", "inspectdb_charfielddbcollation", stdout=out)
        output = out.getvalue()
        if not connection.features.interprets_empty_strings_as_nulls:
            self.assertIn(
                "char_field = models.CharField(max_length=10, "
                "db_collation='%s')" % test_collation,
                output,
            )
        else:
            self.assertIn(
                "char_field = models.CharField(max_length=10, "
                "db_collation='%s', blank=True, null=True)" % test_collation,
                output,
            )

    @skipUnlessDBFeature("supports_collation_on_textfield")
    @skipUnless(test_collation, "Language collations are not supported.")
    def test_text_field_db_collation(self):
        out = StringIO()
        call_command("inspectdb", "inspectdb_textfielddbcollation", stdout=out)
        output = out.getvalue()
        if not connection.features.interprets_empty_strings_as_nulls:
            self.assertIn(
                "text_field = models.TextField(db_collation='%s')" % test_collation,
                output,
            )
        else:
            self.assertIn(
                "text_field = models.TextField(db_collation='%s', blank=True, "
                "null=True)" % test_collation,
                output,
            )

    def test_number_field_types(self):
        """Test introspection of various Django field types"""
        assertFieldType = 
self.make_field_type_asserter() introspected_field_types = connection.features.introspected_field_types auto_field_type = connection.features.introspected_field_types["AutoField"] if auto_field_type != "AutoField": assertFieldType( "id", "models.%s(primary_key=True) # AutoField?" % auto_field_type ) assertFieldType( "big_int_field", "models.%s()" % introspected_field_types["BigIntegerField"] ) bool_field_type = introspected_field_types["BooleanField"] assertFieldType("bool_field", "models.{}()".format(bool_field_type)) assertFieldType( "null_bool_field", "models.{}(blank=True, null=True)".format(bool_field_type), ) if connection.vendor != "sqlite": assertFieldType( "decimal_field", "models.DecimalField(max_digits=6, decimal_places=1)" ) else: # Guessed arguments on SQLite, see #5014 assertFieldType( "decimal_field", "models.DecimalField(max_digits=10, decimal_places=5) " "# max_digits and decimal_places have been guessed, " "as this database handles decimal fields as float", ) assertFieldType("float_field", "models.FloatField()") assertFieldType( "int_field", "models.%s()" % introspected_field_types["IntegerField"] ) assertFieldType( "pos_int_field", "models.%s()" % introspected_field_types["PositiveIntegerField"], ) assertFieldType( "pos_big_int_field", "models.%s()" % introspected_field_types["PositiveBigIntegerField"], ) assertFieldType( "pos_small_int_field", "models.%s()" % introspected_field_types["PositiveSmallIntegerField"], ) assertFieldType( "small_int_field", "models.%s()" % introspected_field_types["SmallIntegerField"], ) @skipUnlessDBFeature("can_introspect_foreign_keys") def test_attribute_name_not_python_keyword(self): out = StringIO() call_command("inspectdb", table_name_filter=inspectdb_tables_only, stdout=out) output = out.getvalue() error_message = ( "inspectdb generated an attribute name which is a Python keyword" ) # Recursive foreign keys should be set to 'self' self.assertIn("parent = models.ForeignKey('self', models.DO_NOTHING)", output) self.assertNotIn( "from = models.ForeignKey(InspectdbPeople, models.DO_NOTHING)", output, msg=error_message, ) # As InspectdbPeople model is defined after InspectdbMessage, it should # be quoted. 
self.assertIn( "from_field = models.ForeignKey('InspectdbPeople', models.DO_NOTHING, " "db_column='from_id')", output, ) self.assertIn( "people_pk = models.OneToOneField(InspectdbPeople, models.DO_NOTHING, " "primary_key=True)", output, ) self.assertIn( "people_unique = models.OneToOneField(InspectdbPeople, models.DO_NOTHING)", output, ) @skipUnlessDBFeature("can_introspect_foreign_keys") def test_foreign_key_to_field(self): out = StringIO() call_command("inspectdb", "inspectdb_foreignkeytofield", stdout=out) self.assertIn( "to_field_fk = models.ForeignKey('InspectdbPeoplemoredata', " "models.DO_NOTHING, to_field='people_unique_id')", out.getvalue(), ) def test_digits_column_name_introspection(self): """Introspection of column names consist/start with digits (#16536/#17676)""" char_field_type = connection.features.introspected_field_types["CharField"] out = StringIO() call_command("inspectdb", "inspectdb_digitsincolumnname", stdout=out) output = out.getvalue() error_message = "inspectdb generated a model field name which is a number" self.assertNotIn( " 123 = models.%s" % char_field_type, output, msg=error_message ) self.assertIn("number_123 = models.%s" % char_field_type, output) error_message = ( "inspectdb generated a model field name which starts with a digit" ) self.assertNotIn( " 4extra = models.%s" % char_field_type, output, msg=error_message ) self.assertIn("number_4extra = models.%s" % char_field_type, output) self.assertNotIn( " 45extra = models.%s" % char_field_type, output, msg=error_message ) self.assertIn("number_45extra = models.%s" % char_field_type, output) def test_special_column_name_introspection(self): """ Introspection of column names containing special characters, unsuitable for Python identifiers """ out = StringIO() call_command("inspectdb", table_name_filter=special_table_only, stdout=out) output = out.getvalue() base_name = connection.introspection.identifier_converter("Field") integer_field_type = connection.features.introspected_field_types[ "IntegerField" ] self.assertIn("field = models.%s()" % integer_field_type, output) self.assertIn( "field_field = models.%s(db_column='%s_')" % (integer_field_type, base_name), output, ) self.assertIn( "field_field_0 = models.%s(db_column='%s__')" % (integer_field_type, base_name), output, ) self.assertIn( "field_field_1 = models.%s(db_column='__field')" % integer_field_type, output, ) self.assertIn( "prc_x = models.{}(db_column='prc(%) x')".format(integer_field_type), output ) self.assertIn("tamaño = models.%s()" % integer_field_type, output) def test_table_name_introspection(self): """ Introspection of table names containing special characters, unsuitable for Python identifiers """ out = StringIO() call_command("inspectdb", table_name_filter=special_table_only, stdout=out) output = out.getvalue() self.assertIn("class InspectdbSpecialTableName(models.Model):", output) @skipUnlessDBFeature("supports_expression_indexes") def test_table_with_func_unique_constraint(self): out = StringIO() call_command("inspectdb", "inspectdb_funcuniqueconstraint", stdout=out) output = out.getvalue() self.assertIn("class InspectdbFuncuniqueconstraint(models.Model):", output) def test_managed_models(self): """ By default the command generates models with `Meta.managed = False`. 
""" out = StringIO() call_command("inspectdb", "inspectdb_columntypes", stdout=out) output = out.getvalue() self.longMessage = False self.assertIn( " managed = False", output, msg="inspectdb should generate unmanaged models.", ) def test_unique_together_meta(self): out = StringIO() call_command("inspectdb", "inspectdb_uniquetogether", stdout=out) output = out.getvalue() self.assertIn(" unique_together = (('", output) unique_together_match = self.unique_re.findall(output) # There should be one unique_together tuple. self.assertEqual(len(unique_together_match), 1) fields = unique_together_match[0] # Fields with db_column = field name. self.assertIn("('field1', 'field2')", fields) # Fields from columns whose names are Python keywords. self.assertIn("('field1', 'field2')", fields) # Fields whose names normalize to the same Python field name and hence # are given an integer suffix. self.assertIn("('non_unique_column', 'non_unique_column_0')", fields) @skipUnless(connection.vendor == "postgresql", "PostgreSQL specific SQL") def test_unsupported_unique_together(self): """Unsupported index types (COALESCE here) are skipped.""" with connection.cursor() as c: c.execute( "CREATE UNIQUE INDEX Findex ON %s " "(id, people_unique_id, COALESCE(message_id, -1))" % PeopleMoreData._meta.db_table ) try: out = StringIO() call_command( "inspectdb", table_name_filter=lambda tn: tn.startswith( PeopleMoreData._meta.db_table ), stdout=out, ) output = out.getvalue() self.assertIn("# A unique constraint could not be introspected.", output) self.assertEqual( self.unique_re.findall(output), ["('id', 'people_unique')"] ) finally: with connection.cursor() as c: c.execute("DROP INDEX Findex") @skipUnless( connection.vendor == "sqlite", "Only patched sqlite's DatabaseIntrospection.data_types_reverse for this test", ) def test_custom_fields(self): """ Introspection of columns with a custom field (#21090) """ out = StringIO() with mock.patch( "django.db.connection.introspection.data_types_reverse." "base_data_types_reverse", { "text": "myfields.TextField", "bigint": "BigIntegerField", }, ): call_command("inspectdb", "inspectdb_columntypes", stdout=out) output = out.getvalue() self.assertIn("text_field = myfields.TextField()", output) self.assertIn("big_int_field = models.BigIntegerField()", output) def test_introspection_errors(self): """ Introspection errors should not crash the command, and the error should be visible in the output. """ out = StringIO() with mock.patch( "django.db.connection.introspection.get_table_list", return_value=[TableInfo(name="nonexistent", type="t")], ): call_command("inspectdb", stdout=out) output = out.getvalue() self.assertIn("# Unable to inspect table 'nonexistent'", output) # The error message depends on the backend self.assertIn("# The error was:", output) def test_same_relations(self): out = StringIO() call_command("inspectdb", "inspectdb_message", stdout=out) self.assertIn( "author = models.ForeignKey('InspectdbPeople', models.DO_NOTHING, " "related_name='inspectdbmessage_author_set')", out.getvalue(), ) class InspectDBTransactionalTests(TransactionTestCase): available_apps = ["inspectdb"] def test_include_views(self): """inspectdb --include-views creates models for database views.""" with connection.cursor() as cursor: cursor.execute( "CREATE VIEW inspectdb_people_view AS " "SELECT id, name FROM inspectdb_people" ) out = StringIO() view_model = "class InspectdbPeopleView(models.Model):" view_managed = "managed = False # Created from a view." 
try: call_command( "inspectdb", table_name_filter=inspectdb_views_only, stdout=out, ) no_views_output = out.getvalue() self.assertNotIn(view_model, no_views_output) self.assertNotIn(view_managed, no_views_output) call_command( "inspectdb", table_name_filter=inspectdb_views_only, include_views=True, stdout=out, ) with_views_output = out.getvalue() self.assertIn(view_model, with_views_output) self.assertIn(view_managed, with_views_output) finally: with connection.cursor() as cursor: cursor.execute("DROP VIEW inspectdb_people_view") @skipUnlessDBFeature("can_introspect_materialized_views") def test_include_materialized_views(self): """inspectdb --include-views creates models for materialized views.""" with connection.cursor() as cursor: cursor.execute( "CREATE MATERIALIZED VIEW inspectdb_people_materialized AS " "SELECT id, name FROM inspectdb_people" ) out = StringIO() view_model = "class InspectdbPeopleMaterialized(models.Model):" view_managed = "managed = False # Created from a view." try: call_command( "inspectdb", table_name_filter=inspectdb_views_only, stdout=out, ) no_views_output = out.getvalue() self.assertNotIn(view_model, no_views_output) self.assertNotIn(view_managed, no_views_output) call_command( "inspectdb", table_name_filter=inspectdb_views_only, include_views=True, stdout=out, ) with_views_output = out.getvalue() self.assertIn(view_model, with_views_output) self.assertIn(view_managed, with_views_output) finally: with connection.cursor() as cursor: cursor.execute("DROP MATERIALIZED VIEW inspectdb_people_materialized") @skipUnless(connection.vendor == "postgresql", "PostgreSQL specific SQL") def test_include_partitions(self): """inspectdb --include-partitions creates models for partitions.""" with connection.cursor() as cursor: cursor.execute( """\ CREATE TABLE inspectdb_partition_parent (name text not null) PARTITION BY LIST (left(upper(name), 1)) """ ) cursor.execute( """\ CREATE TABLE inspectdb_partition_child PARTITION OF inspectdb_partition_parent FOR VALUES IN ('A', 'B', 'C') """ ) out = StringIO() partition_model_parent = "class InspectdbPartitionParent(models.Model):" partition_model_child = "class InspectdbPartitionChild(models.Model):" partition_managed = "managed = False # Created from a partition." 
try: call_command( "inspectdb", table_name_filter=inspectdb_tables_only, stdout=out ) no_partitions_output = out.getvalue() self.assertIn(partition_model_parent, no_partitions_output) self.assertNotIn(partition_model_child, no_partitions_output) self.assertNotIn(partition_managed, no_partitions_output) call_command( "inspectdb", table_name_filter=inspectdb_tables_only, include_partitions=True, stdout=out, ) with_partitions_output = out.getvalue() self.assertIn(partition_model_parent, with_partitions_output) self.assertIn(partition_model_child, with_partitions_output) self.assertIn(partition_managed, with_partitions_output) finally: with connection.cursor() as cursor: cursor.execute("DROP TABLE IF EXISTS inspectdb_partition_child") cursor.execute("DROP TABLE IF EXISTS inspectdb_partition_parent") @skipUnless(connection.vendor == "postgresql", "PostgreSQL specific SQL") def test_foreign_data_wrapper(self): with connection.cursor() as cursor: cursor.execute("CREATE EXTENSION IF NOT EXISTS file_fdw") cursor.execute( "CREATE SERVER inspectdb_server FOREIGN DATA WRAPPER file_fdw" ) cursor.execute( """\ CREATE FOREIGN TABLE inspectdb_iris_foreign_table ( petal_length real, petal_width real, sepal_length real, sepal_width real ) SERVER inspectdb_server OPTIONS ( filename %s ) """, [os.devnull], ) out = StringIO() foreign_table_model = "class InspectdbIrisForeignTable(models.Model):" foreign_table_managed = "managed = False" try: call_command( "inspectdb", table_name_filter=inspectdb_tables_only, stdout=out, ) output = out.getvalue() self.assertIn(foreign_table_model, output) self.assertIn(foreign_table_managed, output) finally: with connection.cursor() as cursor: cursor.execute( "DROP FOREIGN TABLE IF EXISTS inspectdb_iris_foreign_table" ) cursor.execute("DROP SERVER IF EXISTS inspectdb_server") cursor.execute("DROP EXTENSION IF EXISTS file_fdw") @skipUnlessDBFeature("create_test_table_with_composite_primary_key") def test_composite_primary_key(self): table_name = "test_table_composite_pk" with connection.cursor() as cursor: cursor.execute( connection.features.create_test_table_with_composite_primary_key ) out = StringIO() if connection.vendor == "sqlite": field_type = connection.features.introspected_field_types["AutoField"] else: field_type = connection.features.introspected_field_types["IntegerField"] try: call_command("inspectdb", table_name, stdout=out) output = out.getvalue() self.assertIn( f"column_1 = models.{field_type}(primary_key=True) # The composite " f"primary key (column_1, column_2) found, that is not supported. The " f"first column is selected.", output, ) self.assertIn( "column_2 = models.%s()" % connection.features.introspected_field_types["IntegerField"], output, ) finally: with connection.cursor() as cursor: cursor.execute("DROP TABLE %s" % table_name)
from django.db import connection, models from django.db.models.functions import Lower class People(models.Model): name = models.CharField(max_length=255) parent = models.ForeignKey("self", models.CASCADE) class Message(models.Model): from_field = models.ForeignKey(People, models.CASCADE, db_column="from_id") author = models.ForeignKey(People, models.CASCADE, related_name="message_authors") class PeopleData(models.Model): people_pk = models.ForeignKey(People, models.CASCADE, primary_key=True) ssn = models.CharField(max_length=11) class PeopleMoreData(models.Model): people_unique = models.ForeignKey(People, models.CASCADE, unique=True) message = models.ForeignKey(Message, models.CASCADE, blank=True, null=True) license = models.CharField(max_length=255) class ForeignKeyToField(models.Model): to_field_fk = models.ForeignKey( PeopleMoreData, models.CASCADE, to_field="people_unique", ) class DigitsInColumnName(models.Model): all_digits = models.CharField(max_length=11, db_column="123") leading_digit = models.CharField(max_length=11, db_column="4extra") leading_digits = models.CharField(max_length=11, db_column="45extra") class SpecialName(models.Model): field = models.IntegerField(db_column="field") # Underscores field_field_0 = models.IntegerField(db_column="Field_") field_field_1 = models.IntegerField(db_column="Field__") field_field_2 = models.IntegerField(db_column="__field") # Other chars prc_x = models.IntegerField(db_column="prc(%) x") non_ascii = models.IntegerField(db_column="tamaño") class Meta: db_table = "inspectdb_special.table name" class ColumnTypes(models.Model): id = models.AutoField(primary_key=True) big_int_field = models.BigIntegerField() bool_field = models.BooleanField(default=False) null_bool_field = models.BooleanField(null=True) char_field = models.CharField(max_length=10) null_char_field = models.CharField(max_length=10, blank=True, null=True) date_field = models.DateField() date_time_field = models.DateTimeField() decimal_field = models.DecimalField(max_digits=6, decimal_places=1) email_field = models.EmailField() file_field = models.FileField(upload_to="unused") file_path_field = models.FilePathField() float_field = models.FloatField() int_field = models.IntegerField() gen_ip_address_field = models.GenericIPAddressField(protocol="ipv4") pos_big_int_field = models.PositiveBigIntegerField() pos_int_field = models.PositiveIntegerField() pos_small_int_field = models.PositiveSmallIntegerField() slug_field = models.SlugField() small_int_field = models.SmallIntegerField() text_field = models.TextField() time_field = models.TimeField() url_field = models.URLField() uuid_field = models.UUIDField() class JSONFieldColumnType(models.Model): json_field = models.JSONField() null_json_field = models.JSONField(blank=True, null=True) class Meta: required_db_features = { "can_introspect_json_field", "supports_json_field", } test_collation = connection.features.test_collations.get("non_default") class CharFieldDbCollation(models.Model): char_field = models.CharField(max_length=10, db_collation=test_collation) class Meta: required_db_features = {"supports_collation_on_charfield"} class TextFieldDbCollation(models.Model): text_field = models.TextField(db_collation=test_collation) class Meta: required_db_features = {"supports_collation_on_textfield"} class UniqueTogether(models.Model): field1 = models.IntegerField() field2 = models.CharField(max_length=10) from_field = models.IntegerField(db_column="from") non_unique = models.IntegerField(db_column="non__unique_column") non_unique_0 = 
models.IntegerField(db_column="non_unique__column") class Meta: unique_together = [ ("field1", "field2"), ("from_field", "field1"), ("non_unique", "non_unique_0"), ] class FuncUniqueConstraint(models.Model): name = models.CharField(max_length=255) rank = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint( Lower("name"), models.F("rank"), name="index_lower_name" ) ] required_db_features = {"supports_expression_indexes"}
# Unit tests for cache framework # Uses whatever cache backend is set in the test settings file. import copy import io import os import pickle import re import shutil import sys import tempfile import threading import time import unittest from pathlib import Path from unittest import mock, skipIf from django.conf import settings from django.core import management, signals from django.core.cache import ( DEFAULT_CACHE_ALIAS, CacheHandler, CacheKeyWarning, InvalidCacheKey, cache, caches, ) from django.core.cache.backends.base import InvalidCacheBackendError from django.core.cache.backends.redis import RedisCacheClient from django.core.cache.utils import make_template_fragment_key from django.db import close_old_connections, connection, connections from django.db.backends.utils import CursorWrapper from django.http import ( HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse, ) from django.middleware.cache import ( CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware, ) from django.middleware.csrf import CsrfViewMiddleware from django.template import engines from django.template.context_processors import csrf from django.template.response import TemplateResponse from django.test import ( RequestFactory, SimpleTestCase, TestCase, TransactionTestCase, override_settings, ) from django.test.signals import setting_changed from django.test.utils import CaptureQueriesContext from django.utils import timezone, translation from django.utils.cache import ( get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers, ) from django.views.decorators.cache import cache_control, cache_page from .models import Poll, expensive_calculation # functions/classes for complex data type tests def f(): return 42 class C: def m(n): return 24 class Unpicklable: def __getstate__(self): raise pickle.PickleError() def empty_response(request): return HttpResponse() KEY_ERRORS_WITH_MEMCACHED_MSG = ( "Cache key contains characters that will cause errors if used with memcached: %r" ) @override_settings( CACHES={ "default": { "BACKEND": "django.core.cache.backends.dummy.DummyCache", } } ) class DummyCacheTests(SimpleTestCase): # The Dummy cache backend doesn't really behave like a test backend, # so it has its own test case. 
def test_simple(self): "Dummy cache backend ignores cache set calls" cache.set("key", "value") self.assertIsNone(cache.get("key")) def test_add(self): "Add doesn't do anything in dummy cache backend" self.assertIs(cache.add("addkey1", "value"), True) self.assertIs(cache.add("addkey1", "newvalue"), True) self.assertIsNone(cache.get("addkey1")) def test_non_existent(self): "Nonexistent keys aren't found in the dummy cache backend" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): "get_many returns nothing for the dummy cache backend" cache.set_many({"a": "a", "b": "b", "c": "c", "d": "d"}) self.assertEqual(cache.get_many(["a", "c", "d"]), {}) self.assertEqual(cache.get_many(["a", "b", "e"]), {}) def test_get_many_invalid_key(self): msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ":1:key with spaces" with self.assertWarnsMessage(CacheKeyWarning, msg): cache.get_many(["key with spaces"]) def test_delete(self): "Cache deletion is transparently ignored on the dummy cache backend" cache.set_many({"key1": "spam", "key2": "eggs"}) self.assertIsNone(cache.get("key1")) self.assertIs(cache.delete("key1"), False) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_has_key(self): "The has_key method doesn't ever return True for the dummy cache backend" cache.set("hello1", "goodbye1") self.assertIs(cache.has_key("hello1"), False) self.assertIs(cache.has_key("goodbye1"), False) def test_in(self): "The in operator doesn't ever return True for the dummy cache backend" cache.set("hello2", "goodbye2") self.assertNotIn("hello2", cache) self.assertNotIn("goodbye2", cache) def test_incr(self): "Dummy cache values can't be incremented" cache.set("answer", 42) with self.assertRaises(ValueError): cache.incr("answer") with self.assertRaises(ValueError): cache.incr("does_not_exist") with self.assertRaises(ValueError): cache.incr("does_not_exist", -1) def test_decr(self): "Dummy cache values can't be decremented" cache.set("answer", 42) with self.assertRaises(ValueError): cache.decr("answer") with self.assertRaises(ValueError): cache.decr("does_not_exist") with self.assertRaises(ValueError): cache.decr("does_not_exist", -1) def test_touch(self): """Dummy cache can't do touch().""" self.assertIs(cache.touch("whatever"), False) def test_data_types(self): "All data types are ignored equally by the dummy cache" tests = { "string": "this is a string", "int": 42, "bool": True, "list": [1, 2, 3, 4], "tuple": (1, 2, 3, 4), "dict": {"A": 1, "B": 2}, "function": f, "class": C, } for key, value in tests.items(): with self.subTest(key=key): cache.set(key, value) self.assertIsNone(cache.get(key)) def test_expiration(self): "Expiration has no effect on the dummy cache" cache.set("expire1", "very quickly", 1) cache.set("expire2", "very quickly", 1) cache.set("expire3", "very quickly", 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) self.assertIs(cache.add("expire2", "newvalue"), True) self.assertIsNone(cache.get("expire2")) self.assertIs(cache.has_key("expire3"), False) def test_unicode(self): "Unicode values are ignored by the dummy cache" stuff = { "ascii": "ascii_value", "unicode_ascii": "Iñtërnâtiônàlizætiøn1", "Iñtërnâtiônàlizætiøn": "Iñtërnâtiônàlizætiøn2", "ascii2": {"x": 1}, } for (key, value) in stuff.items(): with self.subTest(key=key): cache.set(key, value) self.assertIsNone(cache.get(key)) def test_set_many(self): "set_many does nothing for the dummy cache backend" self.assertEqual(cache.set_many({"a": 1, 
"b": 2}), []) self.assertEqual(cache.set_many({"a": 1, "b": 2}, timeout=2, version="1"), []) def test_set_many_invalid_key(self): msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ":1:key with spaces" with self.assertWarnsMessage(CacheKeyWarning, msg): cache.set_many({"key with spaces": "foo"}) def test_delete_many(self): "delete_many does nothing for the dummy cache backend" cache.delete_many(["a", "b"]) def test_delete_many_invalid_key(self): msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ":1:key with spaces" with self.assertWarnsMessage(CacheKeyWarning, msg): cache.delete_many(["key with spaces"]) def test_clear(self): "clear does nothing for the dummy cache backend" cache.clear() def test_incr_version(self): "Dummy cache versions can't be incremented" cache.set("answer", 42) with self.assertRaises(ValueError): cache.incr_version("answer") with self.assertRaises(ValueError): cache.incr_version("does_not_exist") def test_decr_version(self): "Dummy cache versions can't be decremented" cache.set("answer", 42) with self.assertRaises(ValueError): cache.decr_version("answer") with self.assertRaises(ValueError): cache.decr_version("does_not_exist") def test_get_or_set(self): self.assertEqual(cache.get_or_set("mykey", "default"), "default") self.assertIsNone(cache.get_or_set("mykey", None)) def test_get_or_set_callable(self): def my_callable(): return "default" self.assertEqual(cache.get_or_set("mykey", my_callable), "default") self.assertEqual(cache.get_or_set("mykey", my_callable()), "default") def custom_key_func(key, key_prefix, version): "A customized cache key function" return "CUSTOM-" + "-".join([key_prefix, str(version), key]) _caches_setting_base = { "default": {}, "prefix": {"KEY_PREFIX": "cacheprefix{}".format(os.getpid())}, "v2": {"VERSION": 2}, "custom_key": {"KEY_FUNCTION": custom_key_func}, "custom_key2": {"KEY_FUNCTION": "cache.tests.custom_key_func"}, "cull": {"OPTIONS": {"MAX_ENTRIES": 30}}, "zero_cull": {"OPTIONS": {"CULL_FREQUENCY": 0, "MAX_ENTRIES": 30}}, } def caches_setting_for_tests(base=None, exclude=None, **params): # `base` is used to pull in the memcached config from the original settings, # `exclude` is a set of cache names denoting which `_caches_setting_base` keys # should be omitted. # `params` are test specific overrides and `_caches_settings_base` is the # base config for the tests. # This results in the following search order: # params -> _caches_setting_base -> base base = base or {} exclude = exclude or set() setting = {k: base.copy() for k in _caches_setting_base if k not in exclude} for key, cache_params in setting.items(): cache_params.update(_caches_setting_base[key]) cache_params.update(params) return setting class BaseCacheTests: # A common set of tests to apply to all cache backends factory = RequestFactory() # Some clients raise custom exceptions when .incr() or .decr() are called # with a non-integer value. 
incr_decr_type_error = TypeError def tearDown(self): cache.clear() def test_simple(self): # Simple cache set/get works cache.set("key", "value") self.assertEqual(cache.get("key"), "value") def test_default_used_when_none_is_set(self): """If None is cached, get() returns it instead of the default.""" cache.set("key_default_none", None) self.assertIsNone(cache.get("key_default_none", default="default")) def test_add(self): # A key can be added to a cache self.assertIs(cache.add("addkey1", "value"), True) self.assertIs(cache.add("addkey1", "newvalue"), False) self.assertEqual(cache.get("addkey1"), "value") def test_prefix(self): # Test for same cache key conflicts between shared backend cache.set("somekey", "value") # should not be set in the prefixed cache self.assertIs(caches["prefix"].has_key("somekey"), False) caches["prefix"].set("somekey", "value2") self.assertEqual(cache.get("somekey"), "value") self.assertEqual(caches["prefix"].get("somekey"), "value2") def test_non_existent(self): """Nonexistent cache keys return as None/default.""" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): # Multiple cache keys can be returned using get_many cache.set_many({"a": "a", "b": "b", "c": "c", "d": "d"}) self.assertEqual( cache.get_many(["a", "c", "d"]), {"a": "a", "c": "c", "d": "d"} ) self.assertEqual(cache.get_many(["a", "b", "e"]), {"a": "a", "b": "b"}) self.assertEqual(cache.get_many(iter(["a", "b", "e"])), {"a": "a", "b": "b"}) cache.set_many({"x": None, "y": 1}) self.assertEqual(cache.get_many(["x", "y"]), {"x": None, "y": 1}) def test_delete(self): # Cache keys can be deleted cache.set_many({"key1": "spam", "key2": "eggs"}) self.assertEqual(cache.get("key1"), "spam") self.assertIs(cache.delete("key1"), True) self.assertIsNone(cache.get("key1")) self.assertEqual(cache.get("key2"), "eggs") def test_delete_nonexistent(self): self.assertIs(cache.delete("nonexistent_key"), False) def test_has_key(self): # The cache can be inspected for cache keys cache.set("hello1", "goodbye1") self.assertIs(cache.has_key("hello1"), True) self.assertIs(cache.has_key("goodbye1"), False) cache.set("no_expiry", "here", None) self.assertIs(cache.has_key("no_expiry"), True) cache.set("null", None) self.assertIs(cache.has_key("null"), True) def test_in(self): # The in operator can be used to inspect cache contents cache.set("hello2", "goodbye2") self.assertIn("hello2", cache) self.assertNotIn("goodbye2", cache) cache.set("null", None) self.assertIn("null", cache) def test_incr(self): # Cache values can be incremented cache.set("answer", 41) self.assertEqual(cache.incr("answer"), 42) self.assertEqual(cache.get("answer"), 42) self.assertEqual(cache.incr("answer", 10), 52) self.assertEqual(cache.get("answer"), 52) self.assertEqual(cache.incr("answer", -10), 42) with self.assertRaises(ValueError): cache.incr("does_not_exist") with self.assertRaises(ValueError): cache.incr("does_not_exist", -1) cache.set("null", None) with self.assertRaises(self.incr_decr_type_error): cache.incr("null") def test_decr(self): # Cache values can be decremented cache.set("answer", 43) self.assertEqual(cache.decr("answer"), 42) self.assertEqual(cache.get("answer"), 42) self.assertEqual(cache.decr("answer", 10), 32) self.assertEqual(cache.get("answer"), 32) self.assertEqual(cache.decr("answer", -10), 42) with self.assertRaises(ValueError): cache.decr("does_not_exist") with self.assertRaises(ValueError): cache.incr("does_not_exist", -1) cache.set("null", None) 
with self.assertRaises(self.incr_decr_type_error): cache.decr("null") def test_close(self): self.assertTrue(hasattr(cache, "close")) cache.close() def test_data_types(self): # Many different data types can be cached tests = { "string": "this is a string", "int": 42, "bool": True, "list": [1, 2, 3, 4], "tuple": (1, 2, 3, 4), "dict": {"A": 1, "B": 2}, "function": f, "class": C, } for key, value in tests.items(): with self.subTest(key=key): cache.set(key, value) self.assertEqual(cache.get(key), value) def test_cache_read_for_model_instance(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() my_poll = Poll.objects.create(question="Well?") self.assertEqual(Poll.objects.count(), 1) pub_date = my_poll.pub_date cache.set("question", my_poll) cached_poll = cache.get("question") self.assertEqual(cached_poll.pub_date, pub_date) # We only want the default expensive calculation run once self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_write_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache write expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.defer("question") self.assertEqual(defer_qs.count(), 1) self.assertEqual(expensive_calculation.num_runs, 1) cache.set("deferred_queryset", defer_qs) # cache set should not re-evaluate default functions self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_read_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.defer("question") self.assertEqual(defer_qs.count(), 1) cache.set("deferred_queryset", defer_qs) self.assertEqual(expensive_calculation.num_runs, 1) runs_before_cache_read = expensive_calculation.num_runs cache.get("deferred_queryset") # We only want the default expensive calculation run on creation and set self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read) def test_expiration(self): # Cache values can be set to expire cache.set("expire1", "very quickly", 1) cache.set("expire2", "very quickly", 1) cache.set("expire3", "very quickly", 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) self.assertIs(cache.add("expire2", "newvalue"), True) self.assertEqual(cache.get("expire2"), "newvalue") self.assertIs(cache.has_key("expire3"), False) def test_touch(self): # cache.touch() updates the timeout. cache.set("expire1", "very quickly", timeout=1) self.assertIs(cache.touch("expire1", timeout=4), True) time.sleep(2) self.assertIs(cache.has_key("expire1"), True) time.sleep(3) self.assertIs(cache.has_key("expire1"), False) # cache.touch() works without the timeout argument. 
cache.set("expire1", "very quickly", timeout=1) self.assertIs(cache.touch("expire1"), True) time.sleep(2) self.assertIs(cache.has_key("expire1"), True) self.assertIs(cache.touch("nonexistent"), False) def test_unicode(self): # Unicode values can be cached stuff = { "ascii": "ascii_value", "unicode_ascii": "Iñtërnâtiônàlizætiøn1", "Iñtërnâtiônàlizætiøn": "Iñtërnâtiônàlizætiøn2", "ascii2": {"x": 1}, } # Test `set` for (key, value) in stuff.items(): with self.subTest(key=key): cache.set(key, value) self.assertEqual(cache.get(key), value) # Test `add` for (key, value) in stuff.items(): with self.subTest(key=key): self.assertIs(cache.delete(key), True) self.assertIs(cache.add(key, value), True) self.assertEqual(cache.get(key), value) # Test `set_many` for (key, value) in stuff.items(): self.assertIs(cache.delete(key), True) cache.set_many(stuff) for (key, value) in stuff.items(): with self.subTest(key=key): self.assertEqual(cache.get(key), value) def test_binary_string(self): # Binary strings should be cacheable from zlib import compress, decompress value = "value_to_be_compressed" compressed_value = compress(value.encode()) # Test set cache.set("binary1", compressed_value) compressed_result = cache.get("binary1") self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test add self.assertIs(cache.add("binary1-add", compressed_value), True) compressed_result = cache.get("binary1-add") self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test set_many cache.set_many({"binary1-set_many": compressed_value}) compressed_result = cache.get("binary1-set_many") self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) def test_set_many(self): # Multiple keys can be set using set_many cache.set_many({"key1": "spam", "key2": "eggs"}) self.assertEqual(cache.get("key1"), "spam") self.assertEqual(cache.get("key2"), "eggs") def test_set_many_returns_empty_list_on_success(self): """set_many() returns an empty list when all keys are inserted.""" failing_keys = cache.set_many({"key1": "spam", "key2": "eggs"}) self.assertEqual(failing_keys, []) def test_set_many_expiration(self): # set_many takes a second ``timeout`` parameter cache.set_many({"key1": "spam", "key2": "eggs"}, 1) time.sleep(2) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_set_many_empty_data(self): self.assertEqual(cache.set_many({}), []) def test_delete_many(self): # Multiple keys can be deleted using delete_many cache.set_many({"key1": "spam", "key2": "eggs", "key3": "ham"}) cache.delete_many(["key1", "key2"]) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) self.assertEqual(cache.get("key3"), "ham") def test_delete_many_no_keys(self): self.assertIsNone(cache.delete_many([])) def test_clear(self): # The cache can be emptied using clear cache.set_many({"key1": "spam", "key2": "eggs"}) cache.clear() self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_long_timeout(self): """ Follow memcached's convention where a timeout greater than 30 days is treated as an absolute expiration timestamp instead of a relative offset (#12399). 
""" cache.set("key1", "eggs", 60 * 60 * 24 * 30 + 1) # 30 days + 1 second self.assertEqual(cache.get("key1"), "eggs") self.assertIs(cache.add("key2", "ham", 60 * 60 * 24 * 30 + 1), True) self.assertEqual(cache.get("key2"), "ham") cache.set_many( {"key3": "sausage", "key4": "lobster bisque"}, 60 * 60 * 24 * 30 + 1 ) self.assertEqual(cache.get("key3"), "sausage") self.assertEqual(cache.get("key4"), "lobster bisque") def test_forever_timeout(self): """ Passing in None into timeout results in a value that is cached forever """ cache.set("key1", "eggs", None) self.assertEqual(cache.get("key1"), "eggs") self.assertIs(cache.add("key2", "ham", None), True) self.assertEqual(cache.get("key2"), "ham") self.assertIs(cache.add("key1", "new eggs", None), False) self.assertEqual(cache.get("key1"), "eggs") cache.set_many({"key3": "sausage", "key4": "lobster bisque"}, None) self.assertEqual(cache.get("key3"), "sausage") self.assertEqual(cache.get("key4"), "lobster bisque") cache.set("key5", "belgian fries", timeout=1) self.assertIs(cache.touch("key5", timeout=None), True) time.sleep(2) self.assertEqual(cache.get("key5"), "belgian fries") def test_zero_timeout(self): """ Passing in zero into timeout results in a value that is not cached """ cache.set("key1", "eggs", 0) self.assertIsNone(cache.get("key1")) self.assertIs(cache.add("key2", "ham", 0), True) self.assertIsNone(cache.get("key2")) cache.set_many({"key3": "sausage", "key4": "lobster bisque"}, 0) self.assertIsNone(cache.get("key3")) self.assertIsNone(cache.get("key4")) cache.set("key5", "belgian fries", timeout=5) self.assertIs(cache.touch("key5", timeout=0), True) self.assertIsNone(cache.get("key5")) def test_float_timeout(self): # Make sure a timeout given as a float doesn't crash anything. cache.set("key1", "spam", 100.2) self.assertEqual(cache.get("key1"), "spam") def _perform_cull_test(self, cull_cache_name, initial_count, final_count): try: cull_cache = caches[cull_cache_name] except InvalidCacheBackendError: self.skipTest("Culling isn't implemented.") # Create initial cache key entries. This will overflow the cache, # causing a cull. for i in range(1, initial_count): cull_cache.set("cull%d" % i, "value", 1000) count = 0 # Count how many keys are left in the cache. for i in range(1, initial_count): if cull_cache.has_key("cull%d" % i): count += 1 self.assertEqual(count, final_count) def test_cull(self): self._perform_cull_test("cull", 50, 29) def test_zero_cull(self): self._perform_cull_test("zero_cull", 50, 19) def test_cull_delete_when_store_empty(self): try: cull_cache = caches["cull"] except InvalidCacheBackendError: self.skipTest("Culling isn't implemented.") old_max_entries = cull_cache._max_entries # Force _cull to delete on first cached record. cull_cache._max_entries = -1 try: cull_cache.set("force_cull_delete", "value", 1000) self.assertIs(cull_cache.has_key("force_cull_delete"), True) finally: cull_cache._max_entries = old_max_entries def _perform_invalid_key_test(self, key, expected_warning, key_func=None): """ All the builtin backends should warn (except memcached that should error) on keys that would be refused by memcached. This encourages portable caching code without making it too difficult to use production backends with more liberal key rules. Refs #6447. 
""" # mimic custom ``make_key`` method being defined since the default will # never show the below warnings def func(key, *args): return key old_func = cache.key_func cache.key_func = key_func or func tests = [ ("add", [key, 1]), ("get", [key]), ("set", [key, 1]), ("incr", [key]), ("decr", [key]), ("touch", [key]), ("delete", [key]), ("get_many", [[key, "b"]]), ("set_many", [{key: 1, "b": 2}]), ("delete_many", [[key, "b"]]), ] try: for operation, args in tests: with self.subTest(operation=operation): with self.assertWarns(CacheKeyWarning) as cm: getattr(cache, operation)(*args) self.assertEqual(str(cm.warning), expected_warning) finally: cache.key_func = old_func def test_invalid_key_characters(self): # memcached doesn't allow whitespace or control characters in keys. key = "key with spaces and 清" self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key) def test_invalid_key_length(self): # memcached limits key length to 250. key = ("a" * 250) + "清" expected_warning = ( "Cache key will cause errors if used with memcached: " "%r (longer than %s)" % (key, 250) ) self._perform_invalid_key_test(key, expected_warning) def test_invalid_with_version_key_length(self): # Custom make_key() that adds a version to the key and exceeds the # limit. def key_func(key, *args): return key + ":1" key = "a" * 249 expected_warning = ( "Cache key will cause errors if used with memcached: " "%r (longer than %s)" % (key_func(key), 250) ) self._perform_invalid_key_test(key, expected_warning, key_func=key_func) def test_cache_versioning_get_set(self): # set, using default version = 1 cache.set("answer1", 42) self.assertEqual(cache.get("answer1"), 42) self.assertEqual(cache.get("answer1", version=1), 42) self.assertIsNone(cache.get("answer1", version=2)) self.assertIsNone(caches["v2"].get("answer1")) self.assertEqual(caches["v2"].get("answer1", version=1), 42) self.assertIsNone(caches["v2"].get("answer1", version=2)) # set, default version = 1, but manually override version = 2 cache.set("answer2", 42, version=2) self.assertIsNone(cache.get("answer2")) self.assertIsNone(cache.get("answer2", version=1)) self.assertEqual(cache.get("answer2", version=2), 42) self.assertEqual(caches["v2"].get("answer2"), 42) self.assertIsNone(caches["v2"].get("answer2", version=1)) self.assertEqual(caches["v2"].get("answer2", version=2), 42) # v2 set, using default version = 2 caches["v2"].set("answer3", 42) self.assertIsNone(cache.get("answer3")) self.assertIsNone(cache.get("answer3", version=1)) self.assertEqual(cache.get("answer3", version=2), 42) self.assertEqual(caches["v2"].get("answer3"), 42) self.assertIsNone(caches["v2"].get("answer3", version=1)) self.assertEqual(caches["v2"].get("answer3", version=2), 42) # v2 set, default version = 2, but manually override version = 1 caches["v2"].set("answer4", 42, version=1) self.assertEqual(cache.get("answer4"), 42) self.assertEqual(cache.get("answer4", version=1), 42) self.assertIsNone(cache.get("answer4", version=2)) self.assertIsNone(caches["v2"].get("answer4")) self.assertEqual(caches["v2"].get("answer4", version=1), 42) self.assertIsNone(caches["v2"].get("answer4", version=2)) def test_cache_versioning_add(self): # add, default version = 1, but manually override version = 2 self.assertIs(cache.add("answer1", 42, version=2), True) self.assertIsNone(cache.get("answer1", version=1)) self.assertEqual(cache.get("answer1", version=2), 42) self.assertIs(cache.add("answer1", 37, version=2), False) self.assertIsNone(cache.get("answer1", version=1)) 
self.assertEqual(cache.get("answer1", version=2), 42) self.assertIs(cache.add("answer1", 37, version=1), True) self.assertEqual(cache.get("answer1", version=1), 37) self.assertEqual(cache.get("answer1", version=2), 42) # v2 add, using default version = 2 self.assertIs(caches["v2"].add("answer2", 42), True) self.assertIsNone(cache.get("answer2", version=1)) self.assertEqual(cache.get("answer2", version=2), 42) self.assertIs(caches["v2"].add("answer2", 37), False) self.assertIsNone(cache.get("answer2", version=1)) self.assertEqual(cache.get("answer2", version=2), 42) self.assertIs(caches["v2"].add("answer2", 37, version=1), True) self.assertEqual(cache.get("answer2", version=1), 37) self.assertEqual(cache.get("answer2", version=2), 42) # v2 add, default version = 2, but manually override version = 1 self.assertIs(caches["v2"].add("answer3", 42, version=1), True) self.assertEqual(cache.get("answer3", version=1), 42) self.assertIsNone(cache.get("answer3", version=2)) self.assertIs(caches["v2"].add("answer3", 37, version=1), False) self.assertEqual(cache.get("answer3", version=1), 42) self.assertIsNone(cache.get("answer3", version=2)) self.assertIs(caches["v2"].add("answer3", 37), True) self.assertEqual(cache.get("answer3", version=1), 42) self.assertEqual(cache.get("answer3", version=2), 37) def test_cache_versioning_has_key(self): cache.set("answer1", 42) # has_key self.assertIs(cache.has_key("answer1"), True) self.assertIs(cache.has_key("answer1", version=1), True) self.assertIs(cache.has_key("answer1", version=2), False) self.assertIs(caches["v2"].has_key("answer1"), False) self.assertIs(caches["v2"].has_key("answer1", version=1), True) self.assertIs(caches["v2"].has_key("answer1", version=2), False) def test_cache_versioning_delete(self): cache.set("answer1", 37, version=1) cache.set("answer1", 42, version=2) self.assertIs(cache.delete("answer1"), True) self.assertIsNone(cache.get("answer1", version=1)) self.assertEqual(cache.get("answer1", version=2), 42) cache.set("answer2", 37, version=1) cache.set("answer2", 42, version=2) self.assertIs(cache.delete("answer2", version=2), True) self.assertEqual(cache.get("answer2", version=1), 37) self.assertIsNone(cache.get("answer2", version=2)) cache.set("answer3", 37, version=1) cache.set("answer3", 42, version=2) self.assertIs(caches["v2"].delete("answer3"), True) self.assertEqual(cache.get("answer3", version=1), 37) self.assertIsNone(cache.get("answer3", version=2)) cache.set("answer4", 37, version=1) cache.set("answer4", 42, version=2) self.assertIs(caches["v2"].delete("answer4", version=1), True) self.assertIsNone(cache.get("answer4", version=1)) self.assertEqual(cache.get("answer4", version=2), 42) def test_cache_versioning_incr_decr(self): cache.set("answer1", 37, version=1) cache.set("answer1", 42, version=2) self.assertEqual(cache.incr("answer1"), 38) self.assertEqual(cache.get("answer1", version=1), 38) self.assertEqual(cache.get("answer1", version=2), 42) self.assertEqual(cache.decr("answer1"), 37) self.assertEqual(cache.get("answer1", version=1), 37) self.assertEqual(cache.get("answer1", version=2), 42) cache.set("answer2", 37, version=1) cache.set("answer2", 42, version=2) self.assertEqual(cache.incr("answer2", version=2), 43) self.assertEqual(cache.get("answer2", version=1), 37) self.assertEqual(cache.get("answer2", version=2), 43) self.assertEqual(cache.decr("answer2", version=2), 42) self.assertEqual(cache.get("answer2", version=1), 37) self.assertEqual(cache.get("answer2", version=2), 42) cache.set("answer3", 37, version=1) 
cache.set("answer3", 42, version=2) self.assertEqual(caches["v2"].incr("answer3"), 43) self.assertEqual(cache.get("answer3", version=1), 37) self.assertEqual(cache.get("answer3", version=2), 43) self.assertEqual(caches["v2"].decr("answer3"), 42) self.assertEqual(cache.get("answer3", version=1), 37) self.assertEqual(cache.get("answer3", version=2), 42) cache.set("answer4", 37, version=1) cache.set("answer4", 42, version=2) self.assertEqual(caches["v2"].incr("answer4", version=1), 38) self.assertEqual(cache.get("answer4", version=1), 38) self.assertEqual(cache.get("answer4", version=2), 42) self.assertEqual(caches["v2"].decr("answer4", version=1), 37) self.assertEqual(cache.get("answer4", version=1), 37) self.assertEqual(cache.get("answer4", version=2), 42) def test_cache_versioning_get_set_many(self): # set, using default version = 1 cache.set_many({"ford1": 37, "arthur1": 42}) self.assertEqual( cache.get_many(["ford1", "arthur1"]), {"ford1": 37, "arthur1": 42} ) self.assertEqual( cache.get_many(["ford1", "arthur1"], version=1), {"ford1": 37, "arthur1": 42}, ) self.assertEqual(cache.get_many(["ford1", "arthur1"], version=2), {}) self.assertEqual(caches["v2"].get_many(["ford1", "arthur1"]), {}) self.assertEqual( caches["v2"].get_many(["ford1", "arthur1"], version=1), {"ford1": 37, "arthur1": 42}, ) self.assertEqual(caches["v2"].get_many(["ford1", "arthur1"], version=2), {}) # set, default version = 1, but manually override version = 2 cache.set_many({"ford2": 37, "arthur2": 42}, version=2) self.assertEqual(cache.get_many(["ford2", "arthur2"]), {}) self.assertEqual(cache.get_many(["ford2", "arthur2"], version=1), {}) self.assertEqual( cache.get_many(["ford2", "arthur2"], version=2), {"ford2": 37, "arthur2": 42}, ) self.assertEqual( caches["v2"].get_many(["ford2", "arthur2"]), {"ford2": 37, "arthur2": 42} ) self.assertEqual(caches["v2"].get_many(["ford2", "arthur2"], version=1), {}) self.assertEqual( caches["v2"].get_many(["ford2", "arthur2"], version=2), {"ford2": 37, "arthur2": 42}, ) # v2 set, using default version = 2 caches["v2"].set_many({"ford3": 37, "arthur3": 42}) self.assertEqual(cache.get_many(["ford3", "arthur3"]), {}) self.assertEqual(cache.get_many(["ford3", "arthur3"], version=1), {}) self.assertEqual( cache.get_many(["ford3", "arthur3"], version=2), {"ford3": 37, "arthur3": 42}, ) self.assertEqual( caches["v2"].get_many(["ford3", "arthur3"]), {"ford3": 37, "arthur3": 42} ) self.assertEqual(caches["v2"].get_many(["ford3", "arthur3"], version=1), {}) self.assertEqual( caches["v2"].get_many(["ford3", "arthur3"], version=2), {"ford3": 37, "arthur3": 42}, ) # v2 set, default version = 2, but manually override version = 1 caches["v2"].set_many({"ford4": 37, "arthur4": 42}, version=1) self.assertEqual( cache.get_many(["ford4", "arthur4"]), {"ford4": 37, "arthur4": 42} ) self.assertEqual( cache.get_many(["ford4", "arthur4"], version=1), {"ford4": 37, "arthur4": 42}, ) self.assertEqual(cache.get_many(["ford4", "arthur4"], version=2), {}) self.assertEqual(caches["v2"].get_many(["ford4", "arthur4"]), {}) self.assertEqual( caches["v2"].get_many(["ford4", "arthur4"], version=1), {"ford4": 37, "arthur4": 42}, ) self.assertEqual(caches["v2"].get_many(["ford4", "arthur4"], version=2), {}) def test_incr_version(self): cache.set("answer", 42, version=2) self.assertIsNone(cache.get("answer")) self.assertIsNone(cache.get("answer", version=1)) self.assertEqual(cache.get("answer", version=2), 42) self.assertIsNone(cache.get("answer", version=3)) self.assertEqual(cache.incr_version("answer", 
version=2), 3) self.assertIsNone(cache.get("answer")) self.assertIsNone(cache.get("answer", version=1)) self.assertIsNone(cache.get("answer", version=2)) self.assertEqual(cache.get("answer", version=3), 42) caches["v2"].set("answer2", 42) self.assertEqual(caches["v2"].get("answer2"), 42) self.assertIsNone(caches["v2"].get("answer2", version=1)) self.assertEqual(caches["v2"].get("answer2", version=2), 42) self.assertIsNone(caches["v2"].get("answer2", version=3)) self.assertEqual(caches["v2"].incr_version("answer2"), 3) self.assertIsNone(caches["v2"].get("answer2")) self.assertIsNone(caches["v2"].get("answer2", version=1)) self.assertIsNone(caches["v2"].get("answer2", version=2)) self.assertEqual(caches["v2"].get("answer2", version=3), 42) with self.assertRaises(ValueError): cache.incr_version("does_not_exist") cache.set("null", None) self.assertEqual(cache.incr_version("null"), 2) def test_decr_version(self): cache.set("answer", 42, version=2) self.assertIsNone(cache.get("answer")) self.assertIsNone(cache.get("answer", version=1)) self.assertEqual(cache.get("answer", version=2), 42) self.assertEqual(cache.decr_version("answer", version=2), 1) self.assertEqual(cache.get("answer"), 42) self.assertEqual(cache.get("answer", version=1), 42) self.assertIsNone(cache.get("answer", version=2)) caches["v2"].set("answer2", 42) self.assertEqual(caches["v2"].get("answer2"), 42) self.assertIsNone(caches["v2"].get("answer2", version=1)) self.assertEqual(caches["v2"].get("answer2", version=2), 42) self.assertEqual(caches["v2"].decr_version("answer2"), 1) self.assertIsNone(caches["v2"].get("answer2")) self.assertEqual(caches["v2"].get("answer2", version=1), 42) self.assertIsNone(caches["v2"].get("answer2", version=2)) with self.assertRaises(ValueError): cache.decr_version("does_not_exist", version=2) cache.set("null", None, version=2) self.assertEqual(cache.decr_version("null", version=2), 1) def test_custom_key_func(self): # Two caches with different key functions aren't visible to each other cache.set("answer1", 42) self.assertEqual(cache.get("answer1"), 42) self.assertIsNone(caches["custom_key"].get("answer1")) self.assertIsNone(caches["custom_key2"].get("answer1")) caches["custom_key"].set("answer2", 42) self.assertIsNone(cache.get("answer2")) self.assertEqual(caches["custom_key"].get("answer2"), 42) self.assertEqual(caches["custom_key2"].get("answer2"), 42) @override_settings(CACHE_MIDDLEWARE_ALIAS=DEFAULT_CACHE_ALIAS) def test_cache_write_unpicklable_object(self): fetch_middleware = FetchFromCacheMiddleware(empty_response) request = self.factory.get("/cache/test") request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware(empty_response).process_request( request ) self.assertIsNone(get_cache_data) content = "Testing cookie serialization." 
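        # The response built below sets a cookie; the assertions that follow
        # check that both the body and the cookies survive a cache round trip.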
def get_response(req): response = HttpResponse(content) response.set_cookie("foo", "bar") return response update_middleware = UpdateCacheMiddleware(get_response) response = update_middleware(request) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) UpdateCacheMiddleware(lambda req: get_cache_data)(request) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) def test_add_fail_on_pickleerror(self): # Shouldn't fail silently if trying to cache an unpicklable type. with self.assertRaises(pickle.PickleError): cache.add("unpicklable", Unpicklable()) def test_set_fail_on_pickleerror(self): with self.assertRaises(pickle.PickleError): cache.set("unpicklable", Unpicklable()) def test_get_or_set(self): self.assertIsNone(cache.get("projector")) self.assertEqual(cache.get_or_set("projector", 42), 42) self.assertEqual(cache.get("projector"), 42) self.assertIsNone(cache.get_or_set("null", None)) # Previous get_or_set() stores None in the cache. self.assertIsNone(cache.get("null", "default")) def test_get_or_set_callable(self): def my_callable(): return "value" self.assertEqual(cache.get_or_set("mykey", my_callable), "value") self.assertEqual(cache.get_or_set("mykey", my_callable()), "value") self.assertIsNone(cache.get_or_set("null", lambda: None)) # Previous get_or_set() stores None in the cache. self.assertIsNone(cache.get("null", "default")) def test_get_or_set_version(self): msg = "get_or_set() missing 1 required positional argument: 'default'" self.assertEqual(cache.get_or_set("brian", 1979, version=2), 1979) with self.assertRaisesMessage(TypeError, msg): cache.get_or_set("brian") with self.assertRaisesMessage(TypeError, msg): cache.get_or_set("brian", version=1) self.assertIsNone(cache.get("brian", version=1)) self.assertEqual(cache.get_or_set("brian", 42, version=1), 42) self.assertEqual(cache.get_or_set("brian", 1979, version=2), 1979) self.assertIsNone(cache.get("brian", version=3)) def test_get_or_set_racing(self): with mock.patch( "%s.%s" % (settings.CACHES["default"]["BACKEND"], "add") ) as cache_add: # Simulate cache.add() failing to add a value. In that case, the # default value should be returned. cache_add.return_value = False self.assertEqual(cache.get_or_set("key", "default"), "default") @override_settings( CACHES=caches_setting_for_tests( BACKEND="django.core.cache.backends.db.DatabaseCache", # Spaces are used in the table name to ensure quoting/escaping is working LOCATION="test cache table", ) ) class DBCacheTests(BaseCacheTests, TransactionTestCase): available_apps = ["cache"] def setUp(self): # The super calls needs to happen first for the settings override. super().setUp() self.create_table() def tearDown(self): # The super call needs to happen first because it uses the database. 
super().tearDown() self.drop_table() def create_table(self): management.call_command("createcachetable", verbosity=0) def drop_table(self): with connection.cursor() as cursor: table_name = connection.ops.quote_name("test cache table") cursor.execute("DROP TABLE %s" % table_name) def test_get_many_num_queries(self): cache.set_many({"a": 1, "b": 2}) cache.set("expired", "expired", 0.01) with self.assertNumQueries(1): self.assertEqual(cache.get_many(["a", "b"]), {"a": 1, "b": 2}) time.sleep(0.02) with self.assertNumQueries(2): self.assertEqual(cache.get_many(["a", "b", "expired"]), {"a": 1, "b": 2}) def test_delete_many_num_queries(self): cache.set_many({"a": 1, "b": 2, "c": 3}) with self.assertNumQueries(1): cache.delete_many(["a", "b", "c"]) def test_cull_queries(self): old_max_entries = cache._max_entries # Force _cull to delete on first cached record. cache._max_entries = -1 with CaptureQueriesContext(connection) as captured_queries: try: cache.set("force_cull", "value", 1000) finally: cache._max_entries = old_max_entries num_count_queries = sum("COUNT" in query["sql"] for query in captured_queries) self.assertEqual(num_count_queries, 1) # Column names are quoted. for query in captured_queries: sql = query["sql"] if "expires" in sql: self.assertIn(connection.ops.quote_name("expires"), sql) if "cache_key" in sql: self.assertIn(connection.ops.quote_name("cache_key"), sql) def test_delete_cursor_rowcount(self): """ The rowcount attribute should not be checked on a closed cursor. """ class MockedCursorWrapper(CursorWrapper): is_closed = False def close(self): self.cursor.close() self.is_closed = True @property def rowcount(self): if self.is_closed: raise Exception("Cursor is closed.") return self.cursor.rowcount cache.set_many({"a": 1, "b": 2}) with mock.patch("django.db.backends.utils.CursorWrapper", MockedCursorWrapper): self.assertIs(cache.delete("a"), True) def test_zero_cull(self): self._perform_cull_test("zero_cull", 50, 18) def test_second_call_doesnt_crash(self): out = io.StringIO() management.call_command("createcachetable", stdout=out) self.assertEqual( out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES), ) @override_settings( CACHES=caches_setting_for_tests( BACKEND="django.core.cache.backends.db.DatabaseCache", # Use another table name to avoid the 'table already exists' message. LOCATION="createcachetable_dry_run_mode", ) ) def test_createcachetable_dry_run_mode(self): out = io.StringIO() management.call_command("createcachetable", dry_run=True, stdout=out) output = out.getvalue() self.assertTrue(output.startswith("CREATE TABLE")) def test_createcachetable_with_table_argument(self): """ Delete and recreate cache table with legacy behavior (explicitly specifying the table name). """ self.drop_table() out = io.StringIO() management.call_command( "createcachetable", "test cache table", verbosity=2, stdout=out, ) self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n") def test_has_key_query_columns_quoted(self): with CaptureQueriesContext(connection) as captured_queries: cache.has_key("key") self.assertEqual(len(captured_queries), 1) sql = captured_queries[0]["sql"] # Column names are quoted. 
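        # For example, quote_name("expires") yields "expires" on PostgreSQL or
        # SQLite and `expires` on MySQL, so the check works across backends.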
self.assertIn(connection.ops.quote_name("expires"), sql) self.assertIn(connection.ops.quote_name("cache_key"), sql) @override_settings(USE_TZ=True) class DBCacheWithTimeZoneTests(DBCacheTests): pass class DBCacheRouter: """A router that puts the cache table on the 'other' database.""" def db_for_read(self, model, **hints): if model._meta.app_label == "django_cache": return "other" return None def db_for_write(self, model, **hints): if model._meta.app_label == "django_cache": return "other" return None def allow_migrate(self, db, app_label, **hints): if app_label == "django_cache": return db == "other" return None @override_settings( CACHES={ "default": { "BACKEND": "django.core.cache.backends.db.DatabaseCache", "LOCATION": "my_cache_table", }, }, ) class CreateCacheTableForDBCacheTests(TestCase): databases = {"default", "other"} @override_settings(DATABASE_ROUTERS=[DBCacheRouter()]) def test_createcachetable_observes_database_router(self): # cache table should not be created on 'default' with self.assertNumQueries(0, using="default"): management.call_command("createcachetable", database="default", verbosity=0) # cache table should be created on 'other' # Queries: # 1: check table doesn't already exist # 2: create savepoint (if transactional DDL is supported) # 3: create the table # 4: create the index # 5: release savepoint (if transactional DDL is supported) num = 5 if connections["other"].features.can_rollback_ddl else 3 with self.assertNumQueries(num, using="other"): management.call_command("createcachetable", database="other", verbosity=0) class PicklingSideEffect: def __init__(self, cache): self.cache = cache self.locked = False def __getstate__(self): self.locked = self.cache._lock.locked() return {} limit_locmem_entries = override_settings( CACHES=caches_setting_for_tests( BACKEND="django.core.cache.backends.locmem.LocMemCache", OPTIONS={"MAX_ENTRIES": 9}, ) ) @override_settings( CACHES=caches_setting_for_tests( BACKEND="django.core.cache.backends.locmem.LocMemCache", ) ) class LocMemCacheTests(BaseCacheTests, TestCase): def setUp(self): super().setUp() # LocMem requires a hack to make the other caches # share a data store with the 'normal' cache. 
caches["prefix"]._cache = cache._cache caches["prefix"]._expire_info = cache._expire_info caches["v2"]._cache = cache._cache caches["v2"]._expire_info = cache._expire_info caches["custom_key"]._cache = cache._cache caches["custom_key"]._expire_info = cache._expire_info caches["custom_key2"]._cache = cache._cache caches["custom_key2"]._expire_info = cache._expire_info @override_settings( CACHES={ "default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}, "other": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "other", }, } ) def test_multiple_caches(self): "Multiple locmem caches are isolated" cache.set("value", 42) self.assertEqual(caches["default"].get("value"), 42) self.assertIsNone(caches["other"].get("value")) def test_locking_on_pickle(self): """#20613/#18541 -- Ensures pickling is done outside of the lock.""" bad_obj = PicklingSideEffect(cache) cache.set("set", bad_obj) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") self.assertIs(cache.add("add", bad_obj), True) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") def test_incr_decr_timeout(self): """incr/decr does not modify expiry time (matches memcached behavior)""" key = "value" _key = cache.make_key(key) cache.set(key, 1, timeout=cache.default_timeout * 10) expire = cache._expire_info[_key] self.assertEqual(cache.incr(key), 2) self.assertEqual(expire, cache._expire_info[_key]) self.assertEqual(cache.decr(key), 1) self.assertEqual(expire, cache._expire_info[_key]) @limit_locmem_entries def test_lru_get(self): """get() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key) cache.set(9, 9, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key) for key in range(6, 9): self.assertIsNone(cache.get(key)) self.assertEqual(cache.get(9), 9) @limit_locmem_entries def test_lru_set(self): """set() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(3, 9): cache.set(key, key, timeout=None) cache.set(9, 9, timeout=None) for key in range(3, 10): self.assertEqual(cache.get(key), key) for key in range(3): self.assertIsNone(cache.get(key)) @limit_locmem_entries def test_lru_incr(self): """incr() moves cache keys.""" for key in range(9): cache.set(key, key, timeout=None) for key in range(6): self.assertEqual(cache.incr(key), key + 1) cache.set(9, 9, timeout=None) for key in range(6): self.assertEqual(cache.get(key), key + 1) for key in range(6, 9): self.assertIsNone(cache.get(key)) self.assertEqual(cache.get(9), 9) # memcached and redis backends aren't guaranteed to be available. # To check the backends, the test settings file will need to contain at least # one cache backend setting that points at your cache server. configured_caches = {} for _cache_params in settings.CACHES.values(): configured_caches[_cache_params["BACKEND"]] = _cache_params PyLibMCCache_params = configured_caches.get( "django.core.cache.backends.memcached.PyLibMCCache" ) PyMemcacheCache_params = configured_caches.get( "django.core.cache.backends.memcached.PyMemcacheCache" ) # The memcached backends don't support cull-related options like `MAX_ENTRIES`. memcached_excluded_caches = {"cull", "zero_cull"} RedisCache_params = configured_caches.get("django.core.cache.backends.redis.RedisCache") # The redis backend does not support cull-related options like `MAX_ENTRIES`. 
redis_excluded_caches = {"cull", "zero_cull"} class BaseMemcachedTests(BaseCacheTests): # By default it's assumed that the client doesn't clean up connections # properly, in which case the backend must do so after each request. should_disconnect_on_close = True def test_location_multiple_servers(self): locations = [ ["server1.tld", "server2:11211"], "server1.tld;server2:11211", "server1.tld,server2:11211", ] for location in locations: with self.subTest(location=location): params = {"BACKEND": self.base_params["BACKEND"], "LOCATION": location} with self.settings(CACHES={"default": params}): self.assertEqual(cache._servers, ["server1.tld", "server2:11211"]) def _perform_invalid_key_test(self, key, expected_warning): """ While other backends merely warn, memcached should raise for an invalid key. """ msg = expected_warning.replace(key, cache.make_key(key)) tests = [ ("add", [key, 1]), ("get", [key]), ("set", [key, 1]), ("incr", [key]), ("decr", [key]), ("touch", [key]), ("delete", [key]), ("get_many", [[key, "b"]]), ("set_many", [{key: 1, "b": 2}]), ("delete_many", [[key, "b"]]), ] for operation, args in tests: with self.subTest(operation=operation): with self.assertRaises(InvalidCacheKey) as cm: getattr(cache, operation)(*args) self.assertEqual(str(cm.exception), msg) def test_invalid_with_version_key_length(self): # make_key() adds a version to the key and exceeds the limit. key = "a" * 248 expected_warning = ( "Cache key will cause errors if used with memcached: " "%r (longer than %s)" % (key, 250) ) self._perform_invalid_key_test(key, expected_warning) def test_default_never_expiring_timeout(self): # Regression test for #22845 with self.settings( CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, TIMEOUT=None ) ): cache.set("infinite_foo", "bar") self.assertEqual(cache.get("infinite_foo"), "bar") def test_default_far_future_timeout(self): # Regression test for #22845 with self.settings( CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, # 60*60*24*365, 1 year TIMEOUT=31536000, ) ): cache.set("future_foo", "bar") self.assertEqual(cache.get("future_foo"), "bar") def test_memcached_deletes_key_on_failed_set(self): # By default memcached allows objects up to 1MB. For the cache_db session # backend to always use the current session, memcached needs to delete # the old key if it fails to set. max_value_length = 2**20 cache.set("small_value", "a") self.assertEqual(cache.get("small_value"), "a") large_value = "a" * (max_value_length + 1) try: cache.set("small_value", large_value) except Exception: # Most clients (e.g. pymemcache or pylibmc) raise when the value is # too large. This test is primarily checking that the key was # deleted, so the return/exception behavior for the set() itself is # not important. pass # small_value should be deleted, or set if configured to accept larger values value = cache.get("small_value") self.assertTrue(value is None or value == large_value) def test_close(self): # For clients that don't manage their connections properly, the # connection is closed when the request is complete. 
signals.request_finished.disconnect(close_old_connections) try: with mock.patch.object( cache._class, "disconnect_all", autospec=True ) as mock_disconnect: signals.request_finished.send(self.__class__) self.assertIs(mock_disconnect.called, self.should_disconnect_on_close) finally: signals.request_finished.connect(close_old_connections) def test_set_many_returns_failing_keys(self): def fail_set_multi(mapping, *args, **kwargs): return mapping.keys() with mock.patch.object(cache._class, "set_multi", side_effect=fail_set_multi): failing_keys = cache.set_many({"key": "value"}) self.assertEqual(failing_keys, ["key"]) @unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured") @override_settings( CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, ) ) class PyLibMCCacheTests(BaseMemcachedTests, TestCase): base_params = PyLibMCCache_params # libmemcached manages its own connections. should_disconnect_on_close = False @property def incr_decr_type_error(self): return cache._lib.ClientError @override_settings( CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, OPTIONS={ "binary": True, "behaviors": {"tcp_nodelay": True}, }, ) ) def test_pylibmc_options(self): self.assertTrue(cache._cache.binary) self.assertEqual(cache._cache.behaviors["tcp_nodelay"], int(True)) def test_pylibmc_client_servers(self): backend = self.base_params["BACKEND"] tests = [ ("unix:/run/memcached/socket", "/run/memcached/socket"), ("/run/memcached/socket", "/run/memcached/socket"), ("localhost", "localhost"), ("localhost:11211", "localhost:11211"), ("[::1]", "[::1]"), ("[::1]:11211", "[::1]:11211"), ("127.0.0.1", "127.0.0.1"), ("127.0.0.1:11211", "127.0.0.1:11211"), ] for location, expected in tests: settings = {"default": {"BACKEND": backend, "LOCATION": location}} with self.subTest(location), self.settings(CACHES=settings): self.assertEqual(cache.client_servers, [expected]) @unittest.skipUnless(PyMemcacheCache_params, "PyMemcacheCache backend not configured") @override_settings( CACHES=caches_setting_for_tests( base=PyMemcacheCache_params, exclude=memcached_excluded_caches, ) ) class PyMemcacheCacheTests(BaseMemcachedTests, TestCase): base_params = PyMemcacheCache_params @property def incr_decr_type_error(self): return cache._lib.exceptions.MemcacheClientError def test_pymemcache_highest_pickle_version(self): self.assertEqual( cache._cache.default_kwargs["serde"]._serialize_func.keywords[ "pickle_version" ], pickle.HIGHEST_PROTOCOL, ) for cache_key in settings.CACHES: for client_key, client in caches[cache_key]._cache.clients.items(): with self.subTest(cache_key=cache_key, server=client_key): self.assertEqual( client.serde._serialize_func.keywords["pickle_version"], pickle.HIGHEST_PROTOCOL, ) @override_settings( CACHES=caches_setting_for_tests( base=PyMemcacheCache_params, exclude=memcached_excluded_caches, OPTIONS={"no_delay": True}, ) ) def test_pymemcache_options(self): self.assertIs(cache._cache.default_kwargs["no_delay"], True) @override_settings( CACHES=caches_setting_for_tests( BACKEND="django.core.cache.backends.filebased.FileBasedCache", ) ) class FileBasedCacheTests(BaseCacheTests, TestCase): """ Specific test cases for the file-based cache. """ def setUp(self): super().setUp() self.dirname = self.mkdtemp() # Caches location cannot be modified through override_settings / # modify_settings, hence settings are manipulated directly here and the # setting_changed signal is triggered manually. 
for cache_params in settings.CACHES.values(): cache_params["LOCATION"] = self.dirname setting_changed.send(self.__class__, setting="CACHES", enter=False) def tearDown(self): super().tearDown() # Call parent first, as cache.clear() may recreate cache base directory shutil.rmtree(self.dirname) def mkdtemp(self): return tempfile.mkdtemp() def test_ignores_non_cache_files(self): fname = os.path.join(self.dirname, "not-a-cache-file") with open(fname, "w"): os.utime(fname, None) cache.clear() self.assertTrue( os.path.exists(fname), "Expected cache.clear to ignore non cache files" ) os.remove(fname) def test_clear_does_not_remove_cache_dir(self): cache.clear() self.assertTrue( os.path.exists(self.dirname), "Expected cache.clear to keep the cache dir" ) def test_creates_cache_dir_if_nonexistent(self): os.rmdir(self.dirname) cache.set("foo", "bar") self.assertTrue(os.path.exists(self.dirname)) def test_get_ignores_enoent(self): cache.set("foo", "bar") os.unlink(cache._key_to_file("foo")) # Returns the default instead of erroring. self.assertEqual(cache.get("foo", "baz"), "baz") @skipIf( sys.platform == "win32", "Windows only partially supports umasks and chmod.", ) def test_cache_dir_permissions(self): os.rmdir(self.dirname) dir_path = Path(self.dirname) / "nested" / "filebasedcache" for cache_params in settings.CACHES.values(): cache_params["LOCATION"] = dir_path setting_changed.send(self.__class__, setting="CACHES", enter=False) cache.set("foo", "bar") self.assertIs(dir_path.exists(), True) tests = [ dir_path, dir_path.parent, dir_path.parent.parent, ] for directory in tests: with self.subTest(directory=directory): dir_mode = directory.stat().st_mode & 0o777 self.assertEqual(dir_mode, 0o700) def test_get_does_not_ignore_non_filenotfound_exceptions(self): with mock.patch("builtins.open", side_effect=OSError): with self.assertRaises(OSError): cache.get("foo") def test_empty_cache_file_considered_expired(self): cache_file = cache._key_to_file("foo") with open(cache_file, "wb") as fh: fh.write(b"") with open(cache_file, "rb") as fh: self.assertIs(cache._is_expired(fh), True) @unittest.skipUnless(RedisCache_params, "Redis backend not configured") @override_settings( CACHES=caches_setting_for_tests( base=RedisCache_params, exclude=redis_excluded_caches, ) ) class RedisCacheTests(BaseCacheTests, TestCase): def setUp(self): import redis super().setUp() self.lib = redis @property def incr_decr_type_error(self): return self.lib.ResponseError def test_cache_client_class(self): self.assertIs(cache._class, RedisCacheClient) self.assertIsInstance(cache._cache, RedisCacheClient) def test_get_backend_timeout_method(self): positive_timeout = 10 positive_backend_timeout = cache.get_backend_timeout(positive_timeout) self.assertEqual(positive_backend_timeout, positive_timeout) negative_timeout = -5 negative_backend_timeout = cache.get_backend_timeout(negative_timeout) self.assertEqual(negative_backend_timeout, 0) none_timeout = None none_backend_timeout = cache.get_backend_timeout(none_timeout) self.assertIsNone(none_backend_timeout) def test_get_connection_pool_index(self): pool_index = cache._cache._get_connection_pool_index(write=True) self.assertEqual(pool_index, 0) pool_index = cache._cache._get_connection_pool_index(write=False) if len(cache._cache._servers) == 1: self.assertEqual(pool_index, 0) else: self.assertGreater(pool_index, 0) self.assertLess(pool_index, len(cache._cache._servers)) def test_get_connection_pool(self): pool = cache._cache._get_connection_pool(write=True) self.assertIsInstance(pool, 
self.lib.ConnectionPool) pool = cache._cache._get_connection_pool(write=False) self.assertIsInstance(pool, self.lib.ConnectionPool) def test_get_client(self): self.assertIsInstance(cache._cache.get_client(), self.lib.Redis) def test_serializer_dumps(self): self.assertEqual(cache._cache._serializer.dumps(123), 123) self.assertIsInstance(cache._cache._serializer.dumps(True), bytes) self.assertIsInstance(cache._cache._serializer.dumps("abc"), bytes) @override_settings( CACHES=caches_setting_for_tests( base=RedisCache_params, exclude=redis_excluded_caches, OPTIONS={ "db": 5, "socket_timeout": 0.1, "retry_on_timeout": True, }, ) ) def test_redis_pool_options(self): pool = cache._cache._get_connection_pool(write=False) self.assertEqual(pool.connection_kwargs["db"], 5) self.assertEqual(pool.connection_kwargs["socket_timeout"], 0.1) self.assertIs(pool.connection_kwargs["retry_on_timeout"], True) class FileBasedCachePathLibTests(FileBasedCacheTests): def mkdtemp(self): tmp_dir = super().mkdtemp() return Path(tmp_dir) @override_settings( CACHES={ "default": { "BACKEND": "cache.liberal_backend.CacheClass", }, } ) class CustomCacheKeyValidationTests(SimpleTestCase): """ Tests for the ability to mixin a custom ``validate_key`` method to a custom cache backend that otherwise inherits from a builtin backend, and override the default key validation. Refs #6447. """ def test_custom_key_validation(self): # this key is both longer than 250 characters, and has spaces key = "some key with spaces" * 15 val = "a value" cache.set(key, val) self.assertEqual(cache.get(key), val) @override_settings( CACHES={ "default": { "BACKEND": "cache.closeable_cache.CacheClass", } } ) class CacheClosingTests(SimpleTestCase): def test_close(self): self.assertFalse(cache.closed) signals.request_finished.send(self.__class__) self.assertTrue(cache.closed) def test_close_only_initialized(self): with self.settings( CACHES={ "cache_1": { "BACKEND": "cache.closeable_cache.CacheClass", }, "cache_2": { "BACKEND": "cache.closeable_cache.CacheClass", }, } ): self.assertEqual(caches.all(initialized_only=True), []) signals.request_finished.send(self.__class__) self.assertEqual(caches.all(initialized_only=True), []) DEFAULT_MEMORY_CACHES_SETTINGS = { "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "unique-snowflake", } } NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS) NEVER_EXPIRING_CACHES_SETTINGS["default"]["TIMEOUT"] = None class DefaultNonExpiringCacheKeyTests(SimpleTestCase): """ Settings having Cache arguments with a TIMEOUT=None create Caches that will set non-expiring keys. """ def setUp(self): # The 5 minute (300 seconds) default expiration time for keys is # defined in the implementation of the initializer method of the # BaseCache type. self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout def tearDown(self): del self.DEFAULT_TIMEOUT def test_default_expiration_time_for_keys_is_5_minutes(self): """The default expiration time of a cache key is 5 minutes. This value is defined in django.core.cache.backends.base.BaseCache.__init__(). """ self.assertEqual(300, self.DEFAULT_TIMEOUT) def test_caches_with_unset_timeout_has_correct_default_timeout(self): """Caches that have the TIMEOUT parameter undefined in the default settings will use the default 5 minute timeout. 
""" cache = caches[DEFAULT_CACHE_ALIAS] self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout) @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS) def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self): """Memory caches that have the TIMEOUT parameter set to `None` in the default settings with have `None` as the default timeout. This means "no timeout". """ cache = caches[DEFAULT_CACHE_ALIAS] self.assertIsNone(cache.default_timeout) self.assertIsNone(cache.get_backend_timeout()) @override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS) def test_caches_with_unset_timeout_set_expiring_key(self): """Memory caches that have the TIMEOUT parameter unset will set cache keys having the default 5 minute timeout. """ key = "my-key" value = "my-value" cache = caches[DEFAULT_CACHE_ALIAS] cache.set(key, value) cache_key = cache.make_key(key) self.assertIsNotNone(cache._expire_info[cache_key]) @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS) def test_caches_set_with_timeout_as_none_set_non_expiring_key(self): """Memory caches that have the TIMEOUT parameter set to `None` will set a non expiring key by default. """ key = "another-key" value = "another-value" cache = caches[DEFAULT_CACHE_ALIAS] cache.set(key, value) cache_key = cache.make_key(key) self.assertIsNone(cache._expire_info[cache_key]) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="settingsprefix", CACHE_MIDDLEWARE_SECONDS=1, CACHES={ "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", }, }, USE_I18N=False, ALLOWED_HOSTS=[".example.com"], ) class CacheUtils(SimpleTestCase): """TestCase for django.utils.cache functions.""" host = "www.example.com" path = "/cache/test/" factory = RequestFactory(HTTP_HOST=host) def tearDown(self): cache.clear() def _get_request_cache(self, method="GET", query_string=None, update_cache=None): request = self._get_request( self.host, self.path, method, query_string=query_string ) request._cache_update_cache = update_cache if update_cache else True return request def test_patch_vary_headers(self): headers = ( # Initial vary, new headers, resulting vary. (None, ("Accept-Encoding",), "Accept-Encoding"), ("Accept-Encoding", ("accept-encoding",), "Accept-Encoding"), ("Accept-Encoding", ("ACCEPT-ENCODING",), "Accept-Encoding"), ("Cookie", ("Accept-Encoding",), "Cookie, Accept-Encoding"), ( "Cookie, Accept-Encoding", ("Accept-Encoding",), "Cookie, Accept-Encoding", ), ( "Cookie, Accept-Encoding", ("Accept-Encoding", "cookie"), "Cookie, Accept-Encoding", ), (None, ("Accept-Encoding", "COOKIE"), "Accept-Encoding, COOKIE"), ( "Cookie, Accept-Encoding", ("Accept-Encoding", "cookie"), "Cookie, Accept-Encoding", ), ( "Cookie , Accept-Encoding", ("Accept-Encoding", "cookie"), "Cookie, Accept-Encoding", ), ("*", ("Accept-Language", "Cookie"), "*"), ("Accept-Language, Cookie", ("*",), "*"), ) for initial_vary, newheaders, resulting_vary in headers: with self.subTest(initial_vary=initial_vary, newheaders=newheaders): response = HttpResponse() if initial_vary is not None: response.headers["Vary"] = initial_vary patch_vary_headers(response, newheaders) self.assertEqual(response.headers["Vary"], resulting_vary) def test_get_cache_key(self): request = self.factory.get(self.path) response = HttpResponse() # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) self.assertEqual( get_cache_key(request), "views.decorators.cache.cache_page.settingsprefix.GET." 
"18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e", ) # A specified key_prefix is taken into account. key_prefix = "localprefix" learn_cache_key(request, response, key_prefix=key_prefix) self.assertEqual( get_cache_key(request, key_prefix=key_prefix), "views.decorators.cache.cache_page.localprefix.GET." "18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e", ) def test_get_cache_key_with_query(self): request = self.factory.get(self.path, {"test": 1}) response = HttpResponse() # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) # The querystring is taken into account. self.assertEqual( get_cache_key(request), "views.decorators.cache.cache_page.settingsprefix.GET." "beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e", ) def test_cache_key_varies_by_url(self): """ get_cache_key keys differ by fully-qualified URL instead of path """ request1 = self.factory.get(self.path, HTTP_HOST="sub-1.example.com") learn_cache_key(request1, HttpResponse()) request2 = self.factory.get(self.path, HTTP_HOST="sub-2.example.com") learn_cache_key(request2, HttpResponse()) self.assertNotEqual(get_cache_key(request1), get_cache_key(request2)) def test_learn_cache_key(self): request = self.factory.head(self.path) response = HttpResponse() response.headers["Vary"] = "Pony" # Make sure that the Vary header is added to the key hash learn_cache_key(request, response) self.assertEqual( get_cache_key(request), "views.decorators.cache.cache_page.settingsprefix.GET." "18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e", ) def test_patch_cache_control(self): tests = ( # Initial Cache-Control, kwargs to patch_cache_control, expected # Cache-Control parts. (None, {"private": True}, {"private"}), ("", {"private": True}, {"private"}), # no-cache. 
("", {"no_cache": "Set-Cookie"}, {"no-cache=Set-Cookie"}), ("", {"no-cache": "Set-Cookie"}, {"no-cache=Set-Cookie"}), ("no-cache=Set-Cookie", {"no_cache": True}, {"no-cache"}), ("no-cache=Set-Cookie,no-cache=Link", {"no_cache": True}, {"no-cache"}), ( "no-cache=Set-Cookie", {"no_cache": "Link"}, {"no-cache=Set-Cookie", "no-cache=Link"}, ), ( "no-cache=Set-Cookie,no-cache=Link", {"no_cache": "Custom"}, {"no-cache=Set-Cookie", "no-cache=Link", "no-cache=Custom"}, ), # Test whether private/public attributes are mutually exclusive ("private", {"private": True}, {"private"}), ("private", {"public": True}, {"public"}), ("public", {"public": True}, {"public"}), ("public", {"private": True}, {"private"}), ( "must-revalidate,max-age=60,private", {"public": True}, {"must-revalidate", "max-age=60", "public"}, ), ( "must-revalidate,max-age=60,public", {"private": True}, {"must-revalidate", "max-age=60", "private"}, ), ( "must-revalidate,max-age=60", {"public": True}, {"must-revalidate", "max-age=60", "public"}, ), ) cc_delim_re = re.compile(r"\s*,\s*") for initial_cc, newheaders, expected_cc in tests: with self.subTest(initial_cc=initial_cc, newheaders=newheaders): response = HttpResponse() if initial_cc is not None: response.headers["Cache-Control"] = initial_cc patch_cache_control(response, **newheaders) parts = set(cc_delim_re.split(response.headers["Cache-Control"])) self.assertEqual(parts, expected_cc) @override_settings( CACHES={ "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "KEY_PREFIX": "cacheprefix", }, }, ) class PrefixedCacheUtils(CacheUtils): pass @override_settings( CACHE_MIDDLEWARE_SECONDS=60, CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHES={ "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", }, }, ) class CacheHEADTest(SimpleTestCase): path = "/cache/test/" factory = RequestFactory() def tearDown(self): cache.clear() def _set_cache(self, request, msg): return UpdateCacheMiddleware(lambda req: HttpResponse(msg))(request) def test_head_caches_correctly(self): test_content = "test content" request = self.factory.head(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware(empty_response).process_request( request ) self.assertIsNotNone(get_cache_data) self.assertEqual(test_content.encode(), get_cache_data.content) def test_head_with_cached_get(self): test_content = "test content" request = self.factory.get(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) get_cache_data = FetchFromCacheMiddleware(empty_response).process_request( request ) self.assertIsNotNone(get_cache_data) self.assertEqual(test_content.encode(), get_cache_data.content) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="settingsprefix", CACHES={ "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", }, }, LANGUAGES=[ ("en", "English"), ("es", "Spanish"), ], ) class CacheI18nTest(SimpleTestCase): path = "/cache/test/" factory = RequestFactory() def tearDown(self): cache.clear() @override_settings(USE_I18N=True, USE_TZ=False) def test_cache_key_i18n_translation(self): request = self.factory.get(self.path) lang = translation.get_language() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn( lang, key, "Cache keys should include the language name when translation is active", ) key2 = get_cache_key(request) 
self.assertEqual(key, key2) def check_accept_language_vary(self, accept_language, vary, reference_key): request = self.factory.get(self.path) request.META["HTTP_ACCEPT_LANGUAGE"] = accept_language request.META["HTTP_ACCEPT_ENCODING"] = "gzip;q=1.0, identity; q=0.5, *;q=0" response = HttpResponse() response.headers["Vary"] = vary key = learn_cache_key(request, response) key2 = get_cache_key(request) self.assertEqual(key, reference_key) self.assertEqual(key2, reference_key) @override_settings(USE_I18N=True, USE_TZ=False) def test_cache_key_i18n_translation_accept_language(self): lang = translation.get_language() self.assertEqual(lang, "en") request = self.factory.get(self.path) request.META["HTTP_ACCEPT_ENCODING"] = "gzip;q=1.0, identity; q=0.5, *;q=0" response = HttpResponse() response.headers["Vary"] = "accept-encoding" key = learn_cache_key(request, response) self.assertIn( lang, key, "Cache keys should include the language name when translation is active", ) self.check_accept_language_vary( "en-us", "cookie, accept-language, accept-encoding", key ) self.check_accept_language_vary( "en-US", "cookie, accept-encoding, accept-language", key ) self.check_accept_language_vary( "en-US,en;q=0.8", "accept-encoding, accept-language, cookie", key ) self.check_accept_language_vary( "en-US,en;q=0.8,ko;q=0.6", "accept-language, cookie, accept-encoding", key ) self.check_accept_language_vary( "ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ", "accept-encoding, cookie, accept-language", key, ) self.check_accept_language_vary( "ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4", "accept-language, accept-encoding, cookie", key, ) self.check_accept_language_vary( "ko;q=1.0,en;q=0.5", "cookie, accept-language, accept-encoding", key ) self.check_accept_language_vary( "ko, en", "cookie, accept-encoding, accept-language", key ) self.check_accept_language_vary( "ko-KR, en-US", "accept-encoding, accept-language, cookie", key ) @override_settings(USE_I18N=False, USE_TZ=True) def test_cache_key_i18n_timezone(self): request = self.factory.get(self.path) tz = timezone.get_current_timezone_name() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn( tz, key, "Cache keys should include the time zone name when time zones are active", ) key2 = get_cache_key(request) self.assertEqual(key, key2) @override_settings(USE_I18N=False) def test_cache_key_no_i18n(self): request = self.factory.get(self.path) lang = translation.get_language() tz = timezone.get_current_timezone_name() response = HttpResponse() key = learn_cache_key(request, response) self.assertNotIn( lang, key, "Cache keys shouldn't include the language name when i18n isn't active", ) self.assertNotIn( tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active", ) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, USE_I18N=True, ) def test_middleware(self): def set_cache(request, lang, msg): def get_response(req): return HttpResponse(msg) translation.activate(lang) return UpdateCacheMiddleware(get_response)(request) # cache with non empty request.GET request = self.factory.get(self.path, {"foo": "bar", "other": "true"}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware(empty_response).process_request( request ) # first access, cache must return None self.assertIsNone(get_cache_data) content = "Check for cache with QUERY_STRING" def get_response(req): return HttpResponse(content) UpdateCacheMiddleware(get_response)(request) get_cache_data = 
FetchFromCacheMiddleware(empty_response).process_request( request ) # cache must return content self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) # different QUERY_STRING, cache must be empty request = self.factory.get(self.path, {"foo": "bar", "somethingelse": "true"}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware(empty_response).process_request( request ) self.assertIsNone(get_cache_data) # i18n tests en_message = "Hello world!" es_message = "Hola mundo!" request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, "en", en_message) get_cache_data = FetchFromCacheMiddleware(empty_response).process_request( request ) # The cache can be recovered self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, en_message.encode()) # change the session language and set content request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, "es", es_message) # change again the language translation.activate("en") # retrieve the content from cache get_cache_data = FetchFromCacheMiddleware(empty_response).process_request( request ) self.assertEqual(get_cache_data.content, en_message.encode()) # change again the language translation.activate("es") get_cache_data = FetchFromCacheMiddleware(empty_response).process_request( request ) self.assertEqual(get_cache_data.content, es_message.encode()) # reset the language translation.deactivate() @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, ) def test_middleware_doesnt_cache_streaming_response(self): request = self.factory.get(self.path) get_cache_data = FetchFromCacheMiddleware(empty_response).process_request( request ) self.assertIsNone(get_cache_data) def get_stream_response(req): return StreamingHttpResponse(["Check for cache with streaming content."]) UpdateCacheMiddleware(get_stream_response)(request) get_cache_data = FetchFromCacheMiddleware(empty_response).process_request( request ) self.assertIsNone(get_cache_data) @override_settings( CACHES={ "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "KEY_PREFIX": "cacheprefix", }, }, ) class PrefixedCacheI18nTest(CacheI18nTest): pass def hello_world_view(request, value): return HttpResponse("Hello World %s" % value) def csrf_view(request): return HttpResponse(csrf(request)["csrf_token"]) @override_settings( CACHE_MIDDLEWARE_ALIAS="other", CACHE_MIDDLEWARE_KEY_PREFIX="middlewareprefix", CACHE_MIDDLEWARE_SECONDS=30, CACHES={ "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", }, "other": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "other", "TIMEOUT": "1", }, }, ) class CacheMiddlewareTest(SimpleTestCase): factory = RequestFactory() def setUp(self): self.default_cache = caches["default"] self.other_cache = caches["other"] def tearDown(self): self.default_cache.clear() self.other_cache.clear() super().tearDown() def test_constructor(self): """ The constructor is correctly distinguishing between usage of CacheMiddleware as Middleware vs. usage of CacheMiddleware as view decorator and setting attributes appropriately. """ # If only one argument is passed in construction, it's being used as # middleware. 
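        # In that case the timeout, key prefix, and cache alias all come from
        # the CACHE_MIDDLEWARE_* settings overridden on this test class.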
middleware = CacheMiddleware(empty_response) # Now test object attributes against values defined in setUp above self.assertEqual(middleware.cache_timeout, 30) self.assertEqual(middleware.key_prefix, "middlewareprefix") self.assertEqual(middleware.cache_alias, "other") self.assertEqual(middleware.cache, self.other_cache) # If more arguments are being passed in construction, it's being used # as a decorator. First, test with "defaults": as_view_decorator = CacheMiddleware( empty_response, cache_alias=None, key_prefix=None ) self.assertEqual( as_view_decorator.cache_timeout, 30 ) # Timeout value for 'default' cache, i.e. 30 self.assertEqual(as_view_decorator.key_prefix, "") # Value of DEFAULT_CACHE_ALIAS from django.core.cache self.assertEqual(as_view_decorator.cache_alias, "default") self.assertEqual(as_view_decorator.cache, self.default_cache) # Next, test with custom values: as_view_decorator_with_custom = CacheMiddleware( hello_world_view, cache_timeout=60, cache_alias="other", key_prefix="foo" ) self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60) self.assertEqual(as_view_decorator_with_custom.key_prefix, "foo") self.assertEqual(as_view_decorator_with_custom.cache_alias, "other") self.assertEqual(as_view_decorator_with_custom.cache, self.other_cache) def test_update_cache_middleware_constructor(self): middleware = UpdateCacheMiddleware(empty_response) self.assertEqual(middleware.cache_timeout, 30) self.assertIsNone(middleware.page_timeout) self.assertEqual(middleware.key_prefix, "middlewareprefix") self.assertEqual(middleware.cache_alias, "other") self.assertEqual(middleware.cache, self.other_cache) def test_fetch_cache_middleware_constructor(self): middleware = FetchFromCacheMiddleware(empty_response) self.assertEqual(middleware.key_prefix, "middlewareprefix") self.assertEqual(middleware.cache_alias, "other") self.assertEqual(middleware.cache, self.other_cache) def test_middleware(self): middleware = CacheMiddleware(hello_world_view) prefix_middleware = CacheMiddleware(hello_world_view, key_prefix="prefix1") timeout_middleware = CacheMiddleware(hello_world_view, cache_timeout=1) request = self.factory.get("/view/") # Put the request through the request middleware result = middleware.process_request(request) self.assertIsNone(result) response = hello_world_view(request, "1") # Now put the response through the response middleware response = middleware.process_response(request, response) # Repeating the request should result in a cache hit result = middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b"Hello World 1") # The same request through a different middleware won't hit result = prefix_middleware.process_request(request) self.assertIsNone(result) # The same request with a timeout _will_ hit result = timeout_middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b"Hello World 1") def test_view_decorator(self): # decorate the same view with different cache decorators default_view = cache_page(3)(hello_world_view) default_with_prefix_view = cache_page(3, key_prefix="prefix1")(hello_world_view) explicit_default_view = cache_page(3, cache="default")(hello_world_view) explicit_default_with_prefix_view = cache_page( 3, cache="default", key_prefix="prefix1" )(hello_world_view) other_view = cache_page(1, cache="other")(hello_world_view) other_with_prefix_view = cache_page(1, cache="other", key_prefix="prefix2")( hello_world_view ) request = self.factory.get("/view/") # Request the view once 
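        # The numeric argument changes on every call, so the response body
        # shows whether the view ran again or a cached page was served.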
response = default_view(request, "1") self.assertEqual(response.content, b"Hello World 1") # Request again -- hit the cache response = default_view(request, "2") self.assertEqual(response.content, b"Hello World 1") # Requesting the same view with the explicit cache should yield the same result response = explicit_default_view(request, "3") self.assertEqual(response.content, b"Hello World 1") # Requesting with a prefix will hit a different cache key response = explicit_default_with_prefix_view(request, "4") self.assertEqual(response.content, b"Hello World 4") # Hitting the same view again gives a cache hit response = explicit_default_with_prefix_view(request, "5") self.assertEqual(response.content, b"Hello World 4") # And going back to the implicit cache will hit the same cache response = default_with_prefix_view(request, "6") self.assertEqual(response.content, b"Hello World 4") # Requesting from an alternate cache won't hit cache response = other_view(request, "7") self.assertEqual(response.content, b"Hello World 7") # But a repeated hit will hit cache response = other_view(request, "8") self.assertEqual(response.content, b"Hello World 7") # And prefixing the alternate cache yields yet another cache entry response = other_with_prefix_view(request, "9") self.assertEqual(response.content, b"Hello World 9") # But if we wait a couple of seconds... time.sleep(2) # ... the default cache will still hit caches["default"] response = default_view(request, "11") self.assertEqual(response.content, b"Hello World 1") # ... the default cache with a prefix will still hit response = default_with_prefix_view(request, "12") self.assertEqual(response.content, b"Hello World 4") # ... the explicit default cache will still hit response = explicit_default_view(request, "13") self.assertEqual(response.content, b"Hello World 1") # ... the explicit default cache with a prefix will still hit response = explicit_default_with_prefix_view(request, "14") self.assertEqual(response.content, b"Hello World 4") # .. but a rapidly expiring cache won't hit response = other_view(request, "15") self.assertEqual(response.content, b"Hello World 15") # .. even if it has a prefix response = other_with_prefix_view(request, "16") self.assertEqual(response.content, b"Hello World 16") def test_cache_page_timeout(self): # Page timeout takes precedence over the "max-age" section of the # "Cache-Control". tests = [ (1, 3), # max_age < page_timeout. (3, 1), # max_age > page_timeout. 
] for max_age, page_timeout in tests: with self.subTest(max_age=max_age, page_timeout=page_timeout): view = cache_page(timeout=page_timeout)( cache_control(max_age=max_age)(hello_world_view) ) request = self.factory.get("/view/") response = view(request, "1") self.assertEqual(response.content, b"Hello World 1") time.sleep(1) response = view(request, "2") self.assertEqual( response.content, b"Hello World 1" if page_timeout > max_age else b"Hello World 2", ) cache.clear() def test_cached_control_private_not_cached(self): """Responses with 'Cache-Control: private' are not cached.""" view_with_private_cache = cache_page(3)( cache_control(private=True)(hello_world_view) ) request = self.factory.get("/view/") response = view_with_private_cache(request, "1") self.assertEqual(response.content, b"Hello World 1") response = view_with_private_cache(request, "2") self.assertEqual(response.content, b"Hello World 2") def test_sensitive_cookie_not_cached(self): """ Django must prevent caching of responses that set a user-specific (and maybe security sensitive) cookie in response to a cookie-less request. """ request = self.factory.get("/view/") csrf_middleware = CsrfViewMiddleware(csrf_view) csrf_middleware.process_view(request, csrf_view, (), {}) cache_middleware = CacheMiddleware(csrf_middleware) self.assertIsNone(cache_middleware.process_request(request)) cache_middleware(request) # Inserting a CSRF cookie in a cookie-less request prevented caching. self.assertIsNone(cache_middleware.process_request(request)) def test_304_response_has_http_caching_headers_but_not_cached(self): original_view = mock.Mock(return_value=HttpResponseNotModified()) view = cache_page(2)(original_view) request = self.factory.get("/view/") # The view shouldn't be cached on the second call. view(request).close() response = view(request) response.close() self.assertEqual(original_view.call_count, 2) self.assertIsInstance(response, HttpResponseNotModified) self.assertIn("Cache-Control", response) self.assertIn("Expires", response) def test_per_thread(self): """The cache instance is different for each thread.""" thread_caches = [] middleware = CacheMiddleware(empty_response) def runner(): thread_caches.append(middleware.cache) for _ in range(2): thread = threading.Thread(target=runner) thread.start() thread.join() self.assertIsNot(thread_caches[0], thread_caches[1]) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="settingsprefix", CACHE_MIDDLEWARE_SECONDS=1, CACHES={ "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", }, }, USE_I18N=False, ) class TestWithTemplateResponse(SimpleTestCase): """ Tests various headers w/ TemplateResponse. Most are probably redundant since they manipulate the same object anyway but the ETag header is 'special' because it relies on the content being complete (which is not necessarily always the case with a TemplateResponse) """ path = "/cache/test/" factory = RequestFactory() def tearDown(self): cache.clear() def test_patch_vary_headers(self): headers = ( # Initial vary, new headers, resulting vary. 
(None, ("Accept-Encoding",), "Accept-Encoding"), ("Accept-Encoding", ("accept-encoding",), "Accept-Encoding"), ("Accept-Encoding", ("ACCEPT-ENCODING",), "Accept-Encoding"), ("Cookie", ("Accept-Encoding",), "Cookie, Accept-Encoding"), ( "Cookie, Accept-Encoding", ("Accept-Encoding",), "Cookie, Accept-Encoding", ), ( "Cookie, Accept-Encoding", ("Accept-Encoding", "cookie"), "Cookie, Accept-Encoding", ), (None, ("Accept-Encoding", "COOKIE"), "Accept-Encoding, COOKIE"), ( "Cookie, Accept-Encoding", ("Accept-Encoding", "cookie"), "Cookie, Accept-Encoding", ), ( "Cookie , Accept-Encoding", ("Accept-Encoding", "cookie"), "Cookie, Accept-Encoding", ), ) for initial_vary, newheaders, resulting_vary in headers: with self.subTest(initial_vary=initial_vary, newheaders=newheaders): template = engines["django"].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) if initial_vary is not None: response.headers["Vary"] = initial_vary patch_vary_headers(response, newheaders) self.assertEqual(response.headers["Vary"], resulting_vary) def test_get_cache_key(self): request = self.factory.get(self.path) template = engines["django"].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) key_prefix = "localprefix" # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) self.assertEqual( get_cache_key(request), "views.decorators.cache.cache_page.settingsprefix.GET." "58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e", ) # A specified key_prefix is taken into account. learn_cache_key(request, response, key_prefix=key_prefix) self.assertEqual( get_cache_key(request, key_prefix=key_prefix), "views.decorators.cache.cache_page.localprefix.GET." "58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e", ) def test_get_cache_key_with_query(self): request = self.factory.get(self.path, {"test": 1}) template = engines["django"].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) # The querystring is taken into account. self.assertEqual( get_cache_key(request), "views.decorators.cache.cache_page.settingsprefix.GET." 
"0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e", ) class TestMakeTemplateFragmentKey(SimpleTestCase): def test_without_vary_on(self): key = make_template_fragment_key("a.fragment") self.assertEqual( key, "template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e" ) def test_with_one_vary_on(self): key = make_template_fragment_key("foo", ["abc"]) self.assertEqual(key, "template.cache.foo.493e283d571a73056196f1a68efd0f66") def test_with_many_vary_on(self): key = make_template_fragment_key("bar", ["abc", "def"]) self.assertEqual(key, "template.cache.bar.17c1a507a0cb58384f4c639067a93520") def test_proper_escaping(self): key = make_template_fragment_key("spam", ["abc:def%"]) self.assertEqual(key, "template.cache.spam.06c8ae8e8c430b69fb0a6443504153dc") def test_with_ints_vary_on(self): key = make_template_fragment_key("foo", [1, 2, 3, 4, 5]) self.assertEqual(key, "template.cache.foo.7ae8fd2e0d25d651c683bdeebdb29461") def test_with_unicode_vary_on(self): key = make_template_fragment_key("foo", ["42º", "😀"]) self.assertEqual(key, "template.cache.foo.7ced1c94e543668590ba39b3c08b0237") def test_long_vary_on(self): key = make_template_fragment_key("foo", ["x" * 10000]) self.assertEqual(key, "template.cache.foo.3670b349b5124aa56bdb50678b02b23a") class CacheHandlerTest(SimpleTestCase): def test_same_instance(self): """ Attempting to retrieve the same alias should yield the same instance. """ cache1 = caches["default"] cache2 = caches["default"] self.assertIs(cache1, cache2) def test_per_thread(self): """ Requesting the same alias from separate threads should yield separate instances. """ c = [] def runner(): c.append(caches["default"]) for x in range(2): t = threading.Thread(target=runner) t.start() t.join() self.assertIsNot(c[0], c[1]) def test_nonexistent_alias(self): msg = "The connection 'nonexistent' doesn't exist." with self.assertRaisesMessage(InvalidCacheBackendError, msg): caches["nonexistent"] def test_nonexistent_backend(self): test_caches = CacheHandler( { "invalid_backend": { "BACKEND": "django.nonexistent.NonexistentBackend", }, } ) msg = ( "Could not find backend 'django.nonexistent.NonexistentBackend': " "No module named 'django.nonexistent'" ) with self.assertRaisesMessage(InvalidCacheBackendError, msg): test_caches["invalid_backend"] def test_all(self): test_caches = CacheHandler( { "cache_1": { "BACKEND": "django.core.cache.backends.dummy.DummyCache", }, "cache_2": { "BACKEND": "django.core.cache.backends.dummy.DummyCache", }, } ) self.assertEqual(test_caches.all(initialized_only=True), []) cache_1 = test_caches["cache_1"] self.assertEqual(test_caches.all(initialized_only=True), [cache_1]) self.assertEqual(len(test_caches.all()), 2) # .all() initializes all caches. self.assertEqual(len(test_caches.all(initialized_only=True)), 2) self.assertEqual(test_caches.all(), test_caches.all(initialized_only=True))
64be020d1ff40e2dc4f0b60f227ccd0e01c981fd71a72119ee358d95480f3894
import datetime from unittest import skipUnless from django.conf import settings from django.db import connection from django.db.models import ( CASCADE, CharField, DateTimeField, ForeignKey, Index, Model, Q, ) from django.db.models.functions import Lower from django.test import ( TestCase, TransactionTestCase, ignore_warnings, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import isolate_apps, override_settings from django.utils import timezone from django.utils.deprecation import RemovedInDjango51Warning from .models import Article, ArticleTranslation, IndexedArticle2 class SchemaIndexesTests(TestCase): """ Test index handling by the db.backends.schema infrastructure. """ def test_index_name_hash(self): """ Index names should be deterministic. """ editor = connection.schema_editor() index_name = editor._create_index_name( table_name=Article._meta.db_table, column_names=("c1",), suffix="123", ) self.assertEqual(index_name, "indexes_article_c1_a52bd80b123") def test_index_name(self): """ Index names on the built-in database backends:: * Are truncated as needed. * Include all the column names. * Include a deterministic hash. """ long_name = "l%sng" % ("o" * 100) editor = connection.schema_editor() index_name = editor._create_index_name( table_name=Article._meta.db_table, column_names=("c1", "c2", long_name), suffix="ix", ) expected = { "mysql": "indexes_article_c1_c2_looooooooooooooooooo_255179b2ix", "oracle": "indexes_a_c1_c2_loo_255179b2ix", "postgresql": "indexes_article_c1_c2_loooooooooooooooooo_255179b2ix", "sqlite": "indexes_article_c1_c2_l%sng_255179b2ix" % ("o" * 100), } if connection.vendor not in expected: self.skipTest( "This test is only supported on the built-in database backends." ) self.assertEqual(index_name, expected[connection.vendor]) def test_quoted_index_name(self): editor = connection.schema_editor() index_sql = [str(statement) for statement in editor._model_indexes_sql(Article)] self.assertEqual(len(index_sql), 1) # Ensure the index name is properly quoted. self.assertIn( connection.ops.quote_name(Article._meta.indexes[0].name), index_sql[0], ) @ignore_warnings(category=RemovedInDjango51Warning) @isolate_apps("indexes") def test_index_together_single_list(self): class IndexTogetherSingleList(Model): headline = CharField(max_length=100) pub_date = DateTimeField() class Meta: index_together = ["headline", "pub_date"] index_sql = connection.schema_editor()._model_indexes_sql( IndexTogetherSingleList ) self.assertEqual(len(index_sql), 1) def test_columns_list_sql(self): index = Index(fields=["headline"], name="whitespace_idx") editor = connection.schema_editor() self.assertIn( "(%s)" % editor.quote_name("headline"), str(index.create_sql(Article, editor)), ) @skipUnlessDBFeature("supports_index_column_ordering") def test_descending_columns_list_sql(self): index = Index(fields=["-headline"], name="whitespace_idx") editor = connection.schema_editor() self.assertIn( "(%s DESC)" % editor.quote_name("headline"), str(index.create_sql(Article, editor)), ) class SchemaIndexesNotPostgreSQLTests(TransactionTestCase): available_apps = ["indexes"] def test_create_index_ignores_opclasses(self): index = Index( name="test_ops_class", fields=["headline"], opclasses=["varchar_pattern_ops"], ) with connection.schema_editor() as editor: # This would error if opclasses weren't ignored. editor.add_index(IndexedArticle2, index) # The `condition` parameter is ignored by databases that don't support partial # indexes. 
@skipIfDBFeature("supports_partial_indexes") class PartialIndexConditionIgnoredTests(TransactionTestCase): available_apps = ["indexes"] def test_condition_ignored(self): index = Index( name="test_condition_ignored", fields=["published"], condition=Q(published=True), ) with connection.schema_editor() as editor: # This would error if condition weren't ignored. editor.add_index(Article, index) self.assertNotIn( "WHERE %s" % editor.quote_name("published"), str(index.create_sql(Article, editor)), ) @skipUnless(connection.vendor == "postgresql", "PostgreSQL tests") class SchemaIndexesPostgreSQLTests(TransactionTestCase): available_apps = ["indexes"] get_opclass_query = """ SELECT opcname, c.relname FROM pg_opclass AS oc JOIN pg_index as i on oc.oid = ANY(i.indclass) JOIN pg_class as c on c.oid = i.indexrelid WHERE c.relname = '%s' """ def test_text_indexes(self): """Test creation of PostgreSQL-specific text indexes (#12234)""" from .models import IndexedArticle index_sql = [ str(statement) for statement in connection.schema_editor()._model_indexes_sql( IndexedArticle ) ] self.assertEqual(len(index_sql), 5) self.assertIn('("headline" varchar_pattern_ops)', index_sql[1]) self.assertIn('("body" text_pattern_ops)', index_sql[3]) # unique=True and db_index=True should only create the varchar-specific # index (#19441). self.assertIn('("slug" varchar_pattern_ops)', index_sql[4]) def test_virtual_relation_indexes(self): """Test indexes are not created for related objects""" index_sql = connection.schema_editor()._model_indexes_sql(Article) self.assertEqual(len(index_sql), 1) def test_ops_class(self): index = Index( name="test_ops_class", fields=["headline"], opclasses=["varchar_pattern_ops"], ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % "test_ops_class") self.assertEqual( cursor.fetchall(), [("varchar_pattern_ops", "test_ops_class")] ) def test_ops_class_multiple_columns(self): index = Index( name="test_ops_class_multiple", fields=["headline", "body"], opclasses=["varchar_pattern_ops", "text_pattern_ops"], ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % "test_ops_class_multiple") expected_ops_classes = ( ("varchar_pattern_ops", "test_ops_class_multiple"), ("text_pattern_ops", "test_ops_class_multiple"), ) self.assertCountEqual(cursor.fetchall(), expected_ops_classes) def test_ops_class_partial(self): index = Index( name="test_ops_class_partial", fields=["body"], opclasses=["text_pattern_ops"], condition=Q(headline__contains="China"), ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % "test_ops_class_partial") self.assertCountEqual( cursor.fetchall(), [("text_pattern_ops", "test_ops_class_partial")] ) def test_ops_class_partial_tablespace(self): indexname = "test_ops_class_tblspace" index = Index( name=indexname, fields=["body"], opclasses=["text_pattern_ops"], condition=Q(headline__contains="China"), db_tablespace="pg_default", ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) self.assertIn( 'TABLESPACE "pg_default" ', str(index.create_sql(IndexedArticle2, editor)), ) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % indexname) self.assertCountEqual(cursor.fetchall(), 
[("text_pattern_ops", indexname)]) def test_ops_class_descending(self): indexname = "test_ops_class_ordered" index = Index( name=indexname, fields=["-body"], opclasses=["text_pattern_ops"], ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % indexname) self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", indexname)]) def test_ops_class_descending_partial(self): indexname = "test_ops_class_ordered_partial" index = Index( name=indexname, fields=["-body"], opclasses=["text_pattern_ops"], condition=Q(headline__contains="China"), ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % indexname) self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", indexname)]) @skipUnlessDBFeature("supports_covering_indexes") def test_ops_class_include(self): index_name = "test_ops_class_include" index = Index( name=index_name, fields=["body"], opclasses=["text_pattern_ops"], include=["headline"], ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % index_name) self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", index_name)]) @skipUnlessDBFeature("supports_covering_indexes") def test_ops_class_include_tablespace(self): index_name = "test_ops_class_include_tblspace" index = Index( name=index_name, fields=["body"], opclasses=["text_pattern_ops"], include=["headline"], db_tablespace="pg_default", ) with connection.schema_editor() as editor: editor.add_index(IndexedArticle2, index) self.assertIn( 'TABLESPACE "pg_default"', str(index.create_sql(IndexedArticle2, editor)), ) with editor.connection.cursor() as cursor: cursor.execute(self.get_opclass_query % index_name) self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", index_name)]) def test_ops_class_columns_lists_sql(self): index = Index( fields=["headline"], name="whitespace_idx", opclasses=["text_pattern_ops"], ) with connection.schema_editor() as editor: self.assertIn( "(%s text_pattern_ops)" % editor.quote_name("headline"), str(index.create_sql(Article, editor)), ) def test_ops_class_descending_columns_list_sql(self): index = Index( fields=["-headline"], name="whitespace_idx", opclasses=["text_pattern_ops"], ) with connection.schema_editor() as editor: self.assertIn( "(%s text_pattern_ops DESC)" % editor.quote_name("headline"), str(index.create_sql(Article, editor)), ) @skipUnless(connection.vendor == "mysql", "MySQL tests") class SchemaIndexesMySQLTests(TransactionTestCase): available_apps = ["indexes"] def test_no_index_for_foreignkey(self): """ MySQL on InnoDB already creates indexes automatically for foreign keys. (#14180). An index should be created if db_constraint=False (#26171). 
""" with connection.cursor() as cursor: storage = connection.introspection.get_storage_engine( cursor, ArticleTranslation._meta.db_table, ) if storage != "InnoDB": self.skipTest("This test only applies to the InnoDB storage engine") index_sql = [ str(statement) for statement in connection.schema_editor()._model_indexes_sql( ArticleTranslation ) ] self.assertEqual( index_sql, [ "CREATE INDEX " "`indexes_articletranslation_article_no_constraint_id_d6c0806b` " "ON `indexes_articletranslation` (`article_no_constraint_id`)" ], ) # The index also shouldn't be created if the ForeignKey is added after # the model was created. field_created = False try: with connection.schema_editor() as editor: new_field = ForeignKey(Article, CASCADE) new_field.set_attributes_from_name("new_foreign_key") editor.add_field(ArticleTranslation, new_field) field_created = True # No deferred SQL. The FK constraint is included in the # statement to add the field. self.assertFalse(editor.deferred_sql) finally: if field_created: with connection.schema_editor() as editor: editor.remove_field(ArticleTranslation, new_field) @skipUnlessDBFeature("supports_partial_indexes") # SQLite doesn't support timezone-aware datetimes when USE_TZ is False. @override_settings(USE_TZ=True) class PartialIndexTests(TransactionTestCase): # Schema editor is used to create the index to test that it works. available_apps = ["indexes"] def test_partial_index(self): with connection.schema_editor() as editor: index = Index( name="recent_article_idx", fields=["pub_date"], condition=Q( pub_date__gt=datetime.datetime( year=2015, month=1, day=1, # PostgreSQL would otherwise complain about the lookup # being converted to a mutable function (by removing # the timezone in the cast) which is forbidden. tzinfo=timezone.get_current_timezone(), ), ), ) self.assertIn( "WHERE %s" % editor.quote_name("pub_date"), str(index.create_sql(Article, schema_editor=editor)), ) editor.add_index(index=index, model=Article) with connection.cursor() as cursor: self.assertIn( index.name, connection.introspection.get_constraints( cursor=cursor, table_name=Article._meta.db_table, ), ) editor.remove_index(index=index, model=Article) def test_integer_restriction_partial(self): with connection.schema_editor() as editor: index = Index( name="recent_article_idx", fields=["id"], condition=Q(pk__gt=1), ) self.assertIn( "WHERE %s" % editor.quote_name("id"), str(index.create_sql(Article, schema_editor=editor)), ) editor.add_index(index=index, model=Article) with connection.cursor() as cursor: self.assertIn( index.name, connection.introspection.get_constraints( cursor=cursor, table_name=Article._meta.db_table, ), ) editor.remove_index(index=index, model=Article) def test_boolean_restriction_partial(self): with connection.schema_editor() as editor: index = Index( name="published_index", fields=["published"], condition=Q(published=True), ) self.assertIn( "WHERE %s" % editor.quote_name("published"), str(index.create_sql(Article, schema_editor=editor)), ) editor.add_index(index=index, model=Article) with connection.cursor() as cursor: self.assertIn( index.name, connection.introspection.get_constraints( cursor=cursor, table_name=Article._meta.db_table, ), ) editor.remove_index(index=index, model=Article) @skipUnlessDBFeature("supports_functions_in_partial_indexes") def test_multiple_conditions(self): with connection.schema_editor() as editor: index = Index( name="recent_article_idx", fields=["pub_date", "headline"], condition=( Q( pub_date__gt=datetime.datetime( year=2015, month=1, day=1, 
tzinfo=timezone.get_current_timezone(), ) ) & Q(headline__contains="China") ), ) sql = str(index.create_sql(Article, schema_editor=editor)) where = sql.find("WHERE") self.assertIn("WHERE (%s" % editor.quote_name("pub_date"), sql) # Because each backend has different syntax for the operators, # check ONLY the occurrence of headline in the SQL. self.assertGreater(sql.rfind("headline"), where) editor.add_index(index=index, model=Article) with connection.cursor() as cursor: self.assertIn( index.name, connection.introspection.get_constraints( cursor=cursor, table_name=Article._meta.db_table, ), ) editor.remove_index(index=index, model=Article) def test_is_null_condition(self): with connection.schema_editor() as editor: index = Index( name="recent_article_idx", fields=["pub_date"], condition=Q(pub_date__isnull=False), ) self.assertIn( "WHERE %s IS NOT NULL" % editor.quote_name("pub_date"), str(index.create_sql(Article, schema_editor=editor)), ) editor.add_index(index=index, model=Article) with connection.cursor() as cursor: self.assertIn( index.name, connection.introspection.get_constraints( cursor=cursor, table_name=Article._meta.db_table, ), ) editor.remove_index(index=index, model=Article) @skipUnlessDBFeature("supports_expression_indexes") def test_partial_func_index(self): index_name = "partial_func_idx" index = Index( Lower("headline").desc(), name=index_name, condition=Q(pub_date__isnull=False), ) with connection.schema_editor() as editor: editor.add_index(index=index, model=Article) sql = index.create_sql(Article, schema_editor=editor) table = Article._meta.db_table self.assertIs(sql.references_column(table, "headline"), True) sql = str(sql) self.assertIn("LOWER(%s)" % editor.quote_name("headline"), sql) self.assertIn( "WHERE %s IS NOT NULL" % editor.quote_name("pub_date"), sql, ) self.assertGreater(sql.find("WHERE"), sql.find("LOWER")) with connection.cursor() as cursor: constraints = connection.introspection.get_constraints( cursor=cursor, table_name=table, ) self.assertIn(index_name, constraints) if connection.features.supports_index_column_ordering: self.assertEqual(constraints[index_name]["orders"], ["DESC"]) with connection.schema_editor() as editor: editor.remove_index(Article, index) with connection.cursor() as cursor: self.assertNotIn( index_name, connection.introspection.get_constraints( cursor=cursor, table_name=table, ), ) @skipUnlessDBFeature("supports_covering_indexes") class CoveringIndexTests(TransactionTestCase): available_apps = ["indexes"] def test_covering_index(self): index = Index( name="covering_headline_idx", fields=["headline"], include=["pub_date", "published"], ) with connection.schema_editor() as editor: self.assertIn( "(%s) INCLUDE (%s, %s)" % ( editor.quote_name("headline"), editor.quote_name("pub_date"), editor.quote_name("published"), ), str(index.create_sql(Article, editor)), ) editor.add_index(Article, index) with connection.cursor() as cursor: constraints = connection.introspection.get_constraints( cursor=cursor, table_name=Article._meta.db_table, ) self.assertIn(index.name, constraints) self.assertEqual( constraints[index.name]["columns"], ["headline", "pub_date", "published"], ) editor.remove_index(Article, index) with connection.cursor() as cursor: self.assertNotIn( index.name, connection.introspection.get_constraints( cursor=cursor, table_name=Article._meta.db_table, ), ) def test_covering_partial_index(self): index = Index( name="covering_partial_headline_idx", fields=["headline"], include=["pub_date"], condition=Q(pub_date__isnull=False), ) with 
connection.schema_editor() as editor: extra_sql = "" if settings.DEFAULT_INDEX_TABLESPACE: extra_sql = "TABLESPACE %s " % editor.quote_name( settings.DEFAULT_INDEX_TABLESPACE ) self.assertIn( "(%s) INCLUDE (%s) %sWHERE %s " % ( editor.quote_name("headline"), editor.quote_name("pub_date"), extra_sql, editor.quote_name("pub_date"), ), str(index.create_sql(Article, editor)), ) editor.add_index(Article, index) with connection.cursor() as cursor: constraints = connection.introspection.get_constraints( cursor=cursor, table_name=Article._meta.db_table, ) self.assertIn(index.name, constraints) self.assertEqual( constraints[index.name]["columns"], ["headline", "pub_date"], ) editor.remove_index(Article, index) with connection.cursor() as cursor: self.assertNotIn( index.name, connection.introspection.get_constraints( cursor=cursor, table_name=Article._meta.db_table, ), ) @skipUnlessDBFeature("supports_expression_indexes") def test_covering_func_index(self): index_name = "covering_func_headline_idx" index = Index(Lower("headline"), name=index_name, include=["pub_date"]) with connection.schema_editor() as editor: editor.add_index(index=index, model=Article) sql = index.create_sql(Article, schema_editor=editor) table = Article._meta.db_table self.assertIs(sql.references_column(table, "headline"), True) sql = str(sql) self.assertIn("LOWER(%s)" % editor.quote_name("headline"), sql) self.assertIn("INCLUDE (%s)" % editor.quote_name("pub_date"), sql) self.assertGreater(sql.find("INCLUDE"), sql.find("LOWER")) with connection.cursor() as cursor: constraints = connection.introspection.get_constraints( cursor=cursor, table_name=table, ) self.assertIn(index_name, constraints) self.assertIn("pub_date", constraints[index_name]["columns"]) with connection.schema_editor() as editor: editor.remove_index(Article, index) with connection.cursor() as cursor: self.assertNotIn( index_name, connection.introspection.get_constraints( cursor=cursor, table_name=table, ), ) @skipIfDBFeature("supports_covering_indexes") class CoveringIndexIgnoredTests(TransactionTestCase): available_apps = ["indexes"] def test_covering_ignored(self): index = Index( name="test_covering_ignored", fields=["headline"], include=["pub_date"], ) with connection.schema_editor() as editor: editor.add_index(Article, index) self.assertNotIn( "INCLUDE (%s)" % editor.quote_name("headline"), str(index.create_sql(Article, editor)), )
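# Illustrative sketch (not part of Django's test suite): the index variants
# exercised above through the schema editor are normally declared in a model's
# Meta.indexes. "Report" and its fields are hypothetical names used only for
# this example; in an application they would live in the app's models.py.
from django.db import models
from django.db.models.functions import Lower


class Report(models.Model):
    headline = models.CharField(max_length=100)
    body = models.TextField()
    pub_date = models.DateTimeField(null=True)
    published = models.BooleanField(default=False)

    class Meta:
        app_label = "indexes"
        indexes = [
            # Partial index: only rows matching the condition are indexed
            # (ignored on backends without supports_partial_indexes).
            models.Index(
                fields=["pub_date"],
                name="example_recent_idx",
                condition=models.Q(pub_date__isnull=False),
            ),
            # Covering index: extra columns stored alongside the key columns
            # (ignored on backends without supports_covering_indexes).
            models.Index(
                fields=["headline"],
                name="example_covering_idx",
                include=["pub_date"],
            ),
            # Functional index on an expression, with descending ordering.
            models.Index(Lower("headline").desc(), name="example_lower_idx"),
        ]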
e510c98248bd98a1fdb5fd70cca67dfc41d5723ff78233c30dcba7a30cf8ab8e
from django.db import models class CurrentTranslation(models.ForeignObject): """ Creates virtual relation to the translation with model cache enabled. """ # Avoid validation requires_unique_target = False def __init__(self, to, on_delete, from_fields, to_fields, **kwargs): # Disable reverse relation kwargs["related_name"] = "+" # Set unique to enable model cache. kwargs["unique"] = True super().__init__(to, on_delete, from_fields, to_fields, **kwargs) class ArticleTranslation(models.Model): article = models.ForeignKey("indexes.Article", models.CASCADE) article_no_constraint = models.ForeignKey( "indexes.Article", models.CASCADE, db_constraint=False, related_name="+" ) language = models.CharField(max_length=10, unique=True) content = models.TextField() class Article(models.Model): headline = models.CharField(max_length=100) pub_date = models.DateTimeField() published = models.BooleanField(default=False) # Add virtual relation to the ArticleTranslation model. translation = CurrentTranslation( ArticleTranslation, models.CASCADE, ["id"], ["article"] ) class Meta: indexes = [models.Index(fields=["headline", "pub_date"])] class IndexedArticle(models.Model): headline = models.CharField(max_length=100, db_index=True) body = models.TextField(db_index=True) slug = models.CharField(max_length=40, unique=True) class Meta: required_db_features = {"supports_index_on_text_field"} class IndexedArticle2(models.Model): headline = models.CharField(max_length=100) body = models.TextField()
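# Illustrative sketch (not part of the app's models): the CurrentTranslation
# virtual relation declared above behaves like a forward ForeignKey in the
# ORM, so it can be joined and filtered on. The language value below is a
# hypothetical example.
def example_translation_queries():
    # Filter articles by a field on the related translation; the join uses
    # Article.id = ArticleTranslation.article, as declared via from_fields /
    # to_fields on CurrentTranslation.
    english_articles = Article.objects.filter(translation__language="en")
    # Fetch each article together with its translation in a single query.
    with_translation = Article.objects.select_related("translation")
    return english_articles, with_translation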
01b1683719bf612387a2d57c8350e690b91e1f2db659c087a0ddb3431947b94f
import datetime import re from datetime import date from decimal import Decimal from django import forms from django.core.exceptions import ImproperlyConfigured from django.db import models from django.forms.formsets import formset_factory from django.forms.models import ( BaseModelFormSet, _get_foreign_key, inlineformset_factory, modelformset_factory, ) from django.http import QueryDict from django.test import TestCase, skipUnlessDBFeature from .models import ( AlternateBook, Author, AuthorMeeting, BetterAuthor, Book, BookWithCustomPK, BookWithOptionalAltEditor, ClassyMexicanRestaurant, CustomPrimaryKey, Location, Membership, MexicanRestaurant, Owner, OwnerProfile, Person, Place, Player, Poem, Poet, Post, Price, Product, Repository, Restaurant, Revision, Team, ) class DeletionTests(TestCase): def test_deletion(self): PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True) poet = Poet.objects.create(name="test") data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "1", "form-MAX_NUM_FORMS": "0", "form-0-id": str(poet.pk), "form-0-name": "test", "form-0-DELETE": "on", } formset = PoetFormSet(data, queryset=Poet.objects.all()) formset.save(commit=False) self.assertEqual(Poet.objects.count(), 1) formset.save() self.assertTrue(formset.is_valid()) self.assertEqual(Poet.objects.count(), 0) def test_add_form_deletion_when_invalid(self): """ Make sure that an add form that is filled out, but marked for deletion doesn't cause validation errors. """ PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True) poet = Poet.objects.create(name="test") # One existing untouched and two new unvalid forms data = { "form-TOTAL_FORMS": "3", "form-INITIAL_FORMS": "1", "form-MAX_NUM_FORMS": "0", "form-0-id": str(poet.id), "form-0-name": "test", "form-1-id": "", "form-1-name": "x" * 1000, # Too long "form-2-id": str(poet.id), # Violate unique constraint "form-2-name": "test2", } formset = PoetFormSet(data, queryset=Poet.objects.all()) # Make sure this form doesn't pass validation. self.assertIs(formset.is_valid(), False) self.assertEqual(Poet.objects.count(), 1) # Then make sure that it *does* pass validation and delete the object, # even though the data in new forms aren't actually valid. data["form-0-DELETE"] = "on" data["form-1-DELETE"] = "on" data["form-2-DELETE"] = "on" formset = PoetFormSet(data, queryset=Poet.objects.all()) self.assertIs(formset.is_valid(), True) formset.save() self.assertEqual(Poet.objects.count(), 0) def test_change_form_deletion_when_invalid(self): """ Make sure that a change form that is filled out, but marked for deletion doesn't cause validation errors. """ PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True) poet = Poet.objects.create(name="test") data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "1", "form-MAX_NUM_FORMS": "0", "form-0-id": str(poet.id), "form-0-name": "x" * 1000, } formset = PoetFormSet(data, queryset=Poet.objects.all()) # Make sure this form doesn't pass validation. self.assertIs(formset.is_valid(), False) self.assertEqual(Poet.objects.count(), 1) # Then make sure that it *does* pass validation and delete the object, # even though the data isn't actually valid. 
data["form-0-DELETE"] = "on" formset = PoetFormSet(data, queryset=Poet.objects.all()) self.assertIs(formset.is_valid(), True) formset.save() self.assertEqual(Poet.objects.count(), 0) def test_outdated_deletion(self): poet = Poet.objects.create(name="test") poem = Poem.objects.create(name="Brevity is the soul of wit", poet=poet) PoemFormSet = inlineformset_factory( Poet, Poem, fields="__all__", can_delete=True ) # Simulate deletion of an object that doesn't exist in the database data = { "form-TOTAL_FORMS": "2", "form-INITIAL_FORMS": "2", "form-0-id": str(poem.pk), "form-0-name": "foo", "form-1-id": str(poem.pk + 1), # doesn't exist "form-1-name": "bar", "form-1-DELETE": "on", } formset = PoemFormSet(data, instance=poet, prefix="form") # The formset is valid even though poem.pk + 1 doesn't exist, # because it's marked for deletion anyway self.assertTrue(formset.is_valid()) formset.save() # Make sure the save went through correctly self.assertEqual(Poem.objects.get(pk=poem.pk).name, "foo") self.assertEqual(poet.poem_set.count(), 1) self.assertFalse(Poem.objects.filter(pk=poem.pk + 1).exists()) class ModelFormsetTest(TestCase): def test_modelformset_factory_without_fields(self): """Regression for #19733""" message = ( "Calling modelformset_factory without defining 'fields' or 'exclude' " "explicitly is prohibited." ) with self.assertRaisesMessage(ImproperlyConfigured, message): modelformset_factory(Author) def test_simple_save(self): qs = Author.objects.all() AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=3) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 3) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-name">Name:</label>' '<input id="id_form-0-name" type="text" name="form-0-name" maxlength="100">' '<input type="hidden" name="form-0-id" id="id_form-0-id"></p>', ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_form-1-name">Name:</label>' '<input id="id_form-1-name" type="text" name="form-1-name" maxlength="100">' '<input type="hidden" name="form-1-id" id="id_form-1-id"></p>', ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_form-2-name">Name:</label>' '<input id="id_form-2-name" type="text" name="form-2-name" maxlength="100">' '<input type="hidden" name="form-2-id" id="id_form-2-id"></p>', ) data = { "form-TOTAL_FORMS": "3", # the number of forms rendered "form-INITIAL_FORMS": "0", # the number of forms with initial data "form-MAX_NUM_FORMS": "", # the max number of forms "form-0-name": "Charles Baudelaire", "form-1-name": "Arthur Rimbaud", "form-2-name": "", } formset = AuthorFormSet(data=data, queryset=qs) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 2) author1, author2 = saved self.assertEqual(author1, Author.objects.get(name="Charles Baudelaire")) self.assertEqual(author2, Author.objects.get(name="Arthur Rimbaud")) authors = list(Author.objects.order_by("name")) self.assertEqual(authors, [author2, author1]) # Gah! We forgot Paul Verlaine. Let's create a formset to edit the # existing authors with an extra form to add him. We *could* pass in a # queryset to restrict the Author objects we edit, but in this case # we'll use it to display them in alphabetical order by name. 
qs = Author.objects.order_by("name") AuthorFormSet = modelformset_factory( Author, fields="__all__", extra=1, can_delete=False ) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 3) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-name">Name:</label>' '<input id="id_form-0-name" type="text" name="form-0-name" ' 'value="Arthur Rimbaud" maxlength="100">' '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id"></p>' % author2.id, ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_form-1-name">Name:</label>' '<input id="id_form-1-name" type="text" name="form-1-name" ' 'value="Charles Baudelaire" maxlength="100">' '<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id"></p>' % author1.id, ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_form-2-name">Name:</label>' '<input id="id_form-2-name" type="text" name="form-2-name" maxlength="100">' '<input type="hidden" name="form-2-id" id="id_form-2-id"></p>', ) data = { "form-TOTAL_FORMS": "3", # the number of forms rendered "form-INITIAL_FORMS": "2", # the number of forms with initial data "form-MAX_NUM_FORMS": "", # the max number of forms "form-0-id": str(author2.id), "form-0-name": "Arthur Rimbaud", "form-1-id": str(author1.id), "form-1-name": "Charles Baudelaire", "form-2-name": "Paul Verlaine", } formset = AuthorFormSet(data=data, queryset=qs) self.assertTrue(formset.is_valid()) # Only changed or new objects are returned from formset.save() saved = formset.save() self.assertEqual(len(saved), 1) author3 = saved[0] self.assertEqual(author3, Author.objects.get(name="Paul Verlaine")) authors = list(Author.objects.order_by("name")) self.assertEqual(authors, [author2, author1, author3]) # This probably shouldn't happen, but it will. If an add form was # marked for deletion, make sure we don't save that form. 
qs = Author.objects.order_by("name") AuthorFormSet = modelformset_factory( Author, fields="__all__", extra=1, can_delete=True ) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 4) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-name">Name:</label>' '<input id="id_form-0-name" type="text" name="form-0-name" ' 'value="Arthur Rimbaud" maxlength="100"></p>' '<p><label for="id_form-0-DELETE">Delete:</label>' '<input type="checkbox" name="form-0-DELETE" id="id_form-0-DELETE">' '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id"></p>' % author2.id, ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_form-1-name">Name:</label>' '<input id="id_form-1-name" type="text" name="form-1-name" ' 'value="Charles Baudelaire" maxlength="100"></p>' '<p><label for="id_form-1-DELETE">Delete:</label>' '<input type="checkbox" name="form-1-DELETE" id="id_form-1-DELETE">' '<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id"></p>' % author1.id, ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_form-2-name">Name:</label>' '<input id="id_form-2-name" type="text" name="form-2-name" ' 'value="Paul Verlaine" maxlength="100"></p>' '<p><label for="id_form-2-DELETE">Delete:</label>' '<input type="checkbox" name="form-2-DELETE" id="id_form-2-DELETE">' '<input type="hidden" name="form-2-id" value="%d" id="id_form-2-id"></p>' % author3.id, ) self.assertHTMLEqual( formset.forms[3].as_p(), '<p><label for="id_form-3-name">Name:</label>' '<input id="id_form-3-name" type="text" name="form-3-name" maxlength="100">' '</p><p><label for="id_form-3-DELETE">Delete:</label>' '<input type="checkbox" name="form-3-DELETE" id="id_form-3-DELETE">' '<input type="hidden" name="form-3-id" id="id_form-3-id"></p>', ) data = { "form-TOTAL_FORMS": "4", # the number of forms rendered "form-INITIAL_FORMS": "3", # the number of forms with initial data "form-MAX_NUM_FORMS": "", # the max number of forms "form-0-id": str(author2.id), "form-0-name": "Arthur Rimbaud", "form-1-id": str(author1.id), "form-1-name": "Charles Baudelaire", "form-2-id": str(author3.id), "form-2-name": "Paul Verlaine", "form-3-name": "Walt Whitman", "form-3-DELETE": "on", } formset = AuthorFormSet(data=data, queryset=qs) self.assertTrue(formset.is_valid()) # No objects were changed or saved so nothing will come back. self.assertEqual(formset.save(), []) authors = list(Author.objects.order_by("name")) self.assertEqual(authors, [author2, author1, author3]) # Let's edit a record to ensure save only returns that one record. data = { "form-TOTAL_FORMS": "4", # the number of forms rendered "form-INITIAL_FORMS": "3", # the number of forms with initial data "form-MAX_NUM_FORMS": "", # the max number of forms "form-0-id": str(author2.id), "form-0-name": "Walt Whitman", "form-1-id": str(author1.id), "form-1-name": "Charles Baudelaire", "form-2-id": str(author3.id), "form-2-name": "Paul Verlaine", "form-3-name": "", "form-3-DELETE": "", } formset = AuthorFormSet(data=data, queryset=qs) self.assertTrue(formset.is_valid()) # One record has changed. 
saved = formset.save() self.assertEqual(len(saved), 1) self.assertEqual(saved[0], Author.objects.get(name="Walt Whitman")) def test_commit_false(self): # Test the behavior of commit=False and save_m2m author1 = Author.objects.create(name="Charles Baudelaire") author2 = Author.objects.create(name="Paul Verlaine") author3 = Author.objects.create(name="Walt Whitman") meeting = AuthorMeeting.objects.create(created=date.today()) meeting.authors.set(Author.objects.all()) # create an Author instance to add to the meeting. author4 = Author.objects.create(name="John Steinbeck") AuthorMeetingFormSet = modelformset_factory( AuthorMeeting, fields="__all__", extra=1, can_delete=True ) data = { "form-TOTAL_FORMS": "2", # the number of forms rendered "form-INITIAL_FORMS": "1", # the number of forms with initial data "form-MAX_NUM_FORMS": "", # the max number of forms "form-0-id": str(meeting.id), "form-0-name": "2nd Tuesday of the Week Meeting", "form-0-authors": [author2.id, author1.id, author3.id, author4.id], "form-1-name": "", "form-1-authors": "", "form-1-DELETE": "", } formset = AuthorMeetingFormSet(data=data, queryset=AuthorMeeting.objects.all()) self.assertTrue(formset.is_valid()) instances = formset.save(commit=False) for instance in instances: instance.created = date.today() instance.save() formset.save_m2m() self.assertSequenceEqual( instances[0].authors.all(), [author1, author4, author2, author3], ) def test_max_num(self): # Test the behavior of max_num with model formsets. It should allow # all existing related objects/inlines for a given object to be # displayed, but not allow the creation of new inlines beyond max_num. a1 = Author.objects.create(name="Charles Baudelaire") a2 = Author.objects.create(name="Paul Verlaine") a3 = Author.objects.create(name="Walt Whitman") qs = Author.objects.order_by("name") AuthorFormSet = modelformset_factory( Author, fields="__all__", max_num=None, extra=3 ) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 6) self.assertEqual(len(formset.extra_forms), 3) AuthorFormSet = modelformset_factory( Author, fields="__all__", max_num=4, extra=3 ) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 4) self.assertEqual(len(formset.extra_forms), 1) AuthorFormSet = modelformset_factory( Author, fields="__all__", max_num=0, extra=3 ) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 3) self.assertEqual(len(formset.extra_forms), 0) AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None) formset = AuthorFormSet(queryset=qs) self.assertSequenceEqual(formset.get_queryset(), [a1, a2, a3]) AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0) formset = AuthorFormSet(queryset=qs) self.assertSequenceEqual(formset.get_queryset(), [a1, a2, a3]) AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4) formset = AuthorFormSet(queryset=qs) self.assertSequenceEqual(formset.get_queryset(), [a1, a2, a3]) def test_min_num(self): # Test the behavior of min_num with model formsets. It should be # added to extra. 
qs = Author.objects.none() AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 0) AuthorFormSet = modelformset_factory( Author, fields="__all__", min_num=1, extra=0 ) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 1) AuthorFormSet = modelformset_factory( Author, fields="__all__", min_num=1, extra=1 ) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 2) def test_min_num_with_existing(self): # Test the behavior of min_num with existing objects. Author.objects.create(name="Charles Baudelaire") qs = Author.objects.all() AuthorFormSet = modelformset_factory( Author, fields="__all__", extra=0, min_num=1 ) formset = AuthorFormSet(queryset=qs) self.assertEqual(len(formset.forms), 1) def test_custom_save_method(self): class PoetForm(forms.ModelForm): def save(self, commit=True): # change the name to "Vladimir Mayakovsky" just to be a jerk. author = super().save(commit=False) author.name = "Vladimir Mayakovsky" if commit: author.save() return author PoetFormSet = modelformset_factory(Poet, fields="__all__", form=PoetForm) data = { "form-TOTAL_FORMS": "3", # the number of forms rendered "form-INITIAL_FORMS": "0", # the number of forms with initial data "form-MAX_NUM_FORMS": "", # the max number of forms "form-0-name": "Walt Whitman", "form-1-name": "Charles Baudelaire", "form-2-name": "", } qs = Poet.objects.all() formset = PoetFormSet(data=data, queryset=qs) self.assertTrue(formset.is_valid()) poets = formset.save() self.assertEqual(len(poets), 2) poet1, poet2 = poets self.assertEqual(poet1.name, "Vladimir Mayakovsky") self.assertEqual(poet2.name, "Vladimir Mayakovsky") def test_custom_form(self): """ model_formset_factory() respects fields and exclude parameters of a custom form. """ class PostForm1(forms.ModelForm): class Meta: model = Post fields = ("title", "posted") class PostForm2(forms.ModelForm): class Meta: model = Post exclude = ("subtitle",) PostFormSet = modelformset_factory(Post, form=PostForm1) formset = PostFormSet() self.assertNotIn("subtitle", formset.forms[0].fields) PostFormSet = modelformset_factory(Post, form=PostForm2) formset = PostFormSet() self.assertNotIn("subtitle", formset.forms[0].fields) def test_custom_queryset_init(self): """ A queryset can be overridden in the formset's __init__() method. 
""" Author.objects.create(name="Charles Baudelaire") Author.objects.create(name="Paul Verlaine") class BaseAuthorFormSet(BaseModelFormSet): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.queryset = Author.objects.filter(name__startswith="Charles") AuthorFormSet = modelformset_factory( Author, fields="__all__", formset=BaseAuthorFormSet ) formset = AuthorFormSet() self.assertEqual(len(formset.get_queryset()), 1) def test_model_inheritance(self): BetterAuthorFormSet = modelformset_factory(BetterAuthor, fields="__all__") formset = BetterAuthorFormSet() self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-name">Name:</label>' '<input id="id_form-0-name" type="text" name="form-0-name" maxlength="100">' '</p><p><label for="id_form-0-write_speed">Write speed:</label>' '<input type="number" name="form-0-write_speed" id="id_form-0-write_speed">' '<input type="hidden" name="form-0-author_ptr" id="id_form-0-author_ptr">' "</p>", ) data = { "form-TOTAL_FORMS": "1", # the number of forms rendered "form-INITIAL_FORMS": "0", # the number of forms with initial data "form-MAX_NUM_FORMS": "", # the max number of forms "form-0-author_ptr": "", "form-0-name": "Ernest Hemingway", "form-0-write_speed": "10", } formset = BetterAuthorFormSet(data) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (author1,) = saved self.assertEqual(author1, BetterAuthor.objects.get(name="Ernest Hemingway")) hemingway_id = BetterAuthor.objects.get(name="Ernest Hemingway").pk formset = BetterAuthorFormSet() self.assertEqual(len(formset.forms), 2) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-name">Name:</label>' '<input id="id_form-0-name" type="text" name="form-0-name" ' 'value="Ernest Hemingway" maxlength="100"></p>' '<p><label for="id_form-0-write_speed">Write speed:</label>' '<input type="number" name="form-0-write_speed" value="10" ' 'id="id_form-0-write_speed">' '<input type="hidden" name="form-0-author_ptr" value="%d" ' 'id="id_form-0-author_ptr"></p>' % hemingway_id, ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_form-1-name">Name:</label>' '<input id="id_form-1-name" type="text" name="form-1-name" maxlength="100">' '</p><p><label for="id_form-1-write_speed">Write speed:</label>' '<input type="number" name="form-1-write_speed" id="id_form-1-write_speed">' '<input type="hidden" name="form-1-author_ptr" id="id_form-1-author_ptr">' "</p>", ) data = { "form-TOTAL_FORMS": "2", # the number of forms rendered "form-INITIAL_FORMS": "1", # the number of forms with initial data "form-MAX_NUM_FORMS": "", # the max number of forms "form-0-author_ptr": hemingway_id, "form-0-name": "Ernest Hemingway", "form-0-write_speed": "10", "form-1-author_ptr": "", "form-1-name": "", "form-1-write_speed": "", } formset = BetterAuthorFormSet(data) self.assertTrue(formset.is_valid()) self.assertEqual(formset.save(), []) def test_inline_formsets(self): # We can also create a formset that is tied to a parent model. This is # how the admin system's edit inline functionality works. 
AuthorBooksFormSet = inlineformset_factory( Author, Book, can_delete=False, extra=3, fields="__all__" ) author = Author.objects.create(name="Charles Baudelaire") formset = AuthorBooksFormSet(instance=author) self.assertEqual(len(formset.forms), 3) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_book_set-0-title">Title:</label>' '<input id="id_book_set-0-title" type="text" name="book_set-0-title" ' 'maxlength="100">' '<input type="hidden" name="book_set-0-author" value="%d" ' 'id="id_book_set-0-author">' '<input type="hidden" name="book_set-0-id" id="id_book_set-0-id">' "</p>" % author.id, ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_book_set-1-title">Title:</label>' '<input id="id_book_set-1-title" type="text" name="book_set-1-title" ' 'maxlength="100">' '<input type="hidden" name="book_set-1-author" value="%d" ' 'id="id_book_set-1-author">' '<input type="hidden" name="book_set-1-id" id="id_book_set-1-id"></p>' % author.id, ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_book_set-2-title">Title:</label>' '<input id="id_book_set-2-title" type="text" name="book_set-2-title" ' 'maxlength="100">' '<input type="hidden" name="book_set-2-author" value="%d" ' 'id="id_book_set-2-author">' '<input type="hidden" name="book_set-2-id" id="id_book_set-2-id"></p>' % author.id, ) data = { "book_set-TOTAL_FORMS": "3", # the number of forms rendered "book_set-INITIAL_FORMS": "0", # the number of forms with initial data "book_set-MAX_NUM_FORMS": "", # the max number of forms "book_set-0-title": "Les Fleurs du Mal", "book_set-1-title": "", "book_set-2-title": "", } formset = AuthorBooksFormSet(data, instance=author) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (book1,) = saved self.assertEqual(book1, Book.objects.get(title="Les Fleurs du Mal")) self.assertSequenceEqual(author.book_set.all(), [book1]) # Now that we've added a book to Charles Baudelaire, let's try adding # another one. This time though, an edit form will be available for # every existing book. 
AuthorBooksFormSet = inlineformset_factory( Author, Book, can_delete=False, extra=2, fields="__all__" ) author = Author.objects.get(name="Charles Baudelaire") formset = AuthorBooksFormSet(instance=author) self.assertEqual(len(formset.forms), 3) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_book_set-0-title">Title:</label>' '<input id="id_book_set-0-title" type="text" name="book_set-0-title" ' 'value="Les Fleurs du Mal" maxlength="100">' '<input type="hidden" name="book_set-0-author" value="%d" ' 'id="id_book_set-0-author">' '<input type="hidden" name="book_set-0-id" value="%d" ' 'id="id_book_set-0-id"></p>' % ( author.id, book1.id, ), ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_book_set-1-title">Title:</label>' '<input id="id_book_set-1-title" type="text" name="book_set-1-title" ' 'maxlength="100">' '<input type="hidden" name="book_set-1-author" value="%d" ' 'id="id_book_set-1-author">' '<input type="hidden" name="book_set-1-id" id="id_book_set-1-id"></p>' % author.id, ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_book_set-2-title">Title:</label>' '<input id="id_book_set-2-title" type="text" name="book_set-2-title" ' 'maxlength="100">' '<input type="hidden" name="book_set-2-author" value="%d" ' 'id="id_book_set-2-author">' '<input type="hidden" name="book_set-2-id" id="id_book_set-2-id"></p>' % author.id, ) data = { "book_set-TOTAL_FORMS": "3", # the number of forms rendered "book_set-INITIAL_FORMS": "1", # the number of forms with initial data "book_set-MAX_NUM_FORMS": "", # the max number of forms "book_set-0-id": str(book1.id), "book_set-0-title": "Les Fleurs du Mal", "book_set-1-title": "Les Paradis Artificiels", "book_set-2-title": "", } formset = AuthorBooksFormSet(data, instance=author) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (book2,) = saved self.assertEqual(book2, Book.objects.get(title="Les Paradis Artificiels")) # As you can see, 'Les Paradis Artificiels' is now a book belonging to # Charles Baudelaire. self.assertSequenceEqual(author.book_set.order_by("title"), [book1, book2]) def test_inline_formsets_save_as_new(self): # The save_as_new parameter lets you re-associate the data to a new # instance. This is used in the admin for save_as functionality. AuthorBooksFormSet = inlineformset_factory( Author, Book, can_delete=False, extra=2, fields="__all__" ) Author.objects.create(name="Charles Baudelaire") # An immutable QueryDict simulates request.POST. data = QueryDict(mutable=True) data.update( { "book_set-TOTAL_FORMS": "3", # the number of forms rendered "book_set-INITIAL_FORMS": "2", # the number of forms with initial data "book_set-MAX_NUM_FORMS": "", # the max number of forms "book_set-0-id": "1", "book_set-0-title": "Les Fleurs du Mal", "book_set-1-id": "2", "book_set-1-title": "Les Paradis Artificiels", "book_set-2-title": "", } ) data._mutable = False formset = AuthorBooksFormSet(data, instance=Author(), save_as_new=True) self.assertTrue(formset.is_valid()) self.assertIs(data._mutable, False) new_author = Author.objects.create(name="Charles Baudelaire") formset = AuthorBooksFormSet(data, instance=new_author, save_as_new=True) saved = formset.save() self.assertEqual(len(saved), 2) book1, book2 = saved self.assertEqual(book1.title, "Les Fleurs du Mal") self.assertEqual(book2.title, "Les Paradis Artificiels") # Test using a custom prefix on an inline formset. 
formset = AuthorBooksFormSet(prefix="test") self.assertEqual(len(formset.forms), 2) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_test-0-title">Title:</label>' '<input id="id_test-0-title" type="text" name="test-0-title" ' 'maxlength="100">' '<input type="hidden" name="test-0-author" id="id_test-0-author">' '<input type="hidden" name="test-0-id" id="id_test-0-id"></p>', ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_test-1-title">Title:</label>' '<input id="id_test-1-title" type="text" name="test-1-title" ' 'maxlength="100">' '<input type="hidden" name="test-1-author" id="id_test-1-author">' '<input type="hidden" name="test-1-id" id="id_test-1-id"></p>', ) def test_inline_formsets_with_custom_pk(self): # Test inline formsets where the inline-edited object has a custom # primary key that is not the fk to the parent object. self.maxDiff = 1024 AuthorBooksFormSet2 = inlineformset_factory( Author, BookWithCustomPK, can_delete=False, extra=1, fields="__all__" ) author = Author.objects.create(pk=1, name="Charles Baudelaire") formset = AuthorBooksFormSet2(instance=author) self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_bookwithcustompk_set-0-my_pk">My pk:</label>' '<input id="id_bookwithcustompk_set-0-my_pk" type="number" ' 'name="bookwithcustompk_set-0-my_pk" step="1"></p>' '<p><label for="id_bookwithcustompk_set-0-title">Title:</label>' '<input id="id_bookwithcustompk_set-0-title" type="text" ' 'name="bookwithcustompk_set-0-title" maxlength="100">' '<input type="hidden" name="bookwithcustompk_set-0-author" ' 'value="1" id="id_bookwithcustompk_set-0-author"></p>', ) data = { # The number of forms rendered. "bookwithcustompk_set-TOTAL_FORMS": "1", # The number of forms with initial data. "bookwithcustompk_set-INITIAL_FORMS": "0", # The max number of forms. "bookwithcustompk_set-MAX_NUM_FORMS": "", "bookwithcustompk_set-0-my_pk": "77777", "bookwithcustompk_set-0-title": "Les Fleurs du Mal", } formset = AuthorBooksFormSet2(data, instance=author) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (book1,) = saved self.assertEqual(book1.pk, 77777) book1 = author.bookwithcustompk_set.get() self.assertEqual(book1.title, "Les Fleurs du Mal") def test_inline_formsets_with_multi_table_inheritance(self): # Test inline formsets where the inline-edited object uses multi-table # inheritance, thus has a non AutoField yet auto-created primary key. AuthorBooksFormSet3 = inlineformset_factory( Author, AlternateBook, can_delete=False, extra=1, fields="__all__" ) author = Author.objects.create(pk=1, name="Charles Baudelaire") formset = AuthorBooksFormSet3(instance=author) self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_alternatebook_set-0-title">Title:</label>' '<input id="id_alternatebook_set-0-title" type="text" ' 'name="alternatebook_set-0-title" maxlength="100"></p>' '<p><label for="id_alternatebook_set-0-notes">Notes:</label>' '<input id="id_alternatebook_set-0-notes" type="text" ' 'name="alternatebook_set-0-notes" maxlength="100">' '<input type="hidden" name="alternatebook_set-0-author" value="1" ' 'id="id_alternatebook_set-0-author">' '<input type="hidden" name="alternatebook_set-0-book_ptr" ' 'id="id_alternatebook_set-0-book_ptr"></p>', ) data = { # The number of forms rendered. "alternatebook_set-TOTAL_FORMS": "1", # The number of forms with initial data. 
"alternatebook_set-INITIAL_FORMS": "0", # The max number of forms. "alternatebook_set-MAX_NUM_FORMS": "", "alternatebook_set-0-title": "Flowers of Evil", "alternatebook_set-0-notes": "English translation of Les Fleurs du Mal", } formset = AuthorBooksFormSet3(data, instance=author) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (book1,) = saved self.assertEqual(book1.title, "Flowers of Evil") self.assertEqual(book1.notes, "English translation of Les Fleurs du Mal") @skipUnlessDBFeature("supports_partially_nullable_unique_constraints") def test_inline_formsets_with_nullable_unique_together(self): # Test inline formsets where the inline-edited object has a # unique_together constraint with a nullable member AuthorBooksFormSet4 = inlineformset_factory( Author, BookWithOptionalAltEditor, can_delete=False, extra=2, fields="__all__", ) author = Author.objects.create(pk=1, name="Charles Baudelaire") data = { # The number of forms rendered. "bookwithoptionalalteditor_set-TOTAL_FORMS": "2", # The number of forms with initial data. "bookwithoptionalalteditor_set-INITIAL_FORMS": "0", # The max number of forms. "bookwithoptionalalteditor_set-MAX_NUM_FORMS": "", "bookwithoptionalalteditor_set-0-author": "1", "bookwithoptionalalteditor_set-0-title": "Les Fleurs du Mal", "bookwithoptionalalteditor_set-1-author": "1", "bookwithoptionalalteditor_set-1-title": "Les Fleurs du Mal", } formset = AuthorBooksFormSet4(data, instance=author) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 2) book1, book2 = saved self.assertEqual(book1.author_id, 1) self.assertEqual(book1.title, "Les Fleurs du Mal") self.assertEqual(book2.author_id, 1) self.assertEqual(book2.title, "Les Fleurs du Mal") def test_inline_formsets_with_custom_save_method(self): AuthorBooksFormSet = inlineformset_factory( Author, Book, can_delete=False, extra=2, fields="__all__" ) author = Author.objects.create(pk=1, name="Charles Baudelaire") book1 = Book.objects.create( pk=1, author=author, title="Les Paradis Artificiels" ) book2 = Book.objects.create(pk=2, author=author, title="Les Fleurs du Mal") book3 = Book.objects.create(pk=3, author=author, title="Flowers of Evil") class PoemForm(forms.ModelForm): def save(self, commit=True): # change the name to "Brooklyn Bridge" just to be a jerk. 
poem = super().save(commit=False) poem.name = "Brooklyn Bridge" if commit: poem.save() return poem PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm, fields="__all__") data = { "poem_set-TOTAL_FORMS": "3", # the number of forms rendered "poem_set-INITIAL_FORMS": "0", # the number of forms with initial data "poem_set-MAX_NUM_FORMS": "", # the max number of forms "poem_set-0-name": "The Cloud in Trousers", "poem_set-1-name": "I", "poem_set-2-name": "", } poet = Poet.objects.create(name="Vladimir Mayakovsky") formset = PoemFormSet(data=data, instance=poet) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 2) poem1, poem2 = saved self.assertEqual(poem1.name, "Brooklyn Bridge") self.assertEqual(poem2.name, "Brooklyn Bridge") # We can provide a custom queryset to our InlineFormSet: custom_qs = Book.objects.order_by("-title") formset = AuthorBooksFormSet(instance=author, queryset=custom_qs) self.assertEqual(len(formset.forms), 5) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_book_set-0-title">Title:</label>' '<input id="id_book_set-0-title" type="text" name="book_set-0-title" ' 'value="Les Paradis Artificiels" maxlength="100">' '<input type="hidden" name="book_set-0-author" value="1" ' 'id="id_book_set-0-author">' '<input type="hidden" name="book_set-0-id" value="1" id="id_book_set-0-id">' "</p>", ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_book_set-1-title">Title:</label>' '<input id="id_book_set-1-title" type="text" name="book_set-1-title" ' 'value="Les Fleurs du Mal" maxlength="100">' '<input type="hidden" name="book_set-1-author" value="1" ' 'id="id_book_set-1-author">' '<input type="hidden" name="book_set-1-id" value="2" id="id_book_set-1-id">' "</p>", ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_book_set-2-title">Title:</label>' '<input id="id_book_set-2-title" type="text" name="book_set-2-title" ' 'value="Flowers of Evil" maxlength="100">' '<input type="hidden" name="book_set-2-author" value="1" ' 'id="id_book_set-2-author">' '<input type="hidden" name="book_set-2-id" value="3" ' 'id="id_book_set-2-id"></p>', ) self.assertHTMLEqual( formset.forms[3].as_p(), '<p><label for="id_book_set-3-title">Title:</label>' '<input id="id_book_set-3-title" type="text" name="book_set-3-title" ' 'maxlength="100">' '<input type="hidden" name="book_set-3-author" value="1" ' 'id="id_book_set-3-author">' '<input type="hidden" name="book_set-3-id" id="id_book_set-3-id"></p>', ) self.assertHTMLEqual( formset.forms[4].as_p(), '<p><label for="id_book_set-4-title">Title:</label>' '<input id="id_book_set-4-title" type="text" name="book_set-4-title" ' 'maxlength="100">' '<input type="hidden" name="book_set-4-author" value="1" ' 'id="id_book_set-4-author">' '<input type="hidden" name="book_set-4-id" id="id_book_set-4-id"></p>', ) data = { "book_set-TOTAL_FORMS": "5", # the number of forms rendered "book_set-INITIAL_FORMS": "3", # the number of forms with initial data "book_set-MAX_NUM_FORMS": "", # the max number of forms "book_set-0-id": str(book1.id), "book_set-0-title": "Les Paradis Artificiels", "book_set-1-id": str(book2.id), "book_set-1-title": "Les Fleurs du Mal", "book_set-2-id": str(book3.id), "book_set-2-title": "Flowers of Evil", "book_set-3-title": "Revue des deux mondes", "book_set-4-title": "", } formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs) self.assertTrue(formset.is_valid()) custom_qs = Book.objects.filter(title__startswith="F") formset = 
AuthorBooksFormSet(instance=author, queryset=custom_qs) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_book_set-0-title">Title:</label>' '<input id="id_book_set-0-title" type="text" name="book_set-0-title" ' 'value="Flowers of Evil" maxlength="100">' '<input type="hidden" name="book_set-0-author" value="1" ' 'id="id_book_set-0-author">' '<input type="hidden" name="book_set-0-id" value="3" ' 'id="id_book_set-0-id"></p>', ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_book_set-1-title">Title:</label>' '<input id="id_book_set-1-title" type="text" name="book_set-1-title" ' 'maxlength="100">' '<input type="hidden" name="book_set-1-author" value="1" ' 'id="id_book_set-1-author">' '<input type="hidden" name="book_set-1-id" id="id_book_set-1-id"></p>', ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_book_set-2-title">Title:</label>' '<input id="id_book_set-2-title" type="text" name="book_set-2-title" ' 'maxlength="100">' '<input type="hidden" name="book_set-2-author" value="1" ' 'id="id_book_set-2-author">' '<input type="hidden" name="book_set-2-id" id="id_book_set-2-id"></p>', ) data = { "book_set-TOTAL_FORMS": "3", # the number of forms rendered "book_set-INITIAL_FORMS": "1", # the number of forms with initial data "book_set-MAX_NUM_FORMS": "", # the max number of forms "book_set-0-id": str(book3.id), "book_set-0-title": "Flowers of Evil", "book_set-1-title": "Revue des deux mondes", "book_set-2-title": "", } formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs) self.assertTrue(formset.is_valid()) def test_inline_formsets_with_custom_save_method_related_instance(self): """ The ModelForm.save() method should be able to access the related object if it exists in the database (#24395). """ class PoemForm2(forms.ModelForm): def save(self, commit=True): poem = super().save(commit=False) poem.name = "%s by %s" % (poem.name, poem.poet.name) if commit: poem.save() return poem PoemFormSet = inlineformset_factory( Poet, Poem, form=PoemForm2, fields="__all__" ) data = { "poem_set-TOTAL_FORMS": "1", "poem_set-INITIAL_FORMS": "0", "poem_set-MAX_NUM_FORMS": "", "poem_set-0-name": "Le Lac", } poet = Poet() formset = PoemFormSet(data=data, instance=poet) self.assertTrue(formset.is_valid()) # The Poet instance is saved after the formset instantiation. This # happens in admin's changeform_view() when adding a new object and # some inlines in the same request. poet.name = "Lamartine" poet.save() poem = formset.save()[0] self.assertEqual(poem.name, "Le Lac by Lamartine") def test_inline_formsets_with_wrong_fk_name(self): """Regression for #23451""" message = "fk_name 'title' is not a ForeignKey to 'model_formsets.Author'." 
with self.assertRaisesMessage(ValueError, message): inlineformset_factory(Author, Book, fields="__all__", fk_name="title") def test_custom_pk(self): # We need to ensure that it is displayed CustomPrimaryKeyFormSet = modelformset_factory( CustomPrimaryKey, fields="__all__" ) formset = CustomPrimaryKeyFormSet() self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-my_pk">My pk:</label>' '<input id="id_form-0-my_pk" type="text" name="form-0-my_pk" ' 'maxlength="10"></p>' '<p><label for="id_form-0-some_field">Some field:</label>' '<input id="id_form-0-some_field" type="text" name="form-0-some_field" ' 'maxlength="100"></p>', ) # Custom primary keys with ForeignKey, OneToOneField and AutoField ############ place = Place.objects.create(pk=1, name="Giordanos", city="Chicago") FormSet = inlineformset_factory( Place, Owner, extra=2, can_delete=False, fields="__all__" ) formset = FormSet(instance=place) self.assertEqual(len(formset.forms), 2) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_owner_set-0-name">Name:</label>' '<input id="id_owner_set-0-name" type="text" name="owner_set-0-name" ' 'maxlength="100">' '<input type="hidden" name="owner_set-0-place" value="1" ' 'id="id_owner_set-0-place">' '<input type="hidden" name="owner_set-0-auto_id" ' 'id="id_owner_set-0-auto_id"></p>', ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_owner_set-1-name">Name:</label>' '<input id="id_owner_set-1-name" type="text" name="owner_set-1-name" ' 'maxlength="100">' '<input type="hidden" name="owner_set-1-place" value="1" ' 'id="id_owner_set-1-place">' '<input type="hidden" name="owner_set-1-auto_id" ' 'id="id_owner_set-1-auto_id"></p>', ) data = { "owner_set-TOTAL_FORMS": "2", "owner_set-INITIAL_FORMS": "0", "owner_set-MAX_NUM_FORMS": "", "owner_set-0-auto_id": "", "owner_set-0-name": "Joe Perry", "owner_set-1-auto_id": "", "owner_set-1-name": "", } formset = FormSet(data, instance=place) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (owner1,) = saved self.assertEqual(owner1.name, "Joe Perry") self.assertEqual(owner1.place.name, "Giordanos") formset = FormSet(instance=place) self.assertEqual(len(formset.forms), 3) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_owner_set-0-name">Name:</label>' '<input id="id_owner_set-0-name" type="text" name="owner_set-0-name" ' 'value="Joe Perry" maxlength="100">' '<input type="hidden" name="owner_set-0-place" value="1" ' 'id="id_owner_set-0-place">' '<input type="hidden" name="owner_set-0-auto_id" value="%d" ' 'id="id_owner_set-0-auto_id"></p>' % owner1.auto_id, ) self.assertHTMLEqual( formset.forms[1].as_p(), '<p><label for="id_owner_set-1-name">Name:</label>' '<input id="id_owner_set-1-name" type="text" name="owner_set-1-name" ' 'maxlength="100">' '<input type="hidden" name="owner_set-1-place" value="1" ' 'id="id_owner_set-1-place">' '<input type="hidden" name="owner_set-1-auto_id" ' 'id="id_owner_set-1-auto_id"></p>', ) self.assertHTMLEqual( formset.forms[2].as_p(), '<p><label for="id_owner_set-2-name">Name:</label>' '<input id="id_owner_set-2-name" type="text" name="owner_set-2-name" ' 'maxlength="100">' '<input type="hidden" name="owner_set-2-place" value="1" ' 'id="id_owner_set-2-place">' '<input type="hidden" name="owner_set-2-auto_id" ' 'id="id_owner_set-2-auto_id"></p>', ) data = { "owner_set-TOTAL_FORMS": "3", "owner_set-INITIAL_FORMS": "1", "owner_set-MAX_NUM_FORMS": "", "owner_set-0-auto_id": 
str(owner1.auto_id), "owner_set-0-name": "Joe Perry", "owner_set-1-auto_id": "", "owner_set-1-name": "Jack Berry", "owner_set-2-auto_id": "", "owner_set-2-name": "", } formset = FormSet(data, instance=place) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (owner2,) = saved self.assertEqual(owner2.name, "Jack Berry") self.assertEqual(owner2.place.name, "Giordanos") # A custom primary key that is a ForeignKey or OneToOneField get # rendered for the user to choose. FormSet = modelformset_factory(OwnerProfile, fields="__all__") formset = FormSet() self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_form-0-owner">Owner:</label>' '<select name="form-0-owner" id="id_form-0-owner">' '<option value="" selected>---------</option>' '<option value="%d">Joe Perry at Giordanos</option>' '<option value="%d">Jack Berry at Giordanos</option>' "</select></p>" '<p><label for="id_form-0-age">Age:</label>' '<input type="number" name="form-0-age" id="id_form-0-age" min="0"></p>' % (owner1.auto_id, owner2.auto_id), ) owner1 = Owner.objects.get(name="Joe Perry") FormSet = inlineformset_factory( Owner, OwnerProfile, max_num=1, can_delete=False, fields="__all__" ) self.assertEqual(FormSet.max_num, 1) formset = FormSet(instance=owner1) self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_ownerprofile-0-age">Age:</label>' '<input type="number" name="ownerprofile-0-age" ' 'id="id_ownerprofile-0-age" min="0">' '<input type="hidden" name="ownerprofile-0-owner" value="%d" ' 'id="id_ownerprofile-0-owner"></p>' % owner1.auto_id, ) data = { "ownerprofile-TOTAL_FORMS": "1", "ownerprofile-INITIAL_FORMS": "0", "ownerprofile-MAX_NUM_FORMS": "1", "ownerprofile-0-owner": "", "ownerprofile-0-age": "54", } formset = FormSet(data, instance=owner1) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (profile1,) = saved self.assertEqual(profile1.owner, owner1) self.assertEqual(profile1.age, 54) formset = FormSet(instance=owner1) self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_ownerprofile-0-age">Age:</label>' '<input type="number" name="ownerprofile-0-age" value="54" ' 'id="id_ownerprofile-0-age" min="0">' '<input type="hidden" name="ownerprofile-0-owner" value="%d" ' 'id="id_ownerprofile-0-owner"></p>' % owner1.auto_id, ) data = { "ownerprofile-TOTAL_FORMS": "1", "ownerprofile-INITIAL_FORMS": "1", "ownerprofile-MAX_NUM_FORMS": "1", "ownerprofile-0-owner": str(owner1.auto_id), "ownerprofile-0-age": "55", } formset = FormSet(data, instance=owner1) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (profile1,) = saved self.assertEqual(profile1.owner, owner1) self.assertEqual(profile1.age, 55) def test_unique_true_enforces_max_num_one(self): # ForeignKey with unique=True should enforce max_num=1 place = Place.objects.create(pk=1, name="Giordanos", city="Chicago") FormSet = inlineformset_factory( Place, Location, can_delete=False, fields="__all__" ) self.assertEqual(FormSet.max_num, 1) formset = FormSet(instance=place) self.assertEqual(len(formset.forms), 1) self.assertHTMLEqual( formset.forms[0].as_p(), '<p><label for="id_location_set-0-lat">Lat:</label>' '<input id="id_location_set-0-lat" type="text" name="location_set-0-lat" ' 'maxlength="100"></p>' '<p><label for="id_location_set-0-lon">Lon:</label>' '<input id="id_location_set-0-lon" type="text" name="location_set-0-lon" ' 'maxlength="100">' 
'<input type="hidden" name="location_set-0-place" value="1" ' 'id="id_location_set-0-place">' '<input type="hidden" name="location_set-0-id" ' 'id="id_location_set-0-id"></p>', ) def test_foreign_keys_in_parents(self): self.assertEqual(type(_get_foreign_key(Restaurant, Owner)), models.ForeignKey) self.assertEqual( type(_get_foreign_key(MexicanRestaurant, Owner)), models.ForeignKey ) def test_unique_validation(self): FormSet = modelformset_factory(Product, fields="__all__", extra=1) data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "", "form-0-slug": "car-red", } formset = FormSet(data) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (product1,) = saved self.assertEqual(product1.slug, "car-red") data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "", "form-0-slug": "car-red", } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual( formset.errors, [{"slug": ["Product with this Slug already exists."]}] ) def test_modelformset_validate_max_flag(self): # If validate_max is set and max_num is less than TOTAL_FORMS in the # data, then throw an exception. MAX_NUM_FORMS in the data is # irrelevant here (it's output as a hint for the client but its # value in the returned data is not checked) data = { "form-TOTAL_FORMS": "2", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "2", # should be ignored "form-0-price": "12.00", "form-0-quantity": "1", "form-1-price": "24.00", "form-1-quantity": "2", } FormSet = modelformset_factory( Price, fields="__all__", extra=1, max_num=1, validate_max=True ) formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual(formset.non_form_errors(), ["Please submit at most 1 form."]) # Now test the same thing without the validate_max flag to ensure # default behavior is unchanged FormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1) formset = FormSet(data) self.assertTrue(formset.is_valid()) def test_modelformset_min_num_equals_max_num_less_than(self): data = { "form-TOTAL_FORMS": "3", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "2", "form-0-slug": "car-red", "form-1-slug": "car-blue", "form-2-slug": "car-black", } FormSet = modelformset_factory( Product, fields="__all__", extra=1, max_num=2, validate_max=True, min_num=2, validate_min=True, ) formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual(formset.non_form_errors(), ["Please submit at most 2 forms."]) def test_modelformset_min_num_equals_max_num_more_than(self): data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "2", "form-0-slug": "car-red", } FormSet = modelformset_factory( Product, fields="__all__", extra=1, max_num=2, validate_max=True, min_num=2, validate_min=True, ) formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual(formset.non_form_errors(), ["Please submit at least 2 forms."]) def test_unique_together_validation(self): FormSet = modelformset_factory(Price, fields="__all__", extra=1) data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "", "form-0-price": "12.00", "form-0-quantity": "1", } formset = FormSet(data) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (price1,) = saved self.assertEqual(price1.price, Decimal("12.00")) self.assertEqual(price1.quantity, 1) data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "", "form-0-price": "12.00", 
"form-0-quantity": "1", } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual( formset.errors, [{"__all__": ["Price with this Price and Quantity already exists."]}], ) def test_unique_together_with_inlineformset_factory(self): # Also see bug #8882. repository = Repository.objects.create(name="Test Repo") FormSet = inlineformset_factory(Repository, Revision, extra=1, fields="__all__") data = { "revision_set-TOTAL_FORMS": "1", "revision_set-INITIAL_FORMS": "0", "revision_set-MAX_NUM_FORMS": "", "revision_set-0-repository": repository.pk, "revision_set-0-revision": "146239817507f148d448db38840db7c3cbf47c76", "revision_set-0-DELETE": "", } formset = FormSet(data, instance=repository) self.assertTrue(formset.is_valid()) saved = formset.save() self.assertEqual(len(saved), 1) (revision1,) = saved self.assertEqual(revision1.repository, repository) self.assertEqual(revision1.revision, "146239817507f148d448db38840db7c3cbf47c76") # attempt to save the same revision against the same repo. data = { "revision_set-TOTAL_FORMS": "1", "revision_set-INITIAL_FORMS": "0", "revision_set-MAX_NUM_FORMS": "", "revision_set-0-repository": repository.pk, "revision_set-0-revision": "146239817507f148d448db38840db7c3cbf47c76", "revision_set-0-DELETE": "", } formset = FormSet(data, instance=repository) self.assertFalse(formset.is_valid()) self.assertEqual( formset.errors, [ { "__all__": [ "Revision with this Repository and Revision already exists." ] } ], ) # unique_together with inlineformset_factory with overridden form fields # Also see #9494 FormSet = inlineformset_factory( Repository, Revision, fields=("revision",), extra=1 ) data = { "revision_set-TOTAL_FORMS": "1", "revision_set-INITIAL_FORMS": "0", "revision_set-MAX_NUM_FORMS": "", "revision_set-0-repository": repository.pk, "revision_set-0-revision": "146239817507f148d448db38840db7c3cbf47c76", "revision_set-0-DELETE": "", } formset = FormSet(data, instance=repository) self.assertFalse(formset.is_valid()) def test_callable_defaults(self): # Use of callable defaults (see bug #7975). person = Person.objects.create(name="Ringo") FormSet = inlineformset_factory( Person, Membership, can_delete=False, extra=1, fields="__all__" ) formset = FormSet(instance=person) # Django will render a hidden field for model fields that have a callable # default. This is required to ensure the value is tested for change correctly # when determine what extra forms have changed to save. self.assertEqual(len(formset.forms), 1) # this formset only has one form form = formset.forms[0] now = form.fields["date_joined"].initial() result = form.as_p() result = re.sub( r"[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}(?:\.[0-9]+)?", "__DATETIME__", result, ) self.assertHTMLEqual( result, '<p><label for="id_membership_set-0-date_joined">Date joined:</label>' '<input type="text" name="membership_set-0-date_joined" ' 'value="__DATETIME__" id="id_membership_set-0-date_joined">' '<input type="hidden" name="initial-membership_set-0-date_joined" ' 'value="__DATETIME__" ' 'id="initial-membership_set-0-id_membership_set-0-date_joined"></p>' '<p><label for="id_membership_set-0-karma">Karma:</label>' '<input type="number" name="membership_set-0-karma" ' 'id="id_membership_set-0-karma">' '<input type="hidden" name="membership_set-0-person" value="%d" ' 'id="id_membership_set-0-person">' '<input type="hidden" name="membership_set-0-id" ' 'id="id_membership_set-0-id"></p>' % person.id, ) # test for validation with callable defaults. 
Validations rely on hidden fields data = { "membership_set-TOTAL_FORMS": "1", "membership_set-INITIAL_FORMS": "0", "membership_set-MAX_NUM_FORMS": "", "membership_set-0-date_joined": now.strftime("%Y-%m-%d %H:%M:%S"), "initial-membership_set-0-date_joined": now.strftime("%Y-%m-%d %H:%M:%S"), "membership_set-0-karma": "", } formset = FormSet(data, instance=person) self.assertTrue(formset.is_valid()) # now test for when the data changes one_day_later = now + datetime.timedelta(days=1) filled_data = { "membership_set-TOTAL_FORMS": "1", "membership_set-INITIAL_FORMS": "0", "membership_set-MAX_NUM_FORMS": "", "membership_set-0-date_joined": one_day_later.strftime("%Y-%m-%d %H:%M:%S"), "initial-membership_set-0-date_joined": now.strftime("%Y-%m-%d %H:%M:%S"), "membership_set-0-karma": "", } formset = FormSet(filled_data, instance=person) self.assertFalse(formset.is_valid()) # now test with split datetime fields class MembershipForm(forms.ModelForm): date_joined = forms.SplitDateTimeField(initial=now) class Meta: model = Membership fields = "__all__" def __init__(self, **kwargs): super().__init__(**kwargs) self.fields["date_joined"].widget = forms.SplitDateTimeWidget() FormSet = inlineformset_factory( Person, Membership, form=MembershipForm, can_delete=False, extra=1, fields="__all__", ) data = { "membership_set-TOTAL_FORMS": "1", "membership_set-INITIAL_FORMS": "0", "membership_set-MAX_NUM_FORMS": "", "membership_set-0-date_joined_0": now.strftime("%Y-%m-%d"), "membership_set-0-date_joined_1": now.strftime("%H:%M:%S"), "initial-membership_set-0-date_joined": now.strftime("%Y-%m-%d %H:%M:%S"), "membership_set-0-karma": "", } formset = FormSet(data, instance=person) self.assertTrue(formset.is_valid()) def test_inlineformset_factory_with_null_fk(self): # inlineformset_factory tests with fk having null=True. see #9462. 
# create some data that will exhibit the issue team = Team.objects.create(name="Red Vipers") Player(name="Timmy").save() Player(name="Bobby", team=team).save() PlayerInlineFormSet = inlineformset_factory(Team, Player, fields="__all__") formset = PlayerInlineFormSet() self.assertQuerysetEqual(formset.get_queryset(), []) formset = PlayerInlineFormSet(instance=team) players = formset.get_queryset() self.assertEqual(len(players), 1) (player1,) = players self.assertEqual(player1.team, team) self.assertEqual(player1.name, "Bobby") def test_inlineformset_with_arrayfield(self): class SimpleArrayField(forms.CharField): """A proxy for django.contrib.postgres.forms.SimpleArrayField.""" def to_python(self, value): value = super().to_python(value) return value.split(",") if value else [] class BookForm(forms.ModelForm): title = SimpleArrayField() class Meta: model = Book fields = ("title",) BookFormSet = inlineformset_factory(Author, Book, form=BookForm) data = { "book_set-TOTAL_FORMS": "3", "book_set-INITIAL_FORMS": "0", "book_set-MAX_NUM_FORMS": "", "book_set-0-title": "test1,test2", "book_set-1-title": "test1,test2", "book_set-2-title": "test3,test4", } author = Author.objects.create(name="test") formset = BookFormSet(data, instance=author) self.assertEqual( formset.errors, [{}, {"__all__": ["Please correct the duplicate values below."]}, {}], ) def test_model_formset_with_custom_pk(self): # a formset for a Model that has a custom primary key that still needs to be # added to the formset automatically FormSet = modelformset_factory( ClassyMexicanRestaurant, fields=["tacos_are_yummy"] ) self.assertEqual( sorted(FormSet().forms[0].fields), ["tacos_are_yummy", "the_restaurant"] ) def test_model_formset_with_initial_model_instance(self): # has_changed should compare model instance and primary key # see #18898 FormSet = modelformset_factory(Poem, fields="__all__") john_milton = Poet(name="John Milton") john_milton.save() data = { "form-TOTAL_FORMS": 1, "form-INITIAL_FORMS": 0, "form-MAX_NUM_FORMS": "", "form-0-name": "", "form-0-poet": str(john_milton.id), } formset = FormSet(initial=[{"poet": john_milton}], data=data) self.assertFalse(formset.extra_forms[0].has_changed()) def test_model_formset_with_initial_queryset(self): # has_changed should work with queryset and list of pk's # see #18898 FormSet = modelformset_factory(AuthorMeeting, fields="__all__") Author.objects.create(pk=1, name="Charles Baudelaire") data = { "form-TOTAL_FORMS": 1, "form-INITIAL_FORMS": 0, "form-MAX_NUM_FORMS": "", "form-0-name": "", "form-0-created": "", "form-0-authors": list(Author.objects.values_list("id", flat=True)), } formset = FormSet(initial=[{"authors": Author.objects.all()}], data=data) self.assertFalse(formset.extra_forms[0].has_changed()) def test_prevent_duplicates_from_with_the_same_formset(self): FormSet = modelformset_factory(Product, fields="__all__", extra=2) data = { "form-TOTAL_FORMS": 2, "form-INITIAL_FORMS": 0, "form-MAX_NUM_FORMS": "", "form-0-slug": "red_car", "form-1-slug": "red_car", } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual( formset._non_form_errors, ["Please correct the duplicate data for slug."] ) FormSet = modelformset_factory(Price, fields="__all__", extra=2) data = { "form-TOTAL_FORMS": 2, "form-INITIAL_FORMS": 0, "form-MAX_NUM_FORMS": "", "form-0-price": "25", "form-0-quantity": "7", "form-1-price": "25", "form-1-quantity": "7", } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual( formset._non_form_errors, [ "Please correct the 
duplicate data for price and quantity, which must " "be unique." ], ) # Only the price field is specified, this should skip any unique # checks since the unique_together is not fulfilled. This will fail # with a KeyError if broken. FormSet = modelformset_factory(Price, fields=("price",), extra=2) data = { "form-TOTAL_FORMS": "2", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "", "form-0-price": "24", "form-1-price": "24", } formset = FormSet(data) self.assertTrue(formset.is_valid()) FormSet = inlineformset_factory(Author, Book, extra=0, fields="__all__") author = Author.objects.create(pk=1, name="Charles Baudelaire") Book.objects.create(pk=1, author=author, title="Les Paradis Artificiels") Book.objects.create(pk=2, author=author, title="Les Fleurs du Mal") Book.objects.create(pk=3, author=author, title="Flowers of Evil") book_ids = author.book_set.order_by("id").values_list("id", flat=True) data = { "book_set-TOTAL_FORMS": "2", "book_set-INITIAL_FORMS": "2", "book_set-MAX_NUM_FORMS": "", "book_set-0-title": "The 2008 Election", "book_set-0-author": str(author.id), "book_set-0-id": str(book_ids[0]), "book_set-1-title": "The 2008 Election", "book_set-1-author": str(author.id), "book_set-1-id": str(book_ids[1]), } formset = FormSet(data=data, instance=author) self.assertFalse(formset.is_valid()) self.assertEqual( formset._non_form_errors, ["Please correct the duplicate data for title."] ) self.assertEqual( formset.errors, [{}, {"__all__": ["Please correct the duplicate values below."]}], ) FormSet = modelformset_factory(Post, fields="__all__", extra=2) data = { "form-TOTAL_FORMS": "2", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "", "form-0-title": "blah", "form-0-slug": "Morning", "form-0-subtitle": "foo", "form-0-posted": "2009-01-01", "form-1-title": "blah", "form-1-slug": "Morning in Prague", "form-1-subtitle": "rawr", "form-1-posted": "2009-01-01", } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual( formset._non_form_errors, [ "Please correct the duplicate data for title which must be unique for " "the date in posted." ], ) self.assertEqual( formset.errors, [{}, {"__all__": ["Please correct the duplicate values below."]}], ) data = { "form-TOTAL_FORMS": "2", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "", "form-0-title": "foo", "form-0-slug": "Morning in Prague", "form-0-subtitle": "foo", "form-0-posted": "2009-01-01", "form-1-title": "blah", "form-1-slug": "Morning in Prague", "form-1-subtitle": "rawr", "form-1-posted": "2009-08-02", } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual( formset._non_form_errors, [ "Please correct the duplicate data for slug which must be unique for " "the year in posted." ], ) data = { "form-TOTAL_FORMS": "2", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "", "form-0-title": "foo", "form-0-slug": "Morning in Prague", "form-0-subtitle": "rawr", "form-0-posted": "2008-08-01", "form-1-title": "blah", "form-1-slug": "Prague", "form-1-subtitle": "rawr", "form-1-posted": "2009-08-02", } formset = FormSet(data) self.assertFalse(formset.is_valid()) self.assertEqual( formset._non_form_errors, [ "Please correct the duplicate data for subtitle which must be unique " "for the month in posted." 
], ) def test_prevent_change_outer_model_and_create_invalid_data(self): author = Author.objects.create(name="Charles") other_author = Author.objects.create(name="Walt") AuthorFormSet = modelformset_factory(Author, fields="__all__") data = { "form-TOTAL_FORMS": "2", "form-INITIAL_FORMS": "2", "form-MAX_NUM_FORMS": "", "form-0-id": str(author.id), "form-0-name": "Charles", "form-1-id": str(other_author.id), # A model not in the formset's queryset. "form-1-name": "Changed name", } # This formset is only for Walt Whitman and shouldn't accept data for # other_author. formset = AuthorFormSet( data=data, queryset=Author.objects.filter(id__in=(author.id,)) ) self.assertTrue(formset.is_valid()) formset.save() # The name of other_author shouldn't be changed and new models aren't # created. self.assertSequenceEqual(Author.objects.all(), [author, other_author]) def test_validation_without_id(self): AuthorFormSet = modelformset_factory(Author, fields="__all__") data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "1", "form-MAX_NUM_FORMS": "", "form-0-name": "Charles", } formset = AuthorFormSet(data) self.assertEqual( formset.errors, [{"id": ["This field is required."]}], ) def test_validation_with_child_model_without_id(self): BetterAuthorFormSet = modelformset_factory(BetterAuthor, fields="__all__") data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "1", "form-MAX_NUM_FORMS": "", "form-0-name": "Charles", "form-0-write_speed": "10", } formset = BetterAuthorFormSet(data) self.assertEqual( formset.errors, [{"author_ptr": ["This field is required."]}], ) def test_validation_with_invalid_id(self): AuthorFormSet = modelformset_factory(Author, fields="__all__") data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "1", "form-MAX_NUM_FORMS": "", "form-0-id": "abc", "form-0-name": "Charles", } formset = AuthorFormSet(data) self.assertEqual( formset.errors, [ { "id": [ "Select a valid choice. That choice is not one of the " "available choices." ] } ], ) def test_validation_with_nonexistent_id(self): AuthorFormSet = modelformset_factory(Author, fields="__all__") data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "1", "form-MAX_NUM_FORMS": "", "form-0-id": "12345", "form-0-name": "Charles", } formset = AuthorFormSet(data) self.assertEqual( formset.errors, [ { "id": [ "Select a valid choice. That choice is not one of the " "available choices." 
] } ], ) def test_initial_form_count_empty_data(self): AuthorFormSet = modelformset_factory(Author, fields="__all__") formset = AuthorFormSet({}) self.assertEqual(formset.initial_form_count(), 0) def test_edit_only(self): charles = Author.objects.create(name="Charles Baudelaire") AuthorFormSet = modelformset_factory(Author, fields="__all__", edit_only=True) data = { "form-TOTAL_FORMS": "2", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "0", "form-0-name": "Arthur Rimbaud", "form-1-name": "Walt Whitman", } formset = AuthorFormSet(data) self.assertIs(formset.is_valid(), True) formset.save() self.assertSequenceEqual(Author.objects.all(), [charles]) data = { "form-TOTAL_FORMS": "2", "form-INITIAL_FORMS": "1", "form-MAX_NUM_FORMS": "0", "form-0-id": charles.pk, "form-0-name": "Arthur Rimbaud", "form-1-name": "Walt Whitman", } formset = AuthorFormSet(data) self.assertIs(formset.is_valid(), True) formset.save() charles.refresh_from_db() self.assertEqual(charles.name, "Arthur Rimbaud") self.assertSequenceEqual(Author.objects.all(), [charles]) def test_edit_only_inlineformset_factory(self): charles = Author.objects.create(name="Charles Baudelaire") book = Book.objects.create(author=charles, title="Les Paradis Artificiels") AuthorFormSet = inlineformset_factory( Author, Book, can_delete=False, fields="__all__", edit_only=True, ) data = { "book_set-TOTAL_FORMS": "4", "book_set-INITIAL_FORMS": "1", "book_set-MAX_NUM_FORMS": "0", "book_set-0-id": book.pk, "book_set-0-title": "Les Fleurs du Mal", "book_set-0-author": charles.pk, "book_set-1-title": "Flowers of Evil", "book_set-1-author": charles.pk, } formset = AuthorFormSet(data, instance=charles) self.assertIs(formset.is_valid(), True) formset.save() book.refresh_from_db() self.assertEqual(book.title, "Les Fleurs du Mal") self.assertSequenceEqual(Book.objects.all(), [book]) def test_edit_only_object_outside_of_queryset(self): charles = Author.objects.create(name="Charles Baudelaire") walt = Author.objects.create(name="Walt Whitman") data = { "form-TOTAL_FORMS": "1", "form-INITIAL_FORMS": "1", "form-0-id": walt.pk, "form-0-name": "Parth Patil", } AuthorFormSet = modelformset_factory(Author, fields="__all__", edit_only=True) formset = AuthorFormSet(data, queryset=Author.objects.filter(pk=charles.pk)) self.assertIs(formset.is_valid(), True) formset.save() self.assertCountEqual(Author.objects.all(), [charles, walt]) def test_edit_only_formset_factory_with_basemodelformset(self): charles = Author.objects.create(name="Charles Baudelaire") class AuthorForm(forms.ModelForm): class Meta: model = Author fields = "__all__" class BaseAuthorFormSet(BaseModelFormSet): def __init__(self, *args, **kwargs): self.model = Author super().__init__(*args, **kwargs) AuthorFormSet = formset_factory(AuthorForm, formset=BaseAuthorFormSet) data = { "form-TOTAL_FORMS": "2", "form-INITIAL_FORMS": "1", "form-MAX_NUM_FORMS": "0", "form-0-id": charles.pk, "form-0-name": "Shawn Dong", "form-1-name": "Walt Whitman", } formset = AuthorFormSet(data) self.assertIs(formset.is_valid(), True) formset.save() self.assertEqual(Author.objects.count(), 2) charles.refresh_from_db() self.assertEqual(charles.name, "Shawn Dong") self.assertEqual(Author.objects.count(), 2) class TestModelFormsetOverridesTroughFormMeta(TestCase): def test_modelformset_factory_widgets(self): widgets = {"name": forms.TextInput(attrs={"class": "poet"})} PoetFormSet = modelformset_factory(Poet, fields="__all__", widgets=widgets) form = PoetFormSet.form() self.assertHTMLEqual( str(form["name"]), '<input id="id_name" 
maxlength="100" type="text" class="poet" name="name" ' "required>", ) def test_inlineformset_factory_widgets(self): widgets = {"title": forms.TextInput(attrs={"class": "book"})} BookFormSet = inlineformset_factory( Author, Book, widgets=widgets, fields="__all__" ) form = BookFormSet.form() self.assertHTMLEqual( str(form["title"]), '<input class="book" id="id_title" maxlength="100" name="title" ' 'type="text" required>', ) def test_modelformset_factory_labels_overrides(self): BookFormSet = modelformset_factory( Book, fields="__all__", labels={"title": "Name"} ) form = BookFormSet.form() self.assertHTMLEqual( form["title"].label_tag(), '<label for="id_title">Name:</label>' ) self.assertHTMLEqual( form["title"].legend_tag(), '<legend for="id_title">Name:</legend>', ) def test_inlineformset_factory_labels_overrides(self): BookFormSet = inlineformset_factory( Author, Book, fields="__all__", labels={"title": "Name"} ) form = BookFormSet.form() self.assertHTMLEqual( form["title"].label_tag(), '<label for="id_title">Name:</label>' ) self.assertHTMLEqual( form["title"].legend_tag(), '<legend for="id_title">Name:</legend>', ) def test_modelformset_factory_help_text_overrides(self): BookFormSet = modelformset_factory( Book, fields="__all__", help_texts={"title": "Choose carefully."} ) form = BookFormSet.form() self.assertEqual(form["title"].help_text, "Choose carefully.") def test_inlineformset_factory_help_text_overrides(self): BookFormSet = inlineformset_factory( Author, Book, fields="__all__", help_texts={"title": "Choose carefully."} ) form = BookFormSet.form() self.assertEqual(form["title"].help_text, "Choose carefully.") def test_modelformset_factory_error_messages_overrides(self): author = Author.objects.create(pk=1, name="Charles Baudelaire") BookFormSet = modelformset_factory( Book, fields="__all__", error_messages={"title": {"max_length": "Title too long!!"}}, ) form = BookFormSet.form(data={"title": "Foo " * 30, "author": author.id}) form.full_clean() self.assertEqual(form.errors, {"title": ["Title too long!!"]}) def test_inlineformset_factory_error_messages_overrides(self): author = Author.objects.create(pk=1, name="Charles Baudelaire") BookFormSet = inlineformset_factory( Author, Book, fields="__all__", error_messages={"title": {"max_length": "Title too long!!"}}, ) form = BookFormSet.form(data={"title": "Foo " * 30, "author": author.id}) form.full_clean() self.assertEqual(form.errors, {"title": ["Title too long!!"]}) def test_modelformset_factory_field_class_overrides(self): author = Author.objects.create(pk=1, name="Charles Baudelaire") BookFormSet = modelformset_factory( Book, fields="__all__", field_classes={ "title": forms.SlugField, }, ) form = BookFormSet.form(data={"title": "Foo " * 30, "author": author.id}) self.assertIs(Book._meta.get_field("title").__class__, models.CharField) self.assertIsInstance(form.fields["title"], forms.SlugField) def test_inlineformset_factory_field_class_overrides(self): author = Author.objects.create(pk=1, name="Charles Baudelaire") BookFormSet = inlineformset_factory( Author, Book, fields="__all__", field_classes={ "title": forms.SlugField, }, ) form = BookFormSet.form(data={"title": "Foo " * 30, "author": author.id}) self.assertIs(Book._meta.get_field("title").__class__, models.CharField) self.assertIsInstance(form.fields["title"], forms.SlugField) def test_modelformset_factory_absolute_max(self): AuthorFormSet = modelformset_factory( Author, fields="__all__", absolute_max=1500 ) data = { "form-TOTAL_FORMS": "1501", "form-INITIAL_FORMS": "0", 
"form-MAX_NUM_FORMS": "0", } formset = AuthorFormSet(data=data) self.assertIs(formset.is_valid(), False) self.assertEqual(len(formset.forms), 1500) self.assertEqual( formset.non_form_errors(), ["Please submit at most 1000 forms."], ) def test_modelformset_factory_absolute_max_with_max_num(self): AuthorFormSet = modelformset_factory( Author, fields="__all__", max_num=20, absolute_max=100, ) data = { "form-TOTAL_FORMS": "101", "form-INITIAL_FORMS": "0", "form-MAX_NUM_FORMS": "0", } formset = AuthorFormSet(data=data) self.assertIs(formset.is_valid(), False) self.assertEqual(len(formset.forms), 100) self.assertEqual( formset.non_form_errors(), ["Please submit at most 20 forms."], ) def test_inlineformset_factory_absolute_max(self): author = Author.objects.create(name="Charles Baudelaire") BookFormSet = inlineformset_factory( Author, Book, fields="__all__", absolute_max=1500, ) data = { "book_set-TOTAL_FORMS": "1501", "book_set-INITIAL_FORMS": "0", "book_set-MAX_NUM_FORMS": "0", } formset = BookFormSet(data, instance=author) self.assertIs(formset.is_valid(), False) self.assertEqual(len(formset.forms), 1500) self.assertEqual( formset.non_form_errors(), ["Please submit at most 1000 forms."], ) def test_inlineformset_factory_absolute_max_with_max_num(self): author = Author.objects.create(name="Charles Baudelaire") BookFormSet = inlineformset_factory( Author, Book, fields="__all__", max_num=20, absolute_max=100, ) data = { "book_set-TOTAL_FORMS": "101", "book_set-INITIAL_FORMS": "0", "book_set-MAX_NUM_FORMS": "0", } formset = BookFormSet(data, instance=author) self.assertIs(formset.is_valid(), False) self.assertEqual(len(formset.forms), 100) self.assertEqual( formset.non_form_errors(), ["Please submit at most 20 forms."], ) def test_modelformset_factory_can_delete_extra(self): AuthorFormSet = modelformset_factory( Author, fields="__all__", can_delete=True, can_delete_extra=True, extra=2, ) formset = AuthorFormSet() self.assertEqual(len(formset), 2) self.assertIn("DELETE", formset.forms[0].fields) self.assertIn("DELETE", formset.forms[1].fields) def test_modelformset_factory_disable_delete_extra(self): AuthorFormSet = modelformset_factory( Author, fields="__all__", can_delete=True, can_delete_extra=False, extra=2, ) formset = AuthorFormSet() self.assertEqual(len(formset), 2) self.assertNotIn("DELETE", formset.forms[0].fields) self.assertNotIn("DELETE", formset.forms[1].fields) def test_inlineformset_factory_can_delete_extra(self): BookFormSet = inlineformset_factory( Author, Book, fields="__all__", can_delete=True, can_delete_extra=True, extra=2, ) formset = BookFormSet() self.assertEqual(len(formset), 2) self.assertIn("DELETE", formset.forms[0].fields) self.assertIn("DELETE", formset.forms[1].fields) def test_inlineformset_factory_can_not_delete_extra(self): BookFormSet = inlineformset_factory( Author, Book, fields="__all__", can_delete=True, can_delete_extra=False, extra=2, ) formset = BookFormSet() self.assertEqual(len(formset), 2) self.assertNotIn("DELETE", formset.forms[0].fields) self.assertNotIn("DELETE", formset.forms[1].fields) def test_inlineformset_factory_passes_renderer(self): from django.forms.renderers import Jinja2 renderer = Jinja2() BookFormSet = inlineformset_factory( Author, Book, fields="__all__", renderer=renderer, ) formset = BookFormSet() self.assertEqual(formset.renderer, renderer) def test_modelformset_factory_passes_renderer(self): from django.forms.renderers import Jinja2 renderer = Jinja2() BookFormSet = modelformset_factory(Author, fields="__all__", renderer=renderer) 
formset = BookFormSet() self.assertEqual(formset.renderer, renderer)
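# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the basic
# inlineformset_factory round trip that most of the tests above exercise --
# build the formset class, bind POST-style data plus the management form,
# validate, and save. The helper name is hypothetical and is never called;
# it assumes the Author and Book models used throughout this file.
# ---------------------------------------------------------------------------
def _example_inline_formset_round_trip():
    AuthorBooksFormSet = inlineformset_factory(
        Author, Book, fields="__all__", extra=1, can_delete=False
    )
    author = Author.objects.create(name="Charles Baudelaire")
    data = {
        # Management form: forms rendered, forms with initial data, max forms.
        "book_set-TOTAL_FORMS": "1",
        "book_set-INITIAL_FORMS": "0",
        "book_set-MAX_NUM_FORMS": "",
        "book_set-0-title": "Les Fleurs du Mal",
    }
    formset = AuthorBooksFormSet(data, instance=author)
    assert formset.is_valid()
    # save() creates the Book rows bound to ``author`` and returns only the
    # instances that were added or changed.
    return formset.save()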
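# ---------------------------------------------------------------------------
# The test file that follows exercises django.contrib.sitemaps over HTTP.
# As a hedged, illustrative sketch (not part of the original file), a minimal
# Sitemap subclass of the kind those views serve might look like the
# commented code below; the model and field names are hypothetical, and
# location() is omitted because it defaults to each item's get_absolute_url().
#
#   from django.contrib.sitemaps import Sitemap
#
#   class SimpleSitemap(Sitemap):
#       changefreq = "never"
#       priority = 0.5
#
#       def items(self):
#           return TestModel.objects.order_by("pk")
#
#       def lastmod(self, obj):
#           # May also be a plain attribute; omit it entirely to drop
#           # <lastmod> from the rendered sitemap.
#           return obj.lastmod
# ---------------------------------------------------------------------------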
import os from datetime import date from django.contrib.sitemaps import Sitemap from django.contrib.sites.models import Site from django.core.exceptions import ImproperlyConfigured from django.test import ignore_warnings, modify_settings, override_settings from django.utils import translation from django.utils.deprecation import RemovedInDjango50Warning from django.utils.formats import localize from .base import SitemapTestsBase from .models import TestModel class HTTPSitemapTests(SitemapTestsBase): use_sitemap_err_msg = ( "To use sitemaps, either enable the sites framework or pass a " "Site/RequestSite object in your view." ) def test_simple_sitemap_index(self): "A simple sitemap index can be rendered" response = self.client.get("/simple/index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc><lastmod>%s</lastmod></sitemap> </sitemapindex> """ % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) def test_sitemap_not_callable(self): """A sitemap may not be callable.""" response = self.client.get("/simple-not-callable/index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc><lastmod>%s</lastmod></sitemap> </sitemapindex> """ % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) def test_paged_sitemap(self): """A sitemap may have multiple pages.""" response = self.client.get("/simple-paged/index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>{0}/simple/sitemap-simple.xml</loc><lastmod>{1}</lastmod></sitemap><sitemap><loc>{0}/simple/sitemap-simple.xml?p=2</loc><lastmod>{1}</lastmod></sitemap> </sitemapindex> """.format( self.base_url, date.today() ) self.assertXMLEqual(response.content.decode(), expected_content) @override_settings( TEMPLATES=[ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [os.path.join(os.path.dirname(__file__), "templates")], } ] ) def test_simple_sitemap_custom_lastmod_index(self): "A simple sitemap index can be rendered with a custom template" response = self.client.get("/simple/custom-lastmod-index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <!-- This is a customised template --> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc><lastmod>%s</lastmod></sitemap> </sitemapindex> """ % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) def test_simple_sitemap_section(self): "A simple sitemap section can be rendered" response = self.client.get("/simple/sitemap-simple.xml") expected_content = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' "<url><loc>%s/location/</loc><lastmod>%s</lastmod>" "<changefreq>never</changefreq><priority>0.5</priority></url>\n" "</urlset>" ) % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) def test_no_section(self): response = self.client.get("/simple/sitemap-simple2.xml") self.assertEqual( str(response.context["exception"]), "No sitemap available for section: 'simple2'", ) 
self.assertEqual(response.status_code, 404) def test_empty_page(self): response = self.client.get("/simple/sitemap-simple.xml?p=0") self.assertEqual(str(response.context["exception"]), "Page 0 empty") self.assertEqual(response.status_code, 404) def test_page_not_int(self): response = self.client.get("/simple/sitemap-simple.xml?p=test") self.assertEqual(str(response.context["exception"]), "No page 'test'") self.assertEqual(response.status_code, 404) def test_simple_sitemap(self): "A simple sitemap can be rendered" response = self.client.get("/simple/sitemap.xml") expected_content = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' "<url><loc>%s/location/</loc><lastmod>%s</lastmod>" "<changefreq>never</changefreq><priority>0.5</priority></url>\n" "</urlset>" ) % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) @override_settings( TEMPLATES=[ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [os.path.join(os.path.dirname(__file__), "templates")], } ] ) def test_simple_custom_sitemap(self): "A simple sitemap can be rendered with a custom template" response = self.client.get("/simple/custom-sitemap.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <!-- This is a customised template --> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """ % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) def test_sitemap_last_modified(self): "Last-Modified header is set correctly" response = self.client.get("/lastmod/sitemap.xml") self.assertEqual( response.headers["Last-Modified"], "Wed, 13 Mar 2013 10:00:00 GMT" ) def test_sitemap_last_modified_date(self): """ The Last-Modified header should be support dates (without time). """ response = self.client.get("/lastmod/date-sitemap.xml") self.assertEqual( response.headers["Last-Modified"], "Wed, 13 Mar 2013 00:00:00 GMT" ) def test_sitemap_last_modified_tz(self): """ The Last-Modified header should be converted from timezone aware dates to GMT. """ response = self.client.get("/lastmod/tz-sitemap.xml") self.assertEqual( response.headers["Last-Modified"], "Wed, 13 Mar 2013 15:00:00 GMT" ) def test_sitemap_last_modified_missing(self): "Last-Modified header is missing when sitemap has no lastmod" response = self.client.get("/generic/sitemap.xml") self.assertFalse(response.has_header("Last-Modified")) def test_sitemap_last_modified_mixed(self): "Last-Modified header is omitted when lastmod not on all items" response = self.client.get("/lastmod-mixed/sitemap.xml") self.assertFalse(response.has_header("Last-Modified")) def test_sitemaps_lastmod_mixed_ascending_last_modified_missing(self): """ The Last-Modified header is omitted when lastmod isn't found in all sitemaps. Test sitemaps are sorted by lastmod in ascending order. """ response = self.client.get("/lastmod-sitemaps/mixed-ascending.xml") self.assertFalse(response.has_header("Last-Modified")) def test_sitemaps_lastmod_mixed_descending_last_modified_missing(self): """ The Last-Modified header is omitted when lastmod isn't found in all sitemaps. Test sitemaps are sorted by lastmod in descending order. 
""" response = self.client.get("/lastmod-sitemaps/mixed-descending.xml") self.assertFalse(response.has_header("Last-Modified")) def test_sitemaps_lastmod_ascending(self): """ The Last-Modified header is set to the most recent sitemap lastmod. Test sitemaps are sorted by lastmod in ascending order. """ response = self.client.get("/lastmod-sitemaps/ascending.xml") self.assertEqual( response.headers["Last-Modified"], "Sat, 20 Apr 2013 05:00:00 GMT" ) def test_sitemaps_lastmod_descending(self): """ The Last-Modified header is set to the most recent sitemap lastmod. Test sitemaps are sorted by lastmod in descending order. """ response = self.client.get("/lastmod-sitemaps/descending.xml") self.assertEqual( response.headers["Last-Modified"], "Sat, 20 Apr 2013 05:00:00 GMT" ) def test_sitemap_get_latest_lastmod_none(self): """ sitemapindex.lastmod is omitted when Sitemap.lastmod is callable and Sitemap.get_latest_lastmod is not implemented """ response = self.client.get("/lastmod/get-latest-lastmod-none-sitemap.xml") self.assertNotContains(response, "<lastmod>") def test_sitemap_get_latest_lastmod(self): """ sitemapindex.lastmod is included when Sitemap.lastmod is attribute and Sitemap.get_latest_lastmod is implemented """ response = self.client.get("/lastmod/get-latest-lastmod-sitemap.xml") self.assertContains(response, "<lastmod>2013-03-13T10:00:00</lastmod>") def test_sitemap_latest_lastmod_timezone(self): """ lastmod datestamp shows timezones if Sitemap.get_latest_lastmod returns an aware datetime. """ response = self.client.get("/lastmod/latest-lastmod-timezone-sitemap.xml") self.assertContains(response, "<lastmod>2013-03-13T10:00:00-05:00</lastmod>") def test_localized_priority(self): """The priority value should not be localized.""" with translation.override("fr"): self.assertEqual("0,3", localize(0.3)) # Priorities aren't rendered in localized format. response = self.client.get("/simple/sitemap.xml") self.assertContains(response, "<priority>0.5</priority>") self.assertContains(response, "<lastmod>%s</lastmod>" % date.today()) @modify_settings(INSTALLED_APPS={"remove": "django.contrib.sites"}) def test_requestsite_sitemap(self): # Hitting the flatpages sitemap without the sites framework installed # doesn't raise an exception. response = self.client.get("/simple/sitemap.xml") expected_content = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' "<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod>" "<changefreq>never</changefreq><priority>0.5</priority></url>\n" "</urlset>" ) % date.today() self.assertXMLEqual(response.content.decode(), expected_content) @ignore_warnings(category=RemovedInDjango50Warning) def test_sitemap_get_urls_no_site_1(self): """ Check we get ImproperlyConfigured if we don't pass a site object to Sitemap.get_urls and no Site objects exist """ Site.objects.all().delete() with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg): Sitemap().get_urls() @modify_settings(INSTALLED_APPS={"remove": "django.contrib.sites"}) @ignore_warnings(category=RemovedInDjango50Warning) def test_sitemap_get_urls_no_site_2(self): """ Check we get ImproperlyConfigured when we don't pass a site object to Sitemap.get_urls if Site objects exists, but the sites framework is not actually installed. 
""" with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg): Sitemap().get_urls() @ignore_warnings(category=RemovedInDjango50Warning) def test_sitemap_item(self): """ Check to make sure that the raw item is included with each Sitemap.get_url() url result. """ test_sitemap = Sitemap() test_sitemap.items = TestModel.objects.order_by("pk").all def is_testmodel(url): return isinstance(url["item"], TestModel) item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls())) self.assertTrue(item_in_url_info) def test_cached_sitemap_index(self): """ A cached sitemap index can be rendered (#2713). """ response = self.client.get("/cached/index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/cached/sitemap-simple.xml</loc><lastmod>%s</lastmod></sitemap> </sitemapindex> """ % ( self.base_url, date.today(), ) self.assertXMLEqual(response.content.decode(), expected_content) def test_x_robots_sitemap(self): response = self.client.get("/simple/index.xml") self.assertEqual(response.headers["X-Robots-Tag"], "noindex, noodp, noarchive") response = self.client.get("/simple/sitemap.xml") self.assertEqual(response.headers["X-Robots-Tag"], "noindex, noodp, noarchive") def test_empty_sitemap(self): response = self.client.get("/empty/sitemap.xml") self.assertEqual(response.status_code, 200) @override_settings(LANGUAGES=(("en", "English"), ("pt", "Portuguese"))) def test_simple_i18n_sitemap_index(self): """ A simple i18n sitemap index can be rendered, without logging variable lookup errors. """ with self.assertNoLogs("django.template", "DEBUG"): response = self.client.get("/simple/i18n.xml") expected_content = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' "<url><loc>{0}/en/i18n/testmodel/{1}/</loc><changefreq>never</changefreq>" "<priority>0.5</priority></url><url><loc>{0}/pt/i18n/testmodel/{1}/</loc>" "<changefreq>never</changefreq><priority>0.5</priority></url>\n" "</urlset>" ).format(self.base_url, self.i18n_model.pk) self.assertXMLEqual(response.content.decode(), expected_content) @override_settings(LANGUAGES=(("en", "English"), ("pt", "Portuguese"))) def test_alternate_i18n_sitemap_index(self): """ A i18n sitemap with alternate/hreflang links can be rendered. 
""" response = self.client.get("/alternates/i18n.xml") url, pk = self.base_url, self.i18n_model.pk expected_urls = f""" <url><loc>{url}/en/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> </url> <url><loc>{url}/pt/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> </url> """.replace( "\n", "" ) expected_content = ( f'<?xml version="1.0" encoding="UTF-8"?>\n' f'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' f'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' f"{expected_urls}\n" f"</urlset>" ) self.assertXMLEqual(response.content.decode(), expected_content) @override_settings( LANGUAGES=(("en", "English"), ("pt", "Portuguese"), ("es", "Spanish")) ) def test_alternate_i18n_sitemap_limited(self): """ A i18n sitemap index with limited languages can be rendered. """ response = self.client.get("/limited/i18n.xml") url, pk = self.base_url, self.i18n_model.pk expected_urls = f""" <url><loc>{url}/en/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="es" href="{url}/es/i18n/testmodel/{pk}/"/> </url> <url><loc>{url}/es/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="es" href="{url}/es/i18n/testmodel/{pk}/"/> </url> """.replace( "\n", "" ) expected_content = ( f'<?xml version="1.0" encoding="UTF-8"?>\n' f'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' f'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' f"{expected_urls}\n" f"</urlset>" ) self.assertXMLEqual(response.content.decode(), expected_content) @override_settings(LANGUAGES=(("en", "English"), ("pt", "Portuguese"))) def test_alternate_i18n_sitemap_xdefault(self): """ A i18n sitemap index with x-default can be rendered. 
""" response = self.client.get("/x-default/i18n.xml") url, pk = self.base_url, self.i18n_model.pk expected_urls = f""" <url><loc>{url}/en/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="x-default" href="{url}/i18n/testmodel/{pk}/"/> </url> <url><loc>{url}/pt/i18n/testmodel/{pk}/</loc><changefreq>never</changefreq><priority>0.5</priority> <xhtml:link rel="alternate" hreflang="en" href="{url}/en/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="pt" href="{url}/pt/i18n/testmodel/{pk}/"/> <xhtml:link rel="alternate" hreflang="x-default" href="{url}/i18n/testmodel/{pk}/"/> </url> """.replace( "\n", "" ) expected_content = ( f'<?xml version="1.0" encoding="UTF-8"?>\n' f'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' f'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' f"{expected_urls}\n" f"</urlset>" ) self.assertXMLEqual(response.content.decode(), expected_content) def test_sitemap_without_entries(self): response = self.client.get("/sitemap-without-entries/sitemap.xml") expected_content = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n\n' "</urlset>" ) self.assertXMLEqual(response.content.decode(), expected_content) def test_callable_sitemod_partial(self): """ Not all items have `lastmod`. Therefore the `Last-Modified` header is not set by the detail or index sitemap view. """ index_response = self.client.get("/callable-lastmod-partial/index.xml") sitemap_response = self.client.get("/callable-lastmod-partial/sitemap.xml") self.assertNotIn("Last-Modified", index_response) self.assertNotIn("Last-Modified", sitemap_response) expected_content_index = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>http://example.com/simple/sitemap-callable-lastmod.xml</loc></sitemap> </sitemapindex> """ expected_content_sitemap = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' "<url><loc>http://example.com/location/</loc>" "<lastmod>2013-03-13</lastmod></url><url>" "<loc>http://example.com/location/</loc></url>\n" "</urlset>" ) self.assertXMLEqual(index_response.content.decode(), expected_content_index) self.assertXMLEqual(sitemap_response.content.decode(), expected_content_sitemap) def test_callable_sitemod_full(self): """ All items in the sitemap have `lastmod`. The `Last-Modified` header is set for the detail and index sitemap view. 
""" index_response = self.client.get("/callable-lastmod-full/index.xml") sitemap_response = self.client.get("/callable-lastmod-full/sitemap.xml") self.assertEqual( index_response.headers["Last-Modified"], "Thu, 13 Mar 2014 10:00:00 GMT" ) self.assertEqual( sitemap_response.headers["Last-Modified"], "Thu, 13 Mar 2014 10:00:00 GMT" ) expected_content_index = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>http://example.com/simple/sitemap-callable-lastmod.xml</loc><lastmod>2014-03-13T10:00:00</lastmod></sitemap> </sitemapindex> """ expected_content_sitemap = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" ' 'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n' "<url><loc>http://example.com/location/</loc>" "<lastmod>2013-03-13</lastmod></url>" "<url><loc>http://example.com/location/</loc>" "<lastmod>2014-03-13</lastmod></url>\n" "</urlset>" ) self.assertXMLEqual(index_response.content.decode(), expected_content_index) self.assertXMLEqual(sitemap_response.content.decode(), expected_content_sitemap) # RemovedInDjango50Warning class DeprecatedTests(SitemapTestsBase): @override_settings( TEMPLATES=[ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [os.path.join(os.path.dirname(__file__), "templates")], } ] ) def test_simple_sitemap_custom_index_warning(self): msg = ( "Calling `__str__` on SitemapIndexItem is deprecated, use the `location` " "attribute instead." ) with self.assertRaisesMessage(RemovedInDjango50Warning, msg): self.client.get("/simple/custom-index.xml") @ignore_warnings(category=RemovedInDjango50Warning) @override_settings( TEMPLATES=[ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [os.path.join(os.path.dirname(__file__), "templates")], } ] ) def test_simple_sitemap_custom_index(self): "A simple sitemap index can be rendered with a custom template" response = self.client.get("/simple/custom-index.xml") expected_content = """<?xml version="1.0" encoding="UTF-8"?> <!-- This is a customised template --> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % ( self.base_url ) self.assertXMLEqual(response.content.decode(), expected_content)
"""Tests related to django.db.backends that haven't been organized.""" import datetime import threading import unittest import warnings from unittest import mock from django.core.management.color import no_style from django.db import ( DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections, reset_queries, transaction, ) from django.db.backends.base.base import BaseDatabaseWrapper from django.db.backends.signals import connection_created from django.db.backends.utils import CursorWrapper from django.db.models.sql.constants import CURSOR from django.test import ( TestCase, TransactionTestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature, ) from .models import ( Article, Object, ObjectReference, Person, Post, RawData, Reporter, ReporterProxy, SchoolClass, SQLKeywordsModel, Square, VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ, ) class DateQuotingTest(TestCase): def test_django_date_trunc(self): """ Test the custom ``django_date_trunc method``, in particular against fields which clash with strings passed to it (e.g. 'year') (#12818). """ updated = datetime.datetime(2010, 2, 20) SchoolClass.objects.create(year=2009, last_updated=updated) years = SchoolClass.objects.dates("last_updated", "year") self.assertEqual(list(years), [datetime.date(2010, 1, 1)]) def test_django_date_extract(self): """ Test the custom ``django_date_extract method``, in particular against fields which clash with strings passed to it (e.g. 'day') (#12818). """ updated = datetime.datetime(2010, 2, 20) SchoolClass.objects.create(year=2009, last_updated=updated) classes = SchoolClass.objects.filter(last_updated__day=20) self.assertEqual(len(classes), 1) @override_settings(DEBUG=True) class LastExecutedQueryTest(TestCase): def test_last_executed_query_without_previous_query(self): """ last_executed_query should not raise an exception even if no previous query has been run. """ with connection.cursor() as cursor: connection.ops.last_executed_query(cursor, "", ()) def test_debug_sql(self): list(Reporter.objects.filter(first_name="test")) sql = connection.queries[-1]["sql"].lower() self.assertIn("select", sql) self.assertIn(Reporter._meta.db_table, sql) def test_query_encoding(self): """last_executed_query() returns a string.""" data = RawData.objects.filter(raw_data=b"\x00\x46 \xFE").extra( select={"föö": 1} ) sql, params = data.query.sql_with_params() with data.query.get_compiler("default").execute_sql(CURSOR) as cursor: last_sql = cursor.db.ops.last_executed_query(cursor, sql, params) self.assertIsInstance(last_sql, str) def test_last_executed_query(self): # last_executed_query() interpolate all parameters, in most cases it is # not equal to QuerySet.query. 
for qs in ( Article.objects.filter(pk=1), Article.objects.filter(pk__in=(1, 2), reporter__pk=3), Article.objects.filter( pk=1, reporter__pk=9, ).exclude(reporter__pk__in=[2, 1]), Article.objects.filter(pk__in=list(range(20, 31))), ): sql, params = qs.query.sql_with_params() with qs.query.get_compiler(DEFAULT_DB_ALIAS).execute_sql(CURSOR) as cursor: self.assertEqual( cursor.db.ops.last_executed_query(cursor, sql, params), str(qs.query), ) @skipUnlessDBFeature("supports_paramstyle_pyformat") def test_last_executed_query_dict(self): square_opts = Square._meta sql = "INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)" % ( connection.introspection.identifier_converter(square_opts.db_table), connection.ops.quote_name(square_opts.get_field("root").column), connection.ops.quote_name(square_opts.get_field("square").column), ) with connection.cursor() as cursor: params = {"root": 2, "square": 4} cursor.execute(sql, params) self.assertEqual( cursor.db.ops.last_executed_query(cursor, sql, params), sql % params, ) @skipUnlessDBFeature("supports_paramstyle_pyformat") def test_last_executed_query_dict_overlap_keys(self): square_opts = Square._meta sql = "INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(root2)s)" % ( connection.introspection.identifier_converter(square_opts.db_table), connection.ops.quote_name(square_opts.get_field("root").column), connection.ops.quote_name(square_opts.get_field("square").column), ) with connection.cursor() as cursor: params = {"root": 2, "root2": 4} cursor.execute(sql, params) self.assertEqual( cursor.db.ops.last_executed_query(cursor, sql, params), sql % params, ) class ParameterHandlingTest(TestCase): def test_bad_parameter_count(self): """ An executemany call with too many/not enough parameters will raise an exception. """ with connection.cursor() as cursor: query = "INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % ( connection.introspection.identifier_converter("backends_square"), connection.ops.quote_name("root"), connection.ops.quote_name("square"), ) with self.assertRaises(Exception): cursor.executemany(query, [(1, 2, 3)]) with self.assertRaises(Exception): cursor.executemany(query, [(1,)]) class LongNameTest(TransactionTestCase): """Long primary keys and model names can result in a sequence name that exceeds the database limits, which will result in truncation on certain databases (e.g., Postgres). The backend needs to use the correct sequence name in last_insert_id and other places, so check it is. Refs #8901. """ available_apps = ["backends"] def test_sequence_name_length_limits_create(self): """Creation of model with long name and long pk name doesn't error.""" VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create() def test_sequence_name_length_limits_m2m(self): """ An m2m save of a model with a long name and a long m2m field name doesn't error (#8901). """ obj = ( VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create() ) rel_obj = Person.objects.create(first_name="Django", last_name="Reinhardt") obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj) def test_sequence_name_length_limits_flush(self): """ Sequence resetting as part of a flush with model with long name and long pk name doesn't error (#8901). 
""" # A full flush is expensive to the full test, so we dig into the # internals to generate the likely offending SQL and run it manually # Some convenience aliases VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ VLM_m2m = ( VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through ) tables = [ VLM._meta.db_table, VLM_m2m._meta.db_table, ] sql_list = connection.ops.sql_flush(no_style(), tables, reset_sequences=True) connection.ops.execute_sql_flush(sql_list) class SequenceResetTest(TestCase): def test_generic_relation(self): "Sequence names are correct when resetting generic relations (Ref #13941)" # Create an object with a manually specified PK Post.objects.create(id=10, name="1st post", text="hello world") # Reset the sequences for the database commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql( no_style(), [Post] ) with connection.cursor() as cursor: for sql in commands: cursor.execute(sql) # If we create a new object now, it should have a PK greater # than the PK we specified manually. obj = Post.objects.create(name="New post", text="goodbye world") self.assertGreater(obj.pk, 10) # This test needs to run outside of a transaction, otherwise closing the # connection would implicitly rollback and cause problems during teardown. class ConnectionCreatedSignalTest(TransactionTestCase): available_apps = [] # Unfortunately with sqlite3 the in-memory test database cannot be closed, # and so it cannot be re-opened during testing. @skipUnlessDBFeature("test_db_allows_multiple_connections") def test_signal(self): data = {} def receiver(sender, connection, **kwargs): data["connection"] = connection connection_created.connect(receiver) connection.close() with connection.cursor(): pass self.assertIs(data["connection"].connection, connection.connection) connection_created.disconnect(receiver) data.clear() with connection.cursor(): pass self.assertEqual(data, {}) class EscapingChecks(TestCase): """ All tests in this test case are also run with settings.DEBUG=True in EscapingChecksDebug test case, to also test CursorDebugWrapper. 
""" bare_select_suffix = connection.features.bare_select_suffix def test_paramless_no_escaping(self): with connection.cursor() as cursor: cursor.execute("SELECT '%s'" + self.bare_select_suffix) self.assertEqual(cursor.fetchall()[0][0], "%s") def test_parameter_escaping(self): with connection.cursor() as cursor: cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ("%d",)) self.assertEqual(cursor.fetchall()[0], ("%", "%d")) @override_settings(DEBUG=True) class EscapingChecksDebug(EscapingChecks): pass class BackendTestCase(TransactionTestCase): available_apps = ["backends"] def create_squares_with_executemany(self, args): self.create_squares(args, "format", True) def create_squares(self, args, paramstyle, multiple): opts = Square._meta tbl = connection.introspection.identifier_converter(opts.db_table) f1 = connection.ops.quote_name(opts.get_field("root").column) f2 = connection.ops.quote_name(opts.get_field("square").column) if paramstyle == "format": query = "INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % (tbl, f1, f2) elif paramstyle == "pyformat": query = "INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)" % ( tbl, f1, f2, ) else: raise ValueError("unsupported paramstyle in test") with connection.cursor() as cursor: if multiple: cursor.executemany(query, args) else: cursor.execute(query, args) def test_cursor_executemany(self): # Test cursor.executemany #4896 args = [(i, i**2) for i in range(-5, 6)] self.create_squares_with_executemany(args) self.assertEqual(Square.objects.count(), 11) for i in range(-5, 6): square = Square.objects.get(root=i) self.assertEqual(square.square, i**2) def test_cursor_executemany_with_empty_params_list(self): # Test executemany with params=[] does nothing #4765 args = [] self.create_squares_with_executemany(args) self.assertEqual(Square.objects.count(), 0) def test_cursor_executemany_with_iterator(self): # Test executemany accepts iterators #10320 args = ((i, i**2) for i in range(-3, 2)) self.create_squares_with_executemany(args) self.assertEqual(Square.objects.count(), 5) args = ((i, i**2) for i in range(3, 7)) with override_settings(DEBUG=True): # same test for DebugCursorWrapper self.create_squares_with_executemany(args) self.assertEqual(Square.objects.count(), 9) @skipUnlessDBFeature("supports_paramstyle_pyformat") def test_cursor_execute_with_pyformat(self): # Support pyformat style passing of parameters #10070 args = {"root": 3, "square": 9} self.create_squares(args, "pyformat", multiple=False) self.assertEqual(Square.objects.count(), 1) @skipUnlessDBFeature("supports_paramstyle_pyformat") def test_cursor_executemany_with_pyformat(self): # Support pyformat style passing of parameters #10070 args = [{"root": i, "square": i**2} for i in range(-5, 6)] self.create_squares(args, "pyformat", multiple=True) self.assertEqual(Square.objects.count(), 11) for i in range(-5, 6): square = Square.objects.get(root=i) self.assertEqual(square.square, i**2) @skipUnlessDBFeature("supports_paramstyle_pyformat") def test_cursor_executemany_with_pyformat_iterator(self): args = ({"root": i, "square": i**2} for i in range(-3, 2)) self.create_squares(args, "pyformat", multiple=True) self.assertEqual(Square.objects.count(), 5) args = ({"root": i, "square": i**2} for i in range(3, 7)) with override_settings(DEBUG=True): # same test for DebugCursorWrapper self.create_squares(args, "pyformat", multiple=True) self.assertEqual(Square.objects.count(), 9) def test_unicode_fetches(self): # fetchone, fetchmany, fetchall return strings as Unicode objects. 
qn = connection.ops.quote_name Person(first_name="John", last_name="Doe").save() Person(first_name="Jane", last_name="Doe").save() Person(first_name="Mary", last_name="Agnelline").save() Person(first_name="Peter", last_name="Parker").save() Person(first_name="Clark", last_name="Kent").save() opts2 = Person._meta f3, f4 = opts2.get_field("first_name"), opts2.get_field("last_name") with connection.cursor() as cursor: cursor.execute( "SELECT %s, %s FROM %s ORDER BY %s" % ( qn(f3.column), qn(f4.column), connection.introspection.identifier_converter(opts2.db_table), qn(f3.column), ) ) self.assertEqual(cursor.fetchone(), ("Clark", "Kent")) self.assertEqual( list(cursor.fetchmany(2)), [("Jane", "Doe"), ("John", "Doe")] ) self.assertEqual( list(cursor.fetchall()), [("Mary", "Agnelline"), ("Peter", "Parker")] ) def test_unicode_password(self): old_password = connection.settings_dict["PASSWORD"] connection.settings_dict["PASSWORD"] = "françois" try: with connection.cursor(): pass except DatabaseError: # As password is probably wrong, a database exception is expected pass except Exception as e: self.fail("Unexpected error raised with Unicode password: %s" % e) finally: connection.settings_dict["PASSWORD"] = old_password def test_database_operations_helper_class(self): # Ticket #13630 self.assertTrue(hasattr(connection, "ops")) self.assertTrue(hasattr(connection.ops, "connection")) self.assertEqual(connection, connection.ops.connection) def test_database_operations_init(self): """ DatabaseOperations initialization doesn't query the database. See #17656. """ with self.assertNumQueries(0): connection.ops.__class__(connection) def test_cached_db_features(self): self.assertIn(connection.features.supports_transactions, (True, False)) self.assertIn(connection.features.can_introspect_foreign_keys, (True, False)) def test_duplicate_table_error(self): """Creating an existing table returns a DatabaseError""" query = "CREATE TABLE %s (id INTEGER);" % Article._meta.db_table with connection.cursor() as cursor: with self.assertRaises(DatabaseError): cursor.execute(query) def test_cursor_contextmanager(self): """ Cursors can be used as a context manager """ with connection.cursor() as cursor: self.assertIsInstance(cursor, CursorWrapper) # Both InterfaceError and ProgrammingError seem to be used when # accessing closed cursor (psycopg2 has InterfaceError, rest seem # to use ProgrammingError). with self.assertRaises(connection.features.closed_cursor_error_class): # cursor should be closed, so no queries should be possible. cursor.execute("SELECT 1" + connection.features.bare_select_suffix) @unittest.skipUnless( connection.vendor == "postgresql", "Psycopg2 specific cursor.closed attribute needed", ) def test_cursor_contextmanager_closing(self): # There isn't a generic way to test that cursors are closed, but # psycopg2 offers us a way to check that by closed attribute. # So, run only on psycopg2 for that reason. with connection.cursor() as cursor: self.assertIsInstance(cursor, CursorWrapper) self.assertTrue(cursor.closed) # Unfortunately with sqlite3 the in-memory test database cannot be closed. @skipUnlessDBFeature("test_db_allows_multiple_connections") def test_is_usable_after_database_disconnects(self): """ is_usable() doesn't crash when the database disconnects (#21553). """ # Open a connection to the database. with connection.cursor(): pass # Emulate a connection close by the database. connection._close() # Even then is_usable() should not raise an exception. 
try: self.assertFalse(connection.is_usable()) finally: # Clean up the mess created by connection._close(). Since the # connection is already closed, this crashes on some backends. try: connection.close() except Exception: pass @override_settings(DEBUG=True) def test_queries(self): """ Test the documented API of connection.queries. """ sql = "SELECT 1" + connection.features.bare_select_suffix with connection.cursor() as cursor: reset_queries() cursor.execute(sql) self.assertEqual(1, len(connection.queries)) self.assertIsInstance(connection.queries, list) self.assertIsInstance(connection.queries[0], dict) self.assertEqual(list(connection.queries[0]), ["sql", "time"]) self.assertEqual(connection.queries[0]["sql"], sql) reset_queries() self.assertEqual(0, len(connection.queries)) sql = "INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % ( connection.introspection.identifier_converter("backends_square"), connection.ops.quote_name("root"), connection.ops.quote_name("square"), ) with connection.cursor() as cursor: cursor.executemany(sql, [(1, 1), (2, 4)]) self.assertEqual(1, len(connection.queries)) self.assertIsInstance(connection.queries, list) self.assertIsInstance(connection.queries[0], dict) self.assertEqual(list(connection.queries[0]), ["sql", "time"]) self.assertEqual(connection.queries[0]["sql"], "2 times: %s" % sql) # Unfortunately with sqlite3 the in-memory test database cannot be closed. @skipUnlessDBFeature("test_db_allows_multiple_connections") @override_settings(DEBUG=True) def test_queries_limit(self): """ The backend doesn't store an unlimited number of queries (#12581). """ old_queries_limit = BaseDatabaseWrapper.queries_limit BaseDatabaseWrapper.queries_limit = 3 new_connection = connection.copy() # Initialize the connection and clear initialization statements. with new_connection.cursor(): pass new_connection.queries_log.clear() try: with new_connection.cursor() as cursor: cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix) cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix) with warnings.catch_warnings(record=True) as w: self.assertEqual(2, len(new_connection.queries)) self.assertEqual(0, len(w)) with new_connection.cursor() as cursor: cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix) cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix) msg = ( "Limit for query logging exceeded, only the last 3 queries will be " "returned." 
) with self.assertWarnsMessage(UserWarning, msg): self.assertEqual(3, len(new_connection.queries)) finally: BaseDatabaseWrapper.queries_limit = old_queries_limit new_connection.close() @mock.patch("django.db.backends.utils.logger") @override_settings(DEBUG=True) def test_queries_logger(self, mocked_logger): sql = "SELECT 1" + connection.features.bare_select_suffix with connection.cursor() as cursor: cursor.execute(sql) params, kwargs = mocked_logger.debug.call_args self.assertIn("; alias=%s", params[0]) self.assertEqual(params[2], sql) self.assertIsNone(params[3]) self.assertEqual(params[4], connection.alias) self.assertEqual( list(kwargs["extra"]), ["duration", "sql", "params", "alias"], ) self.assertEqual(tuple(kwargs["extra"].values()), params[1:]) def test_queries_bare_where(self): sql = f"SELECT 1{connection.features.bare_select_suffix} WHERE 1=1" with connection.cursor() as cursor: cursor.execute(sql) self.assertEqual(cursor.fetchone(), (1,)) def test_timezone_none_use_tz_false(self): connection.ensure_connection() with self.settings(TIME_ZONE=None, USE_TZ=False): connection.init_connection_state() # These tests aren't conditional because it would require differentiating # between MySQL+InnoDB and MySQL+MYISAM (something we currently can't do). class FkConstraintsTests(TransactionTestCase): available_apps = ["backends"] def setUp(self): # Create a Reporter. self.r = Reporter.objects.create(first_name="John", last_name="Smith") def test_integrity_checks_on_creation(self): """ Try to create a model instance that violates a FK constraint. If it fails it should fail with IntegrityError. """ a1 = Article( headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30, ) try: a1.save() except IntegrityError: pass else: self.skipTest("This backend does not support integrity checks.") # Now that we know this backend supports integrity checks we make sure # constraints are also enforced for proxy Refs #17519 a2 = Article( headline="This is another test", reporter=self.r, pub_date=datetime.datetime(2012, 8, 3), reporter_proxy_id=30, ) with self.assertRaises(IntegrityError): a2.save() def test_integrity_checks_on_update(self): """ Try to update a model instance introducing a FK constraint violation. If it fails it should fail with IntegrityError. """ # Create an Article. Article.objects.create( headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r, ) # Retrieve it from the DB a1 = Article.objects.get(headline="Test article") a1.reporter_id = 30 try: a1.save() except IntegrityError: pass else: self.skipTest("This backend does not support integrity checks.") # Now that we know this backend supports integrity checks we make sure # constraints are also enforced for proxy Refs #17519 # Create another article r_proxy = ReporterProxy.objects.get(pk=self.r.pk) Article.objects.create( headline="Another article", pub_date=datetime.datetime(1988, 5, 15), reporter=self.r, reporter_proxy=r_proxy, ) # Retrieve the second article from the DB a2 = Article.objects.get(headline="Another article") a2.reporter_proxy_id = 30 with self.assertRaises(IntegrityError): a2.save() def test_disable_constraint_checks_manually(self): """ When constraint checks are disabled, should be able to write bad data without IntegrityErrors. """ with transaction.atomic(): # Create an Article. 
Article.objects.create( headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r, ) # Retrieve it from the DB a = Article.objects.get(headline="Test article") a.reporter_id = 30 try: connection.disable_constraint_checking() a.save() connection.enable_constraint_checking() except IntegrityError: self.fail("IntegrityError should not have occurred.") transaction.set_rollback(True) def test_disable_constraint_checks_context_manager(self): """ When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors. """ with transaction.atomic(): # Create an Article. Article.objects.create( headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r, ) # Retrieve it from the DB a = Article.objects.get(headline="Test article") a.reporter_id = 30 try: with connection.constraint_checks_disabled(): a.save() except IntegrityError: self.fail("IntegrityError should not have occurred.") transaction.set_rollback(True) def test_check_constraints(self): """ Constraint checks should raise an IntegrityError when bad data is in the DB. """ with transaction.atomic(): # Create an Article. Article.objects.create( headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r, ) # Retrieve it from the DB a = Article.objects.get(headline="Test article") a.reporter_id = 30 with connection.constraint_checks_disabled(): a.save() try: connection.check_constraints(table_names=[Article._meta.db_table]) except IntegrityError: pass else: self.skipTest("This backend does not support integrity checks.") transaction.set_rollback(True) def test_check_constraints_sql_keywords(self): with transaction.atomic(): obj = SQLKeywordsModel.objects.create(reporter=self.r) obj.refresh_from_db() obj.reporter_id = 30 with connection.constraint_checks_disabled(): obj.save() try: connection.check_constraints(table_names=["order"]) except IntegrityError: pass else: self.skipTest("This backend does not support integrity checks.") transaction.set_rollback(True) class ThreadTests(TransactionTestCase): available_apps = ["backends"] def test_default_connection_thread_local(self): """ The default connection (i.e. django.db.connection) is different for each thread (#17258). """ # Map connections by id because connections with identical aliases # have the same hash. connections_dict = {} with connection.cursor(): pass connections_dict[id(connection)] = connection def runner(): # Passing django.db.connection between threads doesn't work while # connections[DEFAULT_DB_ALIAS] does. from django.db import connections connection = connections[DEFAULT_DB_ALIAS] # Allow thread sharing so the connection can be closed by the # main thread. connection.inc_thread_sharing() with connection.cursor(): pass connections_dict[id(connection)] = connection try: for x in range(2): t = threading.Thread(target=runner) t.start() t.join() # Each created connection got different inner connection. self.assertEqual( len({conn.connection for conn in connections_dict.values()}), 3 ) finally: # Finish by closing the connections opened by the other threads # (the connection opened in the main thread will automatically be # closed on teardown). for conn in connections_dict.values(): if conn is not connection and conn.allow_thread_sharing: conn.close() conn.dec_thread_sharing() def test_connections_thread_local(self): """ The connections are different for each thread (#17258). """ # Map connections by id because connections with identical aliases # have the same hash. 
connections_dict = {} for conn in connections.all(): connections_dict[id(conn)] = conn def runner(): from django.db import connections for conn in connections.all(): # Allow thread sharing so the connection can be closed by the # main thread. conn.inc_thread_sharing() connections_dict[id(conn)] = conn try: num_new_threads = 2 for x in range(num_new_threads): t = threading.Thread(target=runner) t.start() t.join() self.assertEqual( len(connections_dict), len(connections.all()) * (num_new_threads + 1), ) finally: # Finish by closing the connections opened by the other threads # (the connection opened in the main thread will automatically be # closed on teardown). for conn in connections_dict.values(): if conn is not connection and conn.allow_thread_sharing: conn.close() conn.dec_thread_sharing() def test_pass_connection_between_threads(self): """ A connection can be passed from one thread to the other (#17258). """ Person.objects.create(first_name="John", last_name="Doe") def do_thread(): def runner(main_thread_connection): from django.db import connections connections["default"] = main_thread_connection try: Person.objects.get(first_name="John", last_name="Doe") except Exception as e: exceptions.append(e) t = threading.Thread(target=runner, args=[connections["default"]]) t.start() t.join() # Without touching thread sharing, which should be False by default. exceptions = [] do_thread() # Forbidden! self.assertIsInstance(exceptions[0], DatabaseError) connections["default"].close() # After calling inc_thread_sharing() on the connection. connections["default"].inc_thread_sharing() try: exceptions = [] do_thread() # All good self.assertEqual(exceptions, []) finally: connections["default"].dec_thread_sharing() def test_closing_non_shared_connections(self): """ A connection that is not explicitly shareable cannot be closed by another thread (#17258). """ # First, without explicitly enabling the connection for sharing. exceptions = set() def runner1(): def runner2(other_thread_connection): try: other_thread_connection.close() except DatabaseError as e: exceptions.add(e) t2 = threading.Thread(target=runner2, args=[connections["default"]]) t2.start() t2.join() t1 = threading.Thread(target=runner1) t1.start() t1.join() # The exception was raised self.assertEqual(len(exceptions), 1) # Then, with explicitly enabling the connection for sharing. exceptions = set() def runner1(): def runner2(other_thread_connection): try: other_thread_connection.close() except DatabaseError as e: exceptions.add(e) # Enable thread sharing connections["default"].inc_thread_sharing() try: t2 = threading.Thread(target=runner2, args=[connections["default"]]) t2.start() t2.join() finally: connections["default"].dec_thread_sharing() t1 = threading.Thread(target=runner1) t1.start() t1.join() # No exception was raised self.assertEqual(len(exceptions), 0) def test_thread_sharing_count(self): self.assertIs(connection.allow_thread_sharing, False) connection.inc_thread_sharing() self.assertIs(connection.allow_thread_sharing, True) connection.inc_thread_sharing() self.assertIs(connection.allow_thread_sharing, True) connection.dec_thread_sharing() self.assertIs(connection.allow_thread_sharing, True) connection.dec_thread_sharing() self.assertIs(connection.allow_thread_sharing, False) msg = "Cannot decrement the thread sharing count below zero." 
with self.assertRaisesMessage(RuntimeError, msg): connection.dec_thread_sharing() class MySQLPKZeroTests(TestCase): """ Zero as id for AutoField should raise exception in MySQL, because MySQL does not allow zero for autoincrement primary key if the NO_AUTO_VALUE_ON_ZERO SQL mode is not enabled. """ @skipIfDBFeature("allows_auto_pk_0") def test_zero_as_autoval(self): with self.assertRaises(ValueError): Square.objects.create(id=0, root=0, square=1) class DBConstraintTestCase(TestCase): def test_can_reference_existent(self): obj = Object.objects.create() ref = ObjectReference.objects.create(obj=obj) self.assertEqual(ref.obj, obj) ref = ObjectReference.objects.get(obj=obj) self.assertEqual(ref.obj, obj) def test_can_reference_non_existent(self): self.assertFalse(Object.objects.filter(id=12345).exists()) ref = ObjectReference.objects.create(obj_id=12345) ref_new = ObjectReference.objects.get(obj_id=12345) self.assertEqual(ref, ref_new) with self.assertRaises(Object.DoesNotExist): ref.obj def test_many_to_many(self): obj = Object.objects.create() obj.related_objects.create() self.assertEqual(Object.objects.count(), 2) self.assertEqual(obj.related_objects.count(), 1) intermediary_model = Object._meta.get_field( "related_objects" ).remote_field.through intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345) self.assertEqual(obj.related_objects.count(), 1) self.assertEqual(intermediary_model.objects.count(), 2)
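# A minimal, hypothetical sketch (not from Django's test suite) of how the
# constraint APIs exercised by FkConstraintsTests are typically combined:
# disable FK checks while writing rows that may temporarily violate them, then
# verify the affected tables. `load_legacy_rows` is an assumed helper name.
from django.db import connection, transaction


def import_with_deferred_checks(table_names):
    with transaction.atomic():
        with connection.constraint_checks_disabled():
            load_legacy_rows()  # hypothetical bulk insert
        # Raises IntegrityError if any dangling references remain.
        connection.check_constraints(table_names=table_names)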
import datetime import itertools import unittest from copy import copy from unittest import mock from django.core.exceptions import FieldError from django.core.management.color import no_style from django.db import ( DatabaseError, DataError, IntegrityError, OperationalError, connection, ) from django.db.models import ( CASCADE, PROTECT, AutoField, BigAutoField, BigIntegerField, BinaryField, BooleanField, CharField, CheckConstraint, DateField, DateTimeField, DecimalField, DurationField, F, FloatField, ForeignKey, ForeignObject, Index, IntegerField, JSONField, ManyToManyField, Model, OneToOneField, OrderBy, PositiveIntegerField, Q, SlugField, SmallAutoField, SmallIntegerField, TextField, TimeField, UniqueConstraint, UUIDField, Value, ) from django.db.models.fields.json import KeyTextTransform from django.db.models.functions import Abs, Cast, Collate, Lower, Random, Upper from django.db.models.indexes import IndexExpression from django.db.transaction import TransactionManagementError, atomic from django.test import ( TransactionTestCase, ignore_warnings, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import CaptureQueriesContext, isolate_apps, register_lookup from django.utils.deprecation import RemovedInDjango51Warning from .fields import CustomManyToManyField, InheritedManyToManyField, MediumBlobField from .models import ( Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, AuthorWithIndexedName, AuthorWithUniqueName, AuthorWithUniqueNameAndBirthday, Book, BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor, BookWithSlug, IntegerPK, Node, Note, NoteRename, Tag, TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps, ) class SchemaTests(TransactionTestCase): """ Tests for the schema-alteration code. Be aware that these tests are more liable than most to false results, as sometimes the code to check if a test has worked is almost as complex as the code it is testing. """ available_apps = [] models = [ Author, AuthorCharFieldWithIndex, AuthorTextFieldWithIndex, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Node, Note, Tag, TagM2MTest, TagUniqueRename, Thing, UniqueTest, ] # Utility functions def setUp(self): # local_models should contain test dependent model classes that will be # automatically removed from the app cache on test tear down. self.local_models = [] # isolated_local_models contains models that are in test methods # decorated with @isolate_apps. 
self.isolated_local_models = [] def tearDown(self): # Delete any tables made for our models self.delete_tables() new_apps.clear_cache() for model in new_apps.get_models(): model._meta._expire_cache() if "schema" in new_apps.all_models: for model in self.local_models: for many_to_many in model._meta.many_to_many: through = many_to_many.remote_field.through if through and through._meta.auto_created: del new_apps.all_models["schema"][through._meta.model_name] del new_apps.all_models["schema"][model._meta.model_name] if self.isolated_local_models: with connection.schema_editor() as editor: for model in self.isolated_local_models: editor.delete_model(model) def delete_tables(self): "Deletes all model tables for our models for a clean test environment" converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: connection.disable_constraint_checking() table_names = connection.introspection.table_names() if connection.features.ignores_table_name_case: table_names = [table_name.lower() for table_name in table_names] for model in itertools.chain(SchemaTests.models, self.local_models): tbl = converter(model._meta.db_table) if connection.features.ignores_table_name_case: tbl = tbl.lower() if tbl in table_names: editor.delete_model(model) table_names.remove(tbl) connection.enable_constraint_checking() def column_classes(self, model): with connection.cursor() as cursor: columns = { d[0]: (connection.introspection.get_field_type(d[1], d), d) for d in connection.introspection.get_table_description( cursor, model._meta.db_table, ) } # SQLite has a different format for field_type for name, (type, desc) in columns.items(): if isinstance(type, tuple): columns[name] = (type[0], desc) return columns def get_primary_key(self, table): with connection.cursor() as cursor: return connection.introspection.get_primary_key_column(cursor, table) def get_indexes(self, table): """ Get the indexes on the table using a new cursor. """ with connection.cursor() as cursor: return [ c["columns"][0] for c in connection.introspection.get_constraints( cursor, table ).values() if c["index"] and len(c["columns"]) == 1 ] def get_uniques(self, table): with connection.cursor() as cursor: return [ c["columns"][0] for c in connection.introspection.get_constraints( cursor, table ).values() if c["unique"] and len(c["columns"]) == 1 ] def get_constraints(self, table): """ Get the constraints on a table using a new cursor. """ with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) def get_constraints_for_column(self, model, column_name): constraints = self.get_constraints(model._meta.db_table) constraints_for_column = [] for name, details in constraints.items(): if details["columns"] == [column_name]: constraints_for_column.append(name) return sorted(constraints_for_column) def check_added_field_default( self, schema_editor, model, field, field_name, expected_default, cast_function=None, ): with connection.cursor() as cursor: schema_editor.add_field(model, field) cursor.execute( "SELECT {} FROM {};".format(field_name, model._meta.db_table) ) database_default = cursor.fetchall()[0][0] if cast_function and type(database_default) != type(expected_default): database_default = cast_function(database_default) self.assertEqual(database_default, expected_default) def get_constraints_count(self, table, column, fk_to): """ Return a dict with keys 'fks', 'uniques, and 'indexes' indicating the number of foreign keys, unique constraints, and indexes on `table`.`column`. 
The `fk_to` argument is a 2-tuple specifying the expected foreign key relationship's (table, column). """ with connection.cursor() as cursor: constraints = connection.introspection.get_constraints(cursor, table) counts = {"fks": 0, "uniques": 0, "indexes": 0} for c in constraints.values(): if c["columns"] == [column]: if c["foreign_key"] == fk_to: counts["fks"] += 1 if c["unique"]: counts["uniques"] += 1 elif c["index"]: counts["indexes"] += 1 return counts def get_column_collation(self, table, column): with connection.cursor() as cursor: return next( f.collation for f in connection.introspection.get_table_description(cursor, table) if f.name == column ) def assertIndexOrder(self, table, index, order): constraints = self.get_constraints(table) self.assertIn(index, constraints) index_orders = constraints[index]["orders"] self.assertTrue( all(val == expected for val, expected in zip(index_orders, order)) ) def assertForeignKeyExists(self, model, column, expected_fk_table, field="id"): """ Fail if the FK constraint on `model.Meta.db_table`.`column` to `expected_fk_table`.id doesn't exist. """ if not connection.features.can_introspect_foreign_keys: return constraints = self.get_constraints(model._meta.db_table) constraint_fk = None for details in constraints.values(): if details["columns"] == [column] and details["foreign_key"]: constraint_fk = details["foreign_key"] break self.assertEqual(constraint_fk, (expected_fk_table, field)) def assertForeignKeyNotExists(self, model, column, expected_fk_table): if not connection.features.can_introspect_foreign_keys: return with self.assertRaises(AssertionError): self.assertForeignKeyExists(model, column, expected_fk_table) # Tests def test_creation_deletion(self): """ Tries creating a model's table, and then deleting it. """ with connection.schema_editor() as editor: # Create the table editor.create_model(Author) # The table is there list(Author.objects.all()) # Clean up that table editor.delete_model(Author) # No deferred SQL should be left over. self.assertEqual(editor.deferred_sql, []) # The table is gone with self.assertRaises(DatabaseError): list(Author.objects.all()) @skipUnlessDBFeature("supports_foreign_keys") def test_fk(self): "Creating tables out of FK order, then repointing, works" # Create the table with connection.schema_editor() as editor: editor.create_model(Book) editor.create_model(Author) editor.create_model(Tag) # Initial tables are there list(Author.objects.all()) list(Book.objects.all()) # Make sure the FK constraint is present with self.assertRaises(IntegrityError): Book.objects.create( author_id=1, title="Much Ado About Foreign Keys", pub_date=datetime.datetime.now(), ) # Repoint the FK constraint old_field = Book._meta.get_field("author") new_field = ForeignKey(Tag, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) self.assertForeignKeyExists(Book, "author_id", "schema_tag") @skipUnlessDBFeature("can_create_inline_fk") def test_inline_fk(self): # Create some tables. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) editor.create_model(Note) self.assertForeignKeyNotExists(Note, "book_id", "schema_book") # Add a foreign key from one to the other. 
with connection.schema_editor() as editor: new_field = ForeignKey(Book, CASCADE) new_field.set_attributes_from_name("book") editor.add_field(Note, new_field) self.assertForeignKeyExists(Note, "book_id", "schema_book") # Creating a FK field with a constraint uses a single statement without # a deferred ALTER TABLE. self.assertFalse( [ sql for sql in (str(statement) for statement in editor.deferred_sql) if sql.startswith("ALTER TABLE") and "ADD CONSTRAINT" in sql ] ) @skipUnlessDBFeature("can_create_inline_fk") def test_add_inline_fk_update_data(self): with connection.schema_editor() as editor: editor.create_model(Node) # Add an inline foreign key and update data in the same transaction. new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True) new_field.set_attributes_from_name("new_parent_fk") parent = Node.objects.create() with connection.schema_editor() as editor: editor.add_field(Node, new_field) editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk]) assertIndex = ( self.assertIn if connection.features.indexes_foreign_keys else self.assertNotIn ) assertIndex("new_parent_fk_id", self.get_indexes(Node._meta.db_table)) @skipUnlessDBFeature( "can_create_inline_fk", "allows_multiple_constraints_on_same_fields", ) @isolate_apps("schema") def test_add_inline_fk_index_update_data(self): class Node(Model): class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Node) # Add an inline foreign key, update data, and an index in the same # transaction. new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True) new_field.set_attributes_from_name("new_parent_fk") parent = Node.objects.create() with connection.schema_editor() as editor: editor.add_field(Node, new_field) Node._meta.add_field(new_field) editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk]) editor.add_index( Node, Index(fields=["new_parent_fk"], name="new_parent_inline_fk_idx") ) self.assertIn("new_parent_fk_id", self.get_indexes(Node._meta.db_table)) @skipUnlessDBFeature("supports_foreign_keys") def test_char_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorCharFieldWithIndex) # Change CharField to FK old_field = AuthorCharFieldWithIndex._meta.get_field("char_field") new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name("char_field") with connection.schema_editor() as editor: editor.alter_field( AuthorCharFieldWithIndex, old_field, new_field, strict=True ) self.assertForeignKeyExists( AuthorCharFieldWithIndex, "char_field_id", "schema_author" ) @skipUnlessDBFeature("supports_foreign_keys") @skipUnlessDBFeature("supports_index_on_text_field") def test_text_field_with_db_index_to_fk(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorTextFieldWithIndex) # Change TextField to FK old_field = AuthorTextFieldWithIndex._meta.get_field("text_field") new_field = ForeignKey(Author, CASCADE, blank=True) new_field.set_attributes_from_name("text_field") with connection.schema_editor() as editor: editor.alter_field( AuthorTextFieldWithIndex, old_field, new_field, strict=True ) self.assertForeignKeyExists( AuthorTextFieldWithIndex, "text_field_id", "schema_author" ) @isolate_apps("schema") def test_char_field_pk_to_auto_field(self): class Foo(Model): id = CharField(max_length=255, primary_key=True) class Meta: app_label = "schema" with 
connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field("id") new_field = AutoField(primary_key=True) new_field.set_attributes_from_name("id") new_field.model = Foo with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) @skipUnlessDBFeature("supports_foreign_keys") def test_fk_to_proxy(self): "Creating a FK to a proxy model creates database constraints." class AuthorProxy(Author): class Meta: app_label = "schema" apps = new_apps proxy = True class AuthorRef(Model): author = ForeignKey(AuthorProxy, on_delete=CASCADE) class Meta: app_label = "schema" apps = new_apps self.local_models = [AuthorProxy, AuthorRef] # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(AuthorRef) self.assertForeignKeyExists(AuthorRef, "author_id", "schema_author") @skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys") def test_fk_db_constraint(self): "The db_constraint parameter is respected" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(Author) editor.create_model(BookWeak) # Initial tables are there list(Author.objects.all()) list(Tag.objects.all()) list(BookWeak.objects.all()) self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author") # Make a db_constraint=False FK new_field = ForeignKey(Tag, CASCADE, db_constraint=False) new_field.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.add_field(Author, new_field) self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag") # Alter to one with a constraint new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) self.assertForeignKeyExists(Author, "tag_id", "schema_tag") # Alter to one without a constraint again new_field2 = ForeignKey(Tag, CASCADE) new_field2.set_attributes_from_name("tag") with connection.schema_editor() as editor: editor.alter_field(Author, new_field2, new_field, strict=True) self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag") @isolate_apps("schema") def test_no_db_constraint_added_during_primary_key_change(self): """ When a primary key that's pointed to by a ForeignKey with db_constraint=False is altered, a foreign key constraint isn't added. """ class Author(Model): class Meta: app_label = "schema" class BookWeak(Model): author = ForeignKey(Author, CASCADE, db_constraint=False) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWeak) self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author") old_field = Author._meta.get_field("id") new_field = BigAutoField(primary_key=True) new_field.model = Author new_field.set_attributes_from_name("id") # @isolate_apps() and inner models are needed to have the model # relations populated, otherwise this doesn't act as a regression test. 
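        # BookWeak.author is the only relation pointing at Author here, so
        # related_objects must contain exactly one entry for this check to be
        # meaningful.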
self.assertEqual(len(new_field.model._meta.related_objects), 1) with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author") def _test_m2m_db_constraint(self, M2MFieldClass): class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(LocalAuthorWithM2M) # Initial tables are there list(LocalAuthorWithM2M.objects.all()) list(Tag.objects.all()) # Make a db_constraint=False FK new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False) new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) self.assertForeignKeyNotExists( new_field.remote_field.through, "tag_id", "schema_tag" ) @skipUnlessDBFeature("supports_foreign_keys") def test_m2m_db_constraint(self): self._test_m2m_db_constraint(ManyToManyField) @skipUnlessDBFeature("supports_foreign_keys") def test_m2m_db_constraint_custom(self): self._test_m2m_db_constraint(CustomManyToManyField) @skipUnlessDBFeature("supports_foreign_keys") def test_m2m_db_constraint_inherited(self): self._test_m2m_db_constraint(InheritedManyToManyField) def test_add_field(self): """ Tests adding fields to models """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add the new field new_field = IntegerField(null=True) new_field.set_attributes_from_name("age") with CaptureQueriesContext( connection ) as ctx, connection.schema_editor() as editor: editor.add_field(Author, new_field) drop_default_sql = editor.sql_alter_column_no_default % { "column": editor.quote_name(new_field.name), } self.assertFalse( any(drop_default_sql in query["sql"] for query in ctx.captured_queries) ) # Table is not rebuilt. self.assertIs( any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries), False ) self.assertIs( any("DROP TABLE" in query["sql"] for query in ctx.captured_queries), False ) columns = self.column_classes(Author) self.assertEqual( columns["age"][0], connection.features.introspected_field_types["IntegerField"], ) self.assertTrue(columns["age"][1][6]) def test_add_field_remove_field(self): """ Adding a field and removing it removes all deferred sql referring to it. """ with connection.schema_editor() as editor: # Create a table with a unique constraint on the slug field. editor.create_model(Tag) # Remove the slug column. 
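            # Dropping the column should also discard the deferred
            # unique-constraint SQL that referenced it (asserted below).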
editor.remove_field(Tag, Tag._meta.get_field("slug")) self.assertEqual(editor.deferred_sql, []) def test_add_field_temp_default(self): """ Tests adding fields to models with a temporary default """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = CharField(max_length=30, default="Godwin") new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) self.assertEqual( columns["surname"][0], connection.features.introspected_field_types["CharField"], ) self.assertEqual( columns["surname"][1][6], connection.features.interprets_empty_strings_as_nulls, ) def test_add_field_temp_default_boolean(self): """ Tests adding fields to models with a temporary default where the default is False. (#21783) """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no age field columns = self.column_classes(Author) self.assertNotIn("age", columns) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add a not-null field new_field = BooleanField(default=False) new_field.set_attributes_from_name("awesome") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # BooleanField are stored as TINYINT(1) on MySQL. field_type = columns["awesome"][0] self.assertEqual( field_type, connection.features.introspected_field_types["BooleanField"] ) def test_add_field_default_transform(self): """ Tests adding fields to models with a default that is not directly valid in the database (#22581) """ class TestTransformField(IntegerField): # Weird field that saves the count of items in its value def get_default(self): return self.default def get_prep_value(self, value): if value is None: return 0 return len(value) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add some rows of data Author.objects.create(name="Andrew", height=30) Author.objects.create(name="Andrea") # Add the field with a default it needs to cast (to string in this case) new_field = TestTransformField(default={1: 2}) new_field.set_attributes_from_name("thing") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure the field is there columns = self.column_classes(Author) field_type, field_info = columns["thing"] self.assertEqual( field_type, connection.features.introspected_field_types["IntegerField"] ) # Make sure the values were transformed correctly self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2) def test_add_field_o2o_nullable(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Note) new_field = OneToOneField(Note, CASCADE, null=True) new_field.set_attributes_from_name("note") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) self.assertIn("note_id", columns) self.assertTrue(columns["note_id"][1][6]) def test_add_field_binary(self): """ Tests binary fields get a sane default (#22851) """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field 
new_field = BinaryField(blank=True) new_field.set_attributes_from_name("bits") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # MySQL annoyingly uses the same backend, so it'll come back as one of # these two types. self.assertIn(columns["bits"][0], ("BinaryField", "TextField")) def test_add_field_durationfield_with_default(self): with connection.schema_editor() as editor: editor.create_model(Author) new_field = DurationField(default=datetime.timedelta(minutes=10)) new_field.set_attributes_from_name("duration") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) self.assertEqual( columns["duration"][0], connection.features.introspected_field_types["DurationField"], ) @unittest.skipUnless(connection.vendor == "mysql", "MySQL specific") def test_add_binaryfield_mediumblob(self): """ Test adding a custom-sized binary field on MySQL (#24846). """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Add the new field with default new_field = MediumBlobField(blank=True, default=b"123") new_field.set_attributes_from_name("bits") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) # Introspection treats BLOBs as TextFields self.assertEqual(columns["bits"][0], "TextField") def test_remove_field(self): with connection.schema_editor() as editor: editor.create_model(Author) with CaptureQueriesContext(connection) as ctx: editor.remove_field(Author, Author._meta.get_field("name")) columns = self.column_classes(Author) self.assertNotIn("name", columns) if getattr(connection.features, "can_alter_table_drop_column", True): # Table is not rebuilt. 
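            # Backends without native DROP COLUMN support (e.g. older SQLite)
            # fall back to remaking the table (new table, copy rows, drop the
            # old one), so these assertions only apply when the feature flag
            # says the column can be dropped in place.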
self.assertIs( any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries), False, ) self.assertIs( any("DROP TABLE" in query["sql"] for query in ctx.captured_queries), False, ) def test_alter(self): """ Tests simple altering of fields """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertEqual( columns["name"][0], connection.features.introspected_field_types["CharField"], ) self.assertEqual( bool(columns["name"][1][6]), bool(connection.features.interprets_empty_strings_as_nulls), ) # Alter the name field to a TextField old_field = Author._meta.get_field("name") new_field = TextField(null=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) columns = self.column_classes(Author) self.assertEqual(columns["name"][0], "TextField") self.assertTrue(columns["name"][1][6]) # Change nullability again new_field2 = TextField(null=False) new_field2.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) columns = self.column_classes(Author) self.assertEqual(columns["name"][0], "TextField") self.assertEqual( bool(columns["name"][1][6]), bool(connection.features.interprets_empty_strings_as_nulls), ) def test_alter_auto_field_to_integer_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to IntegerField old_field = Author._meta.get_field("id") new_field = IntegerField(primary_key=True) new_field.set_attributes_from_name("id") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # Now that ID is an IntegerField, the database raises an error if it # isn't provided. if not connection.features.supports_unspecified_pk: with self.assertRaises(DatabaseError): Author.objects.create() def test_alter_auto_field_to_char_field(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change AutoField to CharField old_field = Author._meta.get_field("id") new_field = CharField(primary_key=True, max_length=50) new_field.set_attributes_from_name("id") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) @isolate_apps("schema") def test_alter_auto_field_quoted_db_column(self): class Foo(Model): id = AutoField(primary_key=True, db_column='"quoted_id"') class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field("id") new_field = BigAutoField(primary_key=True) new_field.model = Foo new_field.db_column = '"quoted_id"' new_field.set_attributes_from_name("id") with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) Foo.objects.create() def test_alter_not_unique_field_to_primary_key(self): # Create the table. with connection.schema_editor() as editor: editor.create_model(Author) # Change UUIDField to primary key. 
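        # The old "id" primary key is dropped first so that promoting "uuid"
        # doesn't leave the table with two primary keys. A primary key already
        # implies uniqueness, so no separate UNIQUE constraint should be added
        # for the column (checked below).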
old_field = Author._meta.get_field("uuid") new_field = UUIDField(primary_key=True) new_field.set_attributes_from_name("uuid") new_field.model = Author with connection.schema_editor() as editor: editor.remove_field(Author, Author._meta.get_field("id")) editor.alter_field(Author, old_field, new_field, strict=True) # Redundant unique constraint is not added. count = self.get_constraints_count( Author._meta.db_table, Author._meta.get_field("uuid").column, None, ) self.assertLessEqual(count["uniques"], 1) @isolate_apps("schema") def test_alter_primary_key_quoted_db_table(self): class Foo(Model): class Meta: app_label = "schema" db_table = '"foo"' with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field("id") new_field = BigAutoField(primary_key=True) new_field.model = Foo new_field.set_attributes_from_name("id") with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) Foo.objects.create() def test_alter_text_field(self): # Regression for "BLOB/TEXT column 'info' can't have a default value") # on MySQL. # Create the table with connection.schema_editor() as editor: editor.create_model(Note) old_field = Note._meta.get_field("info") new_field = TextField(blank=True) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) def test_alter_text_field_to_not_null_with_default_value(self): with connection.schema_editor() as editor: editor.create_model(Note) old_field = Note._meta.get_field("address") new_field = TextField(blank=True, default="", null=False) new_field.set_attributes_from_name("address") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) @skipUnlessDBFeature("can_defer_constraint_checks", "can_rollback_ddl") def test_alter_fk_checks_deferred_constraints(self): """ #25492 - Altering a foreign key's structure and data in the same transaction. """ with connection.schema_editor() as editor: editor.create_model(Node) old_field = Node._meta.get_field("parent") new_field = ForeignKey(Node, CASCADE) new_field.set_attributes_from_name("parent") parent = Node.objects.create() with connection.schema_editor() as editor: # Update the parent FK to create a deferred constraint check. 
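            # On backends that support deferrable constraints (e.g.
            # PostgreSQL) Django creates FK constraints as DEFERRABLE
            # INITIALLY DEFERRED, so this UPDATE's constraint check is
            # postponed until commit. Altering the field in the same
            # transaction must cope with that pending check.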
Node.objects.update(parent=parent) editor.alter_field(Node, old_field, new_field, strict=True) @isolate_apps("schema") def test_alter_null_with_default_value_deferred_constraints(self): class Publisher(Model): class Meta: app_label = "schema" class Article(Model): publisher = ForeignKey(Publisher, CASCADE) title = CharField(max_length=50, null=True) description = CharField(max_length=100, null=True) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Publisher) editor.create_model(Article) self.isolated_local_models = [Article, Publisher] publisher = Publisher.objects.create() Article.objects.create(publisher=publisher) old_title = Article._meta.get_field("title") new_title = CharField(max_length=50, null=False, default="") new_title.set_attributes_from_name("title") old_description = Article._meta.get_field("description") new_description = CharField(max_length=100, null=False, default="") new_description.set_attributes_from_name("description") with connection.schema_editor() as editor: editor.alter_field(Article, old_title, new_title, strict=True) editor.alter_field(Article, old_description, new_description, strict=True) def test_alter_text_field_to_date_field(self): """ #25002 - Test conversion of text field to date field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info="1988-05-05") old_field = Note._meta.get_field("info") new_field = DateField(blank=True) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns["info"][1][6]) def test_alter_text_field_to_datetime_field(self): """ #25002 - Test conversion of text field to datetime field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info="1988-05-05 3:16:17.4567") old_field = Note._meta.get_field("info") new_field = DateTimeField(blank=True) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns["info"][1][6]) def test_alter_text_field_to_time_field(self): """ #25002 - Test conversion of text field to time field. """ with connection.schema_editor() as editor: editor.create_model(Note) Note.objects.create(info="3:16:17.4567") old_field = Note._meta.get_field("info") new_field = TimeField(blank=True) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) # Make sure the field isn't nullable columns = self.column_classes(Note) self.assertFalse(columns["info"][1][6]) @skipIfDBFeature("interprets_empty_strings_as_nulls") def test_alter_textual_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. 
""" with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = CharField(max_length=50) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): Note.objects.create(info=None) @skipUnlessDBFeature("interprets_empty_strings_as_nulls") def test_alter_textual_field_not_null_to_null(self): """ Nullability for textual fields is preserved on databases that interpret empty strings as NULLs. """ with connection.schema_editor() as editor: editor.create_model(Author) columns = self.column_classes(Author) # Field is nullable. self.assertTrue(columns["uuid"][1][6]) # Change to NOT NULL. old_field = Author._meta.get_field("uuid") new_field = SlugField(null=False, blank=True) new_field.set_attributes_from_name("uuid") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) columns = self.column_classes(Author) # Nullability is preserved. self.assertTrue(columns["uuid"][1][6]) def test_alter_numeric_field_keep_null_status(self): """ Changing a field type shouldn't affect the not null status. """ with connection.schema_editor() as editor: editor.create_model(UniqueTest) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug="aaa") old_field = UniqueTest._meta.get_field("year") new_field = BigIntegerField() new_field.set_attributes_from_name("year") with connection.schema_editor() as editor: editor.alter_field(UniqueTest, old_field, new_field, strict=True) with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=None, slug="bbb") def test_alter_null_to_not_null(self): """ #23609 - Tests handling of default values when altering from NULL to NOT NULL. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertTrue(columns["height"][1][6]) # Create some test data Author.objects.create(name="Not null author", height=12) Author.objects.create(name="Null author") # Verify null value self.assertEqual(Author.objects.get(name="Not null author").height, 12) self.assertIsNone(Author.objects.get(name="Null author").height) # Alter the height field to NOT NULL with default old_field = Author._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) columns = self.column_classes(Author) self.assertFalse(columns["height"][1][6]) # Verify default value self.assertEqual(Author.objects.get(name="Not null author").height, 12) self.assertEqual(Author.objects.get(name="Null author").height, 42) def test_alter_charfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_nulls when changing a CharField to null. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Change the CharField to null old_field = Author._meta.get_field("name") new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_char_field_decrease_length(self): # Create the table. with connection.schema_editor() as editor: editor.create_model(Author) Author.objects.create(name="x" * 255) # Change max_length of CharField. old_field = Author._meta.get_field("name") new_field = CharField(max_length=254) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: msg = "value too long for type character varying(254)" with self.assertRaisesMessage(DataError, msg): editor.alter_field(Author, old_field, new_field, strict=True) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_with_custom_db_type(self): from django.contrib.postgres.fields import ArrayField class Foo(Model): field = ArrayField(CharField(max_length=255)) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Foo) self.isolated_local_models = [Foo] old_field = Foo._meta.get_field("field") new_field = ArrayField(CharField(max_length=16)) new_field.set_attributes_from_name("field") new_field.model = Foo with connection.schema_editor() as editor: editor.alter_field(Foo, old_field, new_field, strict=True) @isolate_apps("schema") @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_array_field_decrease_base_field_length(self): from django.contrib.postgres.fields import ArrayField class ArrayModel(Model): field = ArrayField(CharField(max_length=16)) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(ArrayModel) self.isolated_local_models = [ArrayModel] ArrayModel.objects.create(field=["x" * 16]) old_field = ArrayModel._meta.get_field("field") new_field = ArrayField(CharField(max_length=15)) new_field.set_attributes_from_name("field") new_field.model = ArrayModel with connection.schema_editor() as editor: msg = "value too long for type character varying(15)" with self.assertRaisesMessage(DataError, msg): editor.alter_field(ArrayModel, old_field, new_field, strict=True) @isolate_apps("schema") @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_array_field_decrease_nested_base_field_length(self): from django.contrib.postgres.fields import ArrayField class ArrayModel(Model): field = ArrayField(ArrayField(CharField(max_length=16))) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(ArrayModel) self.isolated_local_models = [ArrayModel] ArrayModel.objects.create(field=[["x" * 16]]) old_field = ArrayModel._meta.get_field("field") new_field = ArrayField(ArrayField(CharField(max_length=15))) new_field.set_attributes_from_name("field") new_field.model = ArrayModel with connection.schema_editor() as editor: msg = "value too long for type character varying(15)" with self.assertRaisesMessage(DataError, msg): editor.alter_field(ArrayModel, old_field, new_field, strict=True) def test_alter_textfield_to_null(self): """ #24307 - Should skip an alter statement on databases with interprets_empty_strings_as_nulls when changing a TextField to null. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Note) # Change the TextField to null old_field = Note._meta.get_field("info") new_field = copy(old_field) new_field.null = True with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) def test_alter_null_to_not_null_keeping_default(self): """ #23738 - Can change a nullable field with default to non-nullable with the same default. """ # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) # Ensure the field is right to begin with columns = self.column_classes(AuthorWithDefaultHeight) self.assertTrue(columns["height"][1][6]) # Alter the height field to NOT NULL keeping the previous default old_field = AuthorWithDefaultHeight._meta.get_field("height") new_field = PositiveIntegerField(default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field( AuthorWithDefaultHeight, old_field, new_field, strict=True ) columns = self.column_classes(AuthorWithDefaultHeight) self.assertFalse(columns["height"][1][6]) @skipUnlessDBFeature("supports_foreign_keys") def test_alter_fk(self): """ Tests altering of FKs """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual( columns["author_id"][0], connection.features.introspected_field_types["IntegerField"], ) self.assertForeignKeyExists(Book, "author_id", "schema_author") # Alter the FK old_field = Book._meta.get_field("author") new_field = ForeignKey(Author, CASCADE, editable=False) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) columns = self.column_classes(Book) self.assertEqual( columns["author_id"][0], connection.features.introspected_field_types["IntegerField"], ) self.assertForeignKeyExists(Book, "author_id", "schema_author") @skipUnlessDBFeature("supports_foreign_keys") def test_alter_to_fk(self): """ #24447 - Tests adding a FK constraint for an existing column """ class LocalBook(Model): author = IntegerField() title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalBook] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalBook) # Ensure no FK constraint exists constraints = self.get_constraints(LocalBook._meta.db_table) for details in constraints.values(): if details["foreign_key"]: self.fail( "Found an unexpected FK constraint to %s" % details["columns"] ) old_field = LocalBook._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(LocalBook, old_field, new_field, strict=True) self.assertForeignKeyExists(LocalBook, "author_id", "schema_author") @skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys") def test_alter_o2o_to_fk(self): """ #24163 - Tests altering of OneToOneField to ForeignKey """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) # Ensure the field is right to begin with columns = self.column_classes(BookWithO2O) self.assertEqual( columns["author_id"][0], 
connection.features.introspected_field_types["IntegerField"], ) # Ensure the field is unique author = Author.objects.create(name="Joe") BookWithO2O.objects.create( author=author, title="Django 1", pub_date=datetime.datetime.now() ) with self.assertRaises(IntegrityError): BookWithO2O.objects.create( author=author, title="Django 2", pub_date=datetime.datetime.now() ) BookWithO2O.objects.all().delete() self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author") # Alter the OneToOneField to ForeignKey old_field = BookWithO2O._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) columns = self.column_classes(Book) self.assertEqual( columns["author_id"][0], connection.features.introspected_field_types["IntegerField"], ) # Ensure the field is not unique anymore Book.objects.create( author=author, title="Django 1", pub_date=datetime.datetime.now() ) Book.objects.create( author=author, title="Django 2", pub_date=datetime.datetime.now() ) self.assertForeignKeyExists(Book, "author_id", "schema_author") @skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys") def test_alter_fk_to_o2o(self): """ #24163 - Tests altering of ForeignKey to OneToOneField """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the field is right to begin with columns = self.column_classes(Book) self.assertEqual( columns["author_id"][0], connection.features.introspected_field_types["IntegerField"], ) # Ensure the field is not unique author = Author.objects.create(name="Joe") Book.objects.create( author=author, title="Django 1", pub_date=datetime.datetime.now() ) Book.objects.create( author=author, title="Django 2", pub_date=datetime.datetime.now() ) Book.objects.all().delete() self.assertForeignKeyExists(Book, "author_id", "schema_author") # Alter the ForeignKey to OneToOneField old_field = Book._meta.get_field("author") new_field = OneToOneField(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) columns = self.column_classes(BookWithO2O) self.assertEqual( columns["author_id"][0], connection.features.introspected_field_types["IntegerField"], ) # Ensure the field is unique now BookWithO2O.objects.create( author=author, title="Django 1", pub_date=datetime.datetime.now() ) with self.assertRaises(IntegrityError): BookWithO2O.objects.create( author=author, title="Django 2", pub_date=datetime.datetime.now() ) self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author") def test_alter_field_fk_to_o2o(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) expected_fks = ( 1 if connection.features.supports_foreign_keys and connection.features.can_introspect_foreign_keys else 0 ) expected_indexes = 1 if connection.features.indexes_foreign_keys else 0 # Check the index is right to begin with. 
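        # A plain ForeignKey column normally gets a non-unique index (when the
        # backend indexes FKs), while a OneToOneField relies on a UNIQUE
        # constraint instead; the before/after constraint counts verify the
        # swap happens without leaving duplicates behind.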
counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field("author").column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual( counts, {"fks": expected_fks, "uniques": 0, "indexes": expected_indexes}, ) old_field = Book._meta.get_field("author") new_field = OneToOneField(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field) counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field("author").column, (Author._meta.db_table, Author._meta.pk.column), ) # The index on ForeignKey is replaced with a unique constraint for # OneToOneField. self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0}) def test_alter_field_fk_keeps_index(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) expected_fks = ( 1 if connection.features.supports_foreign_keys and connection.features.can_introspect_foreign_keys else 0 ) expected_indexes = 1 if connection.features.indexes_foreign_keys else 0 # Check the index is right to begin with. counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field("author").column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual( counts, {"fks": expected_fks, "uniques": 0, "indexes": expected_indexes}, ) old_field = Book._meta.get_field("author") # on_delete changed from CASCADE. new_field = ForeignKey(Author, PROTECT) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) counts = self.get_constraints_count( Book._meta.db_table, Book._meta.get_field("author").column, (Author._meta.db_table, Author._meta.pk.column), ) # The index remains. self.assertEqual( counts, {"fks": expected_fks, "uniques": 0, "indexes": expected_indexes}, ) def test_alter_field_o2o_to_fk(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) expected_fks = ( 1 if connection.features.supports_foreign_keys and connection.features.can_introspect_foreign_keys else 0 ) # Check the unique constraint is right to begin with. counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field("author").column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0}) old_field = BookWithO2O._meta.get_field("author") new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field) counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field("author").column, (Author._meta.db_table, Author._meta.pk.column), ) # The unique constraint on OneToOneField is replaced with an index for # ForeignKey. self.assertEqual(counts, {"fks": expected_fks, "uniques": 0, "indexes": 1}) def test_alter_field_o2o_keeps_unique(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithO2O) expected_fks = ( 1 if connection.features.supports_foreign_keys and connection.features.can_introspect_foreign_keys else 0 ) # Check the unique constraint is right to begin with. 
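        # on_delete is enforced in Python rather than in the database, so
        # changing CASCADE to PROTECT must not drop or recreate the unique
        # constraint backing the OneToOneField.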
counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field("author").column, (Author._meta.db_table, Author._meta.pk.column), ) self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0}) old_field = BookWithO2O._meta.get_field("author") # on_delete changed from CASCADE. new_field = OneToOneField(Author, PROTECT) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.alter_field(BookWithO2O, old_field, new_field, strict=True) counts = self.get_constraints_count( BookWithO2O._meta.db_table, BookWithO2O._meta.get_field("author").column, (Author._meta.db_table, Author._meta.pk.column), ) # The unique constraint remains. self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0}) @skipUnlessDBFeature("ignores_table_name_case") def test_alter_db_table_case(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Alter the case of the table old_table_name = Author._meta.db_table with connection.schema_editor() as editor: editor.alter_db_table(Author, old_table_name, old_table_name.upper()) def test_alter_implicit_id_to_explicit(self): """ Should be able to convert an implicit "id" field to an explicit "id" primary key field. """ with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field("id") new_field = AutoField(primary_key=True) new_field.set_attributes_from_name("id") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) # This will fail if DROP DEFAULT is inadvertently executed on this # field which drops the id sequence, at least on PostgreSQL. Author.objects.create(name="Foo") Author.objects.create(name="Bar") def test_alter_autofield_pk_to_bigautofield_pk(self): with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field("id") new_field = BigAutoField(primary_key=True) new_field.set_attributes_from_name("id") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) Author.objects.create(name="Foo", pk=1) with connection.cursor() as cursor: sequence_reset_sqls = connection.ops.sequence_reset_sql( no_style(), [Author] ) if sequence_reset_sqls: cursor.execute(sequence_reset_sqls[0]) self.assertIsNotNone(Author.objects.create(name="Bar")) def test_alter_autofield_pk_to_smallautofield_pk(self): with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field("id") new_field = SmallAutoField(primary_key=True) new_field.set_attributes_from_name("id") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) Author.objects.create(name="Foo", pk=1) with connection.cursor() as cursor: sequence_reset_sqls = connection.ops.sequence_reset_sql( no_style(), [Author] ) if sequence_reset_sqls: cursor.execute(sequence_reset_sqls[0]) self.assertIsNotNone(Author.objects.create(name="Bar")) def test_alter_int_pk_to_autofield_pk(self): """ Should be able to rename an IntegerField(primary_key=True) to AutoField(primary_key=True). 
""" with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field("i") new_field = AutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name("i") with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # A model representing the updated model. class IntegerPKToAutoField(Model): i = AutoField(primary_key=True) j = IntegerField(unique=True) class Meta: app_label = "schema" apps = new_apps db_table = IntegerPK._meta.db_table # An id (i) is generated by the database. obj = IntegerPKToAutoField.objects.create(j=1) self.assertIsNotNone(obj.i) def test_alter_int_pk_to_bigautofield_pk(self): """ Should be able to rename an IntegerField(primary_key=True) to BigAutoField(primary_key=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) old_field = IntegerPK._meta.get_field("i") new_field = BigAutoField(primary_key=True) new_field.model = IntegerPK new_field.set_attributes_from_name("i") with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # A model representing the updated model. class IntegerPKToBigAutoField(Model): i = BigAutoField(primary_key=True) j = IntegerField(unique=True) class Meta: app_label = "schema" apps = new_apps db_table = IntegerPK._meta.db_table # An id (i) is generated by the database. obj = IntegerPKToBigAutoField.objects.create(j=1) self.assertIsNotNone(obj.i) @isolate_apps("schema") def test_alter_smallint_pk_to_smallautofield_pk(self): """ Should be able to rename an SmallIntegerField(primary_key=True) to SmallAutoField(primary_key=True). """ class SmallIntegerPK(Model): i = SmallIntegerField(primary_key=True) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(SmallIntegerPK) self.isolated_local_models = [SmallIntegerPK] old_field = SmallIntegerPK._meta.get_field("i") new_field = SmallAutoField(primary_key=True) new_field.model = SmallIntegerPK new_field.set_attributes_from_name("i") with connection.schema_editor() as editor: editor.alter_field(SmallIntegerPK, old_field, new_field, strict=True) def test_alter_int_pk_to_int_unique(self): """ Should be able to rename an IntegerField(primary_key=True) to IntegerField(unique=True). """ with connection.schema_editor() as editor: editor.create_model(IntegerPK) # Delete the old PK old_field = IntegerPK._meta.get_field("i") new_field = IntegerField(unique=True) new_field.model = IntegerPK new_field.set_attributes_from_name("i") with connection.schema_editor() as editor: editor.alter_field(IntegerPK, old_field, new_field, strict=True) # The primary key constraint is gone. Result depends on database: # 'id' for SQLite, None for others (must not be 'i'). self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ("id", None)) # Set up a model class as it currently stands. The original IntegerPK # class is now out of date and some backends make use of the whole # model class when modifying a field (such as sqlite3 when remaking a # table) so an outdated model class leads to incorrect results. 
class Transitional(Model): i = IntegerField(unique=True) j = IntegerField(unique=True) class Meta: app_label = "schema" apps = new_apps db_table = "INTEGERPK" # model requires a new PK old_field = Transitional._meta.get_field("j") new_field = IntegerField(primary_key=True) new_field.model = Transitional new_field.set_attributes_from_name("j") with connection.schema_editor() as editor: editor.alter_field(Transitional, old_field, new_field, strict=True) # Create a model class representing the updated model. class IntegerUnique(Model): i = IntegerField(unique=True) j = IntegerField(primary_key=True) class Meta: app_label = "schema" apps = new_apps db_table = "INTEGERPK" # Ensure unique constraint works. IntegerUnique.objects.create(i=1, j=1) with self.assertRaises(IntegrityError): IntegerUnique.objects.create(i=1, j=2) def test_rename(self): """ Tests simple altering of fields """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the field is right to begin with columns = self.column_classes(Author) self.assertEqual( columns["name"][0], connection.features.introspected_field_types["CharField"], ) self.assertNotIn("display_name", columns) # Alter the name field's name old_field = Author._meta.get_field("name") new_field = CharField(max_length=254) new_field.set_attributes_from_name("display_name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) columns = self.column_classes(Author) self.assertEqual( columns["display_name"][0], connection.features.introspected_field_types["CharField"], ) self.assertNotIn("name", columns) @isolate_apps("schema") def test_rename_referenced_field(self): class Author(Model): name = CharField(max_length=255, unique=True) class Meta: app_label = "schema" class Book(Model): author = ForeignKey(Author, CASCADE, to_field="name") class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name("renamed") with connection.schema_editor( atomic=connection.features.supports_atomic_references_rename ) as editor: editor.alter_field(Author, Author._meta.get_field("name"), new_field) # Ensure the foreign key reference was updated. self.assertForeignKeyExists(Book, "author_id", "schema_author", "renamed") @skipIfDBFeature("interprets_empty_strings_as_nulls") def test_rename_keep_null_status(self): """ Renaming a field shouldn't affect the not null status. 
""" with connection.schema_editor() as editor: editor.create_model(Note) with self.assertRaises(IntegrityError): Note.objects.create(info=None) old_field = Note._meta.get_field("info") new_field = TextField() new_field.set_attributes_from_name("detail_info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) columns = self.column_classes(Note) self.assertEqual(columns["detail_info"][0], "TextField") self.assertNotIn("info", columns) with self.assertRaises(IntegrityError): NoteRename.objects.create(detail_info=None) def _test_m2m_create(self, M2MFieldClass): """ Tests M2M fields on models during creation """ class LocalBookWithM2M(Model): author = ForeignKey(Author, CASCADE) title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() tags = M2MFieldClass("TagM2MTest", related_name="books") class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalBookWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2M) # Ensure there is now an m2m table there columns = self.column_classes( LocalBookWithM2M._meta.get_field("tags").remote_field.through ) self.assertEqual( columns["tagm2mtest_id"][0], connection.features.introspected_field_types["IntegerField"], ) def test_m2m_create(self): self._test_m2m_create(ManyToManyField) def test_m2m_create_custom(self): self._test_m2m_create(CustomManyToManyField) def test_m2m_create_inherited(self): self._test_m2m_create(InheritedManyToManyField) def _test_m2m_create_through(self, M2MFieldClass): """ Tests M2M fields on models during creation with through models """ class LocalTagThrough(Model): book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = "schema" apps = new_apps class LocalBookWithM2MThrough(Model): tags = M2MFieldClass( "TagM2MTest", related_name="books", through=LocalTagThrough ) class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalTagThrough, LocalBookWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalTagThrough) editor.create_model(TagM2MTest) editor.create_model(LocalBookWithM2MThrough) # Ensure there is now an m2m table there columns = self.column_classes(LocalTagThrough) self.assertEqual( columns["book_id"][0], connection.features.introspected_field_types["IntegerField"], ) self.assertEqual( columns["tag_id"][0], connection.features.introspected_field_types["IntegerField"], ) def test_m2m_create_through(self): self._test_m2m_create_through(ManyToManyField) def test_m2m_create_through_custom(self): self._test_m2m_create_through(CustomManyToManyField) def test_m2m_create_through_inherited(self): self._test_m2m_create_through(InheritedManyToManyField) def test_m2m_through_remove(self): class LocalAuthorNoteThrough(Model): book = ForeignKey("schema.Author", CASCADE) tag = ForeignKey("self", CASCADE) class Meta: app_label = "schema" apps = new_apps class LocalNoteWithM2MThrough(Model): authors = ManyToManyField("schema.Author", through=LocalAuthorNoteThrough) class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalAuthorNoteThrough, LocalNoteWithM2MThrough] # Create the tables. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalAuthorNoteThrough) editor.create_model(LocalNoteWithM2MThrough) # Remove the through parameter. 
old_field = LocalNoteWithM2MThrough._meta.get_field("authors") new_field = ManyToManyField("Author") new_field.set_attributes_from_name("authors") msg = ( f"Cannot alter field {old_field} into {new_field} - they are not " f"compatible types (you cannot alter to or from M2M fields, or add or " f"remove through= on M2M fields)" ) with connection.schema_editor() as editor: with self.assertRaisesMessage(ValueError, msg): editor.alter_field(LocalNoteWithM2MThrough, old_field, new_field) def _test_m2m(self, M2MFieldClass): """ Tests adding/removing M2M fields on models """ class LocalAuthorWithM2M(Model): name = CharField(max_length=255) class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalAuthorWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorWithM2M) editor.create_model(TagM2MTest) # Create an M2M field new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors") new_field.contribute_to_class(LocalAuthorWithM2M, "tags") # Ensure there's no m2m table there with self.assertRaises(DatabaseError): self.column_classes(new_field.remote_field.through) # Add the field with connection.schema_editor() as editor: editor.add_field(LocalAuthorWithM2M, new_field) # Ensure there is now an m2m table there columns = self.column_classes(new_field.remote_field.through) self.assertEqual( columns["tagm2mtest_id"][0], connection.features.introspected_field_types["IntegerField"], ) # "Alter" the field. This should not rename the DB table to itself. with connection.schema_editor() as editor: editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True) # Remove the M2M table again with connection.schema_editor() as editor: editor.remove_field(LocalAuthorWithM2M, new_field) # Ensure there's no m2m table there with self.assertRaises(DatabaseError): self.column_classes(new_field.remote_field.through) # Make sure the model state is coherent with the table one now that # we've removed the tags field. opts = LocalAuthorWithM2M._meta opts.local_many_to_many.remove(new_field) del new_apps.all_models["schema"][ new_field.remote_field.through._meta.model_name ] opts._expire_cache() def test_m2m(self): self._test_m2m(ManyToManyField) def test_m2m_custom(self): self._test_m2m(CustomManyToManyField) def test_m2m_inherited(self): self._test_m2m(InheritedManyToManyField) def _test_m2m_through_alter(self, M2MFieldClass): """ Tests altering M2Ms with explicit through models (should no-op) """ class LocalAuthorTag(Model): author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE) tag = ForeignKey("schema.TagM2MTest", CASCADE) class Meta: app_label = "schema" apps = new_apps class LocalAuthorWithM2MThrough(Model): name = CharField(max_length=255) tags = M2MFieldClass( "schema.TagM2MTest", related_name="authors", through=LocalAuthorTag ) class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough] # Create the tables with connection.schema_editor() as editor: editor.create_model(LocalAuthorTag) editor.create_model(LocalAuthorWithM2MThrough) editor.create_model(TagM2MTest) # Ensure the m2m table is there self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3) # "Alter" the field's blankness. This should not actually do anything. 
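        # With an explicit through model the M2M field has no database-level
        # column or table of its own to change, so altering it should be a
        # no-op at the schema level; the column count check below confirms the
        # through table is left untouched.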
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags") new_field = M2MFieldClass( "schema.TagM2MTest", related_name="authors", through=LocalAuthorTag ) new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags") with connection.schema_editor() as editor: editor.alter_field( LocalAuthorWithM2MThrough, old_field, new_field, strict=True ) # Ensure the m2m table is still there self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3) def test_m2m_through_alter(self): self._test_m2m_through_alter(ManyToManyField) def test_m2m_through_alter_custom(self): self._test_m2m_through_alter(CustomManyToManyField) def test_m2m_through_alter_inherited(self): self._test_m2m_through_alter(InheritedManyToManyField) def _test_m2m_repoint(self, M2MFieldClass): """ Tests repointing M2M fields """ class LocalBookWithM2M(Model): author = ForeignKey(Author, CASCADE) title = CharField(max_length=100, db_index=True) pub_date = DateTimeField() tags = M2MFieldClass("TagM2MTest", related_name="books") class Meta: app_label = "schema" apps = new_apps self.local_models = [LocalBookWithM2M] # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(LocalBookWithM2M) editor.create_model(TagM2MTest) editor.create_model(UniqueTest) # Ensure the M2M exists and points to TagM2MTest if connection.features.supports_foreign_keys: self.assertForeignKeyExists( LocalBookWithM2M._meta.get_field("tags").remote_field.through, "tagm2mtest_id", "schema_tagm2mtest", ) # Repoint the M2M old_field = LocalBookWithM2M._meta.get_field("tags") new_field = M2MFieldClass(UniqueTest) new_field.contribute_to_class(LocalBookWithM2M, "uniques") with connection.schema_editor() as editor: editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True) # Ensure old M2M is gone with self.assertRaises(DatabaseError): self.column_classes( LocalBookWithM2M._meta.get_field("tags").remote_field.through ) # This model looks like the new model and is used for teardown. opts = LocalBookWithM2M._meta opts.local_many_to_many.remove(old_field) # Ensure the new M2M exists and points to UniqueTest if connection.features.supports_foreign_keys: self.assertForeignKeyExists( new_field.remote_field.through, "uniquetest_id", "schema_uniquetest" ) def test_m2m_repoint(self): self._test_m2m_repoint(ManyToManyField) def test_m2m_repoint_custom(self): self._test_m2m_repoint(CustomManyToManyField) def test_m2m_repoint_inherited(self): self._test_m2m_repoint(InheritedManyToManyField) @isolate_apps("schema") def test_m2m_rename_field_in_target_model(self): class LocalTagM2MTest(Model): title = CharField(max_length=255) class Meta: app_label = "schema" class LocalM2M(Model): tags = ManyToManyField(LocalTagM2MTest) class Meta: app_label = "schema" # Create the tables. with connection.schema_editor() as editor: editor.create_model(LocalM2M) editor.create_model(LocalTagM2MTest) self.isolated_local_models = [LocalM2M, LocalTagM2MTest] # Ensure the m2m table is there. self.assertEqual(len(self.column_classes(LocalM2M)), 1) # Alter a field in LocalTagM2MTest. old_field = LocalTagM2MTest._meta.get_field("title") new_field = CharField(max_length=254) new_field.contribute_to_class(LocalTagM2MTest, "title1") # @isolate_apps() and inner models are needed to have the model # relations populated, otherwise this doesn't act as a regression test. 
self.assertEqual(len(new_field.model._meta.related_objects), 1) with connection.schema_editor() as editor: editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True) # Ensure the m2m table is still there. self.assertEqual(len(self.column_classes(LocalM2M)), 1) @skipUnlessDBFeature( "supports_column_check_constraints", "can_introspect_check_constraints" ) def test_check_constraints(self): """ Tests creating/deleting CHECK constraints """ # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the constraint exists constraints = self.get_constraints(Author._meta.db_table) if not any( details["columns"] == ["height"] and details["check"] for details in constraints.values() ): self.fail("No check constraint for height found") # Alter the column to remove it old_field = Author._meta.get_field("height") new_field = IntegerField(null=True, blank=True) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) constraints = self.get_constraints(Author._meta.db_table) for details in constraints.values(): if details["columns"] == ["height"] and details["check"]: self.fail("Check constraint for height found") # Alter the column to re-add it new_field2 = Author._meta.get_field("height") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) constraints = self.get_constraints(Author._meta.db_table) if not any( details["columns"] == ["height"] and details["check"] for details in constraints.values() ): self.fail("No check constraint for height found") @skipUnlessDBFeature( "supports_column_check_constraints", "can_introspect_check_constraints" ) @isolate_apps("schema") def test_check_constraint_timedelta_param(self): class DurationModel(Model): duration = DurationField() class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(DurationModel) self.isolated_local_models = [DurationModel] constraint_name = "duration_gte_5_minutes" constraint = CheckConstraint( check=Q(duration__gt=datetime.timedelta(minutes=5)), name=constraint_name, ) DurationModel._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(DurationModel, constraint) constraints = self.get_constraints(DurationModel._meta.db_table) self.assertIn(constraint_name, constraints) with self.assertRaises(IntegrityError), atomic(): DurationModel.objects.create(duration=datetime.timedelta(minutes=4)) DurationModel.objects.create(duration=datetime.timedelta(minutes=10)) @skipUnlessDBFeature( "supports_column_check_constraints", "can_introspect_check_constraints" ) def test_remove_field_check_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(Author) # Add the custom check constraint constraint = CheckConstraint( check=Q(height__gte=0), name="author_height_gte_0_check" ) custom_constraint_name = constraint.name Author._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) # Ensure the constraints exist constraints = self.get_constraints(Author._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["height"] and details["check"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Alter the column to remove field check old_field = 
Author._meta.get_field("height") new_field = IntegerField(null=True, blank=True) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) constraints = self.get_constraints(Author._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["height"] and details["check"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Alter the column to re-add field check new_field2 = Author._meta.get_field("height") with connection.schema_editor() as editor: editor.alter_field(Author, new_field, new_field2, strict=True) constraints = self.get_constraints(Author._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["height"] and details["check"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the check constraint with connection.schema_editor() as editor: Author._meta.constraints = [] editor.remove_constraint(Author, constraint) def test_unique(self): """ Tests removing and adding unique constraints to a single column. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure the field is unique to begin with Tag.objects.create(title="foo", slug="foo") with self.assertRaises(IntegrityError): Tag.objects.create(title="bar", slug="foo") Tag.objects.all().delete() # Alter the slug field to be non-unique old_field = Tag._meta.get_field("slug") new_field = SlugField(unique=False) new_field.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, old_field, new_field, strict=True) # Ensure the field is no longer unique Tag.objects.create(title="foo", slug="foo") Tag.objects.create(title="bar", slug="foo") Tag.objects.all().delete() # Alter the slug field to be unique new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, new_field, new_field2, strict=True) # Ensure the field is unique again Tag.objects.create(title="foo", slug="foo") with self.assertRaises(IntegrityError): Tag.objects.create(title="bar", slug="foo") Tag.objects.all().delete() # Rename the field new_field3 = SlugField(unique=True) new_field3.set_attributes_from_name("slug2") with connection.schema_editor() as editor: editor.alter_field(Tag, new_field2, new_field3, strict=True) # Ensure the field is still unique TagUniqueRename.objects.create(title="foo", slug2="foo") with self.assertRaises(IntegrityError): TagUniqueRename.objects.create(title="bar", slug2="foo") Tag.objects.all().delete() def test_unique_name_quoting(self): old_table_name = TagUniqueRename._meta.db_table try: with connection.schema_editor() as editor: editor.create_model(TagUniqueRename) editor.alter_db_table(TagUniqueRename, old_table_name, "unique-table") TagUniqueRename._meta.db_table = "unique-table" # This fails if the unique index name isn't quoted. 
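                # The table was renamed to "unique-table", which contains a
                # dash, so the generated unique constraint/index name must be
                # quoted in the DDL for the statement below to be valid SQL.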
editor.alter_unique_together(TagUniqueRename, [], (("title", "slug2"),)) finally: with connection.schema_editor() as editor: editor.delete_model(TagUniqueRename) TagUniqueRename._meta.db_table = old_table_name @isolate_apps("schema") @skipUnlessDBFeature("supports_foreign_keys") def test_unique_no_unnecessary_fk_drops(self): """ If AlterField isn't selective about dropping foreign key constraints when modifying a field with a unique constraint, the AlterField incorrectly drops and recreates the Book.author foreign key even though it doesn't restrict the field being changed (#29193). """ class Author(Model): name = CharField(max_length=254, unique=True) class Meta: app_label = "schema" class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) new_field = CharField(max_length=255, unique=True) new_field.model = Author new_field.set_attributes_from_name("name") with self.assertLogs("django.db.backends.schema", "DEBUG") as cm: with connection.schema_editor() as editor: editor.alter_field(Author, Author._meta.get_field("name"), new_field) # One SQL statement is executed to alter the field. self.assertEqual(len(cm.records), 1) @isolate_apps("schema") def test_unique_and_reverse_m2m(self): """ AlterField can modify a unique field when there's a reverse M2M relation on the model. """ class Tag(Model): title = CharField(max_length=255) slug = SlugField(unique=True) class Meta: app_label = "schema" class Book(Model): tags = ManyToManyField(Tag, related_name="books") class Meta: app_label = "schema" self.isolated_local_models = [Book._meta.get_field("tags").remote_field.through] with connection.schema_editor() as editor: editor.create_model(Tag) editor.create_model(Book) new_field = SlugField(max_length=75, unique=True) new_field.model = Tag new_field.set_attributes_from_name("slug") with self.assertLogs("django.db.backends.schema", "DEBUG") as cm: with connection.schema_editor() as editor: editor.alter_field(Tag, Tag._meta.get_field("slug"), new_field) # One SQL statement is executed to alter the field. self.assertEqual(len(cm.records), 1) # Ensure that the field is still unique. 
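        # The duplicate insert below must still fail: if alter_field had
        # recreated the table (as e.g. SQLite does) without restoring the
        # unique constraint, the second create() would succeed.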
Tag.objects.create(title="foo", slug="foo") with self.assertRaises(IntegrityError): Tag.objects.create(title="bar", slug="foo") @skipUnlessDBFeature("allows_multiple_constraints_on_same_fields") def test_remove_field_unique_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithUniqueName) self.local_models = [AuthorWithUniqueName] # Add the custom unique constraint constraint = UniqueConstraint(fields=["name"], name="author_name_uniq") custom_constraint_name = constraint.name AuthorWithUniqueName._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(AuthorWithUniqueName, constraint) # Ensure the constraints exist constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name"] and details["unique"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Alter the column to remove field uniqueness old_field = AuthorWithUniqueName._meta.get_field("name") new_field = CharField(max_length=255) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True) constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name"] and details["unique"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Alter the column to re-add field uniqueness new_field2 = AuthorWithUniqueName._meta.get_field("name") with connection.schema_editor() as editor: editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True) constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name"] and details["unique"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the unique constraint with connection.schema_editor() as editor: AuthorWithUniqueName._meta.constraints = [] editor.remove_constraint(AuthorWithUniqueName, constraint) def test_unique_together(self): """ Tests removing and adding unique_together constraints on a model. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(UniqueTest) # Ensure the fields are unique to begin with UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.create(year=2011, slug="foo") UniqueTest.objects.create(year=2011, slug="bar") with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.all().delete() # Alter the model to its non-unique-together companion with connection.schema_editor() as editor: editor.alter_unique_together( UniqueTest, UniqueTest._meta.unique_together, [] ) # Ensure the fields are no longer unique UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.all().delete() # Alter it back new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_unique_together( UniqueTest, [], UniqueTest._meta.unique_together ) # Ensure the fields are unique again UniqueTest.objects.create(year=2012, slug="foo") with self.assertRaises(IntegrityError): UniqueTest.objects.create(year=2012, slug="foo") UniqueTest.objects.all().delete() def test_unique_together_with_fk(self): """ Tests removing and adding unique_together constraints that include a foreign key. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the fields are unique to begin with self.assertEqual(Book._meta.unique_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_unique_together(Book, [], [["author", "title"]]) # Alter it back with connection.schema_editor() as editor: editor.alter_unique_together(Book, [["author", "title"]], []) def test_unique_together_with_fk_with_existing_index(self): """ Tests removing and adding unique_together constraints that include a foreign key, where the foreign key is added after the model is created. 
""" # Create the tables with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithoutAuthor) new_field = ForeignKey(Author, CASCADE) new_field.set_attributes_from_name("author") editor.add_field(BookWithoutAuthor, new_field) # Ensure the fields aren't unique to begin with self.assertEqual(Book._meta.unique_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_unique_together(Book, [], [["author", "title"]]) # Alter it back with connection.schema_editor() as editor: editor.alter_unique_together(Book, [["author", "title"]], []) @skipUnlessDBFeature("allows_multiple_constraints_on_same_fields") def test_remove_unique_together_does_not_remove_meta_constraints(self): with connection.schema_editor() as editor: editor.create_model(AuthorWithUniqueNameAndBirthday) self.local_models = [AuthorWithUniqueNameAndBirthday] # Add the custom unique constraint constraint = UniqueConstraint( fields=["name", "birthday"], name="author_name_birthday_uniq" ) custom_constraint_name = constraint.name AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint] with connection.schema_editor() as editor: editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint) # Ensure the constraints exist constraints = self.get_constraints( AuthorWithUniqueNameAndBirthday._meta.db_table ) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["unique"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Remove unique together unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together with connection.schema_editor() as editor: editor.alter_unique_together( AuthorWithUniqueNameAndBirthday, unique_together, [] ) constraints = self.get_constraints( AuthorWithUniqueNameAndBirthday._meta.db_table ) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["unique"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 0) # Re-add unique together with connection.schema_editor() as editor: editor.alter_unique_together( AuthorWithUniqueNameAndBirthday, [], unique_together ) constraints = self.get_constraints( AuthorWithUniqueNameAndBirthday._meta.db_table ) self.assertIn(custom_constraint_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["unique"] and name != custom_constraint_name ] self.assertEqual(len(other_constraints), 1) # Drop the unique constraint with connection.schema_editor() as editor: AuthorWithUniqueNameAndBirthday._meta.constraints = [] editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint) def test_unique_constraint(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint(fields=["name"], name="name_uq") # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table self.assertIs(sql.references_table(table), True) self.assertIs(sql.references_column(table, "name"), True) # Remove constraint. 
with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint(Upper("name").desc(), name="func_upper_uq") # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table constraints = self.get_constraints(table) if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, constraint.name, ["DESC"]) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) # SQL contains a database function. self.assertIs(sql.references_column(table, "name"), True) self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql)) # Remove constraint. with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_composite_func_unique_constraint(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithSlug) constraint = UniqueConstraint( Upper("title"), Lower("slug"), name="func_upper_lower_unq", ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(BookWithSlug, constraint) sql = constraint.create_sql(BookWithSlug, editor) table = BookWithSlug._meta.db_table constraints = self.get_constraints(table) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) # SQL contains database functions. self.assertIs(sql.references_column(table, "title"), True) self.assertIs(sql.references_column(table, "slug"), True) sql = str(sql) self.assertIn("UPPER(%s)" % editor.quote_name("title"), sql) self.assertIn("LOWER(%s)" % editor.quote_name("slug"), sql) self.assertLess(sql.index("UPPER"), sql.index("LOWER")) # Remove constraint. with connection.schema_editor() as editor: editor.remove_constraint(BookWithSlug, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_unique_constraint_field_and_expression(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint( F("height").desc(), "uuid", Lower("name").asc(), name="func_f_lower_field_unq", ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, constraint.name, ["DESC", "ASC", "ASC"]) constraints = self.get_constraints(table) self.assertIs(constraints[constraint.name]["unique"], True) self.assertEqual(len(constraints[constraint.name]["columns"]), 3) self.assertEqual(constraints[constraint.name]["columns"][1], "uuid") # SQL contains database functions and columns. self.assertIs(sql.references_column(table, "height"), True) self.assertIs(sql.references_column(table, "name"), True) self.assertIs(sql.references_column(table, "uuid"), True) self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql)) # Remove constraint. 
with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes", "supports_partial_indexes") def test_func_unique_constraint_partial(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint( Upper("name"), name="func_upper_cond_weight_uq", condition=Q(weight__isnull=False), ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table constraints = self.get_constraints(table) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) self.assertIs(sql.references_column(table, "name"), True) self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql)) self.assertIn( "WHERE %s IS NOT NULL" % editor.quote_name("weight"), str(sql), ) # Remove constraint. with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes", "supports_covering_indexes") def test_func_unique_constraint_covering(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint( Upper("name"), name="func_upper_covering_uq", include=["weight", "height"], ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table constraints = self.get_constraints(table) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) self.assertEqual( constraints[constraint.name]["columns"], [None, "weight", "height"], ) self.assertIs(sql.references_column(table, "name"), True) self.assertIs(sql.references_column(table, "weight"), True) self.assertIs(sql.references_column(table, "height"), True) self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql)) self.assertIn( "INCLUDE (%s, %s)" % ( editor.quote_name("weight"), editor.quote_name("height"), ), str(sql), ) # Remove constraint. with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_lookups(self): with connection.schema_editor() as editor: editor.create_model(Author) with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs): constraint = UniqueConstraint( F("name__lower"), F("weight__abs"), name="func_lower_abs_lookup_uq", ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) sql = constraint.create_sql(Author, editor) table = Author._meta.db_table constraints = self.get_constraints(table) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) # SQL contains columns. self.assertIs(sql.references_column(table, "name"), True) self.assertIs(sql.references_column(table, "weight"), True) # Remove constraint. 
with connection.schema_editor() as editor: editor.remove_constraint(Author, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_collate(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("This backend does not support case-insensitive collations.") with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithSlug) constraint = UniqueConstraint( Collate(F("title"), collation=collation).desc(), Collate("slug", collation=collation), name="func_collate_uq", ) # Add constraint. with connection.schema_editor() as editor: editor.add_constraint(BookWithSlug, constraint) sql = constraint.create_sql(BookWithSlug, editor) table = BookWithSlug._meta.db_table constraints = self.get_constraints(table) self.assertIn(constraint.name, constraints) self.assertIs(constraints[constraint.name]["unique"], True) if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, constraint.name, ["DESC", "ASC"]) # SQL contains columns and a collation. self.assertIs(sql.references_column(table, "title"), True) self.assertIs(sql.references_column(table, "slug"), True) self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql)) # Remove constraint. with connection.schema_editor() as editor: editor.remove_constraint(BookWithSlug, constraint) self.assertNotIn(constraint.name, self.get_constraints(table)) @skipIfDBFeature("supports_expression_indexes") def test_func_unique_constraint_unsupported(self): # UniqueConstraint is ignored on databases that don't support indexes on # expressions. with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint(F("name"), name="func_name_uq") with connection.schema_editor() as editor, self.assertNumQueries(0): self.assertIsNone(editor.add_constraint(Author, constraint)) self.assertIsNone(editor.remove_constraint(Author, constraint)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_nonexistent_field(self): constraint = UniqueConstraint(Lower("nonexistent"), name="func_nonexistent_uq") msg = ( "Cannot resolve keyword 'nonexistent' into field. Choices are: " "height, id, name, uuid, weight" ) with self.assertRaisesMessage(FieldError, msg): with connection.schema_editor() as editor: editor.add_constraint(Author, constraint) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_nondeterministic(self): with connection.schema_editor() as editor: editor.create_model(Author) constraint = UniqueConstraint(Random(), name="func_random_uq") with connection.schema_editor() as editor: with self.assertRaises(DatabaseError): editor.add_constraint(Author, constraint) @ignore_warnings(category=RemovedInDjango51Warning) def test_index_together(self): """ Tests removing and adding index_together constraints on a model. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure there's no index on the year/slug columns first self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c["columns"] == ["slug", "title"] ), False, ) # Alter the model to add an index with connection.schema_editor() as editor: editor.alter_index_together(Tag, [], [("slug", "title")]) # Ensure there is now an index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c["columns"] == ["slug", "title"] ), True, ) # Alter it back new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_index_together(Tag, [("slug", "title")], []) # Ensure there's no index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tag").values() if c["columns"] == ["slug", "title"] ), False, ) @ignore_warnings(category=RemovedInDjango51Warning) def test_index_together_with_fk(self): """ Tests removing and adding index_together constraints that include a foreign key. """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the fields are unique to begin with self.assertEqual(Book._meta.index_together, ()) # Add the unique_together constraint with connection.schema_editor() as editor: editor.alter_index_together(Book, [], [["author", "title"]]) # Alter it back with connection.schema_editor() as editor: editor.alter_index_together(Book, [["author", "title"]], []) @ignore_warnings(category=RemovedInDjango51Warning) @isolate_apps("schema") def test_create_index_together(self): """ Tests creating models with index_together already defined """ class TagIndexed(Model): title = CharField(max_length=255) slug = SlugField(unique=True) class Meta: app_label = "schema" index_together = [["slug", "title"]] # Create the table with connection.schema_editor() as editor: editor.create_model(TagIndexed) self.isolated_local_models = [TagIndexed] # Ensure there is an index self.assertIs( any( c["index"] for c in self.get_constraints("schema_tagindexed").values() if c["columns"] == ["slug", "title"] ), True, ) @skipUnlessDBFeature("allows_multiple_constraints_on_same_fields") @ignore_warnings(category=RemovedInDjango51Warning) @isolate_apps("schema") def test_remove_index_together_does_not_remove_meta_indexes(self): class AuthorWithIndexedNameAndBirthday(Model): name = CharField(max_length=255) birthday = DateField() class Meta: app_label = "schema" index_together = [["name", "birthday"]] with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedNameAndBirthday) self.isolated_local_models = [AuthorWithIndexedNameAndBirthday] # Add the custom index index = Index(fields=["name", "birthday"], name="author_name_birthday_idx") custom_index_name = index.name AuthorWithIndexedNameAndBirthday._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedNameAndBirthday, index) # Ensure the indexes exist constraints = self.get_constraints( AuthorWithIndexedNameAndBirthday._meta.db_table ) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["index"] and name != custom_index_name ] self.assertEqual(len(other_constraints), 1) # Remove index together index_together = AuthorWithIndexedNameAndBirthday._meta.index_together with 
connection.schema_editor() as editor: editor.alter_index_together( AuthorWithIndexedNameAndBirthday, index_together, [] ) constraints = self.get_constraints( AuthorWithIndexedNameAndBirthday._meta.db_table ) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["index"] and name != custom_index_name ] self.assertEqual(len(other_constraints), 0) # Re-add index together with connection.schema_editor() as editor: editor.alter_index_together( AuthorWithIndexedNameAndBirthday, [], index_together ) constraints = self.get_constraints( AuthorWithIndexedNameAndBirthday._meta.db_table ) self.assertIn(custom_index_name, constraints) other_constraints = [ name for name, details in constraints.items() if details["columns"] == ["name", "birthday"] and details["index"] and name != custom_index_name ] self.assertEqual(len(other_constraints), 1) # Drop the index with connection.schema_editor() as editor: AuthorWithIndexedNameAndBirthday._meta.indexes = [] editor.remove_index(AuthorWithIndexedNameAndBirthday, index) @isolate_apps("schema") def test_db_table(self): """ Tests renaming of the table """ class Author(Model): name = CharField(max_length=255) class Meta: app_label = "schema" class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = "schema" # Create the table and one referring it. with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there to begin with columns = self.column_classes(Author) self.assertEqual( columns["name"][0], connection.features.introspected_field_types["CharField"], ) # Alter the table with connection.schema_editor( atomic=connection.features.supports_atomic_references_rename ) as editor: editor.alter_db_table(Author, "schema_author", "schema_otherauthor") Author._meta.db_table = "schema_otherauthor" columns = self.column_classes(Author) self.assertEqual( columns["name"][0], connection.features.introspected_field_types["CharField"], ) # Ensure the foreign key reference was updated self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor") # Alter the table again with connection.schema_editor( atomic=connection.features.supports_atomic_references_rename ) as editor: editor.alter_db_table(Author, "schema_otherauthor", "schema_author") # Ensure the table is still there Author._meta.db_table = "schema_author" columns = self.column_classes(Author) self.assertEqual( columns["name"][0], connection.features.introspected_field_types["CharField"], ) def test_add_remove_index(self): """ Tests index addition and removal """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure the table is there and has no index self.assertNotIn("title", self.get_indexes(Author._meta.db_table)) # Add the index index = Index(fields=["name"], name="author_title_idx") with connection.schema_editor() as editor: editor.add_index(Author, index) self.assertIn("name", self.get_indexes(Author._meta.db_table)) # Drop the index with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn("name", self.get_indexes(Author._meta.db_table)) def test_remove_db_index_doesnt_remove_custom_indexes(self): """ Changing db_index to False doesn't remove indexes from Meta.indexes. 
""" with connection.schema_editor() as editor: editor.create_model(AuthorWithIndexedName) self.local_models = [AuthorWithIndexedName] # Ensure the table has its index self.assertIn("name", self.get_indexes(AuthorWithIndexedName._meta.db_table)) # Add the custom index index = Index(fields=["-name"], name="author_name_idx") author_index_name = index.name with connection.schema_editor() as editor: db_index_name = editor._create_index_name( table_name=AuthorWithIndexedName._meta.db_table, column_names=("name",), ) try: AuthorWithIndexedName._meta.indexes = [index] with connection.schema_editor() as editor: editor.add_index(AuthorWithIndexedName, index) old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertIn(author_index_name, old_constraints) self.assertIn(db_index_name, old_constraints) # Change name field to db_index=False old_field = AuthorWithIndexedName._meta.get_field("name") new_field = CharField(max_length=255) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field( AuthorWithIndexedName, old_field, new_field, strict=True ) new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table) self.assertNotIn(db_index_name, new_constraints) # The index from Meta.indexes is still in the database. self.assertIn(author_index_name, new_constraints) # Drop the index with connection.schema_editor() as editor: editor.remove_index(AuthorWithIndexedName, index) finally: AuthorWithIndexedName._meta.indexes = [] def test_order_index(self): """ Indexes defined with ordering (ASC/DESC) defined on column """ with connection.schema_editor() as editor: editor.create_model(Author) # The table doesn't have an index self.assertNotIn("title", self.get_indexes(Author._meta.db_table)) index_name = "author_name_idx" # Add the index index = Index(fields=["name", "-weight"], name=index_name) with connection.schema_editor() as editor: editor.add_index(Author, index) if connection.features.supports_index_column_ordering: self.assertIndexOrder(Author._meta.db_table, index_name, ["ASC", "DESC"]) # Drop the index with connection.schema_editor() as editor: editor.remove_index(Author, index) def test_indexes(self): """ Tests creation/altering of indexes """ # Create the table with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) # Ensure the table is there and has the right index self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to remove the index old_field = Book._meta.get_field("title") new_field = CharField(max_length=100, db_index=False) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(Book, old_field, new_field, strict=True) # Ensure the table is there and has no index self.assertNotIn( "title", self.get_indexes(Book._meta.db_table), ) # Alter to re-add the index new_field2 = Book._meta.get_field("title") with connection.schema_editor() as editor: editor.alter_field(Book, new_field, new_field2, strict=True) # Ensure the table is there and has the index again self.assertIn( "title", self.get_indexes(Book._meta.db_table), ) # Add a unique column, verify that creates an implicit index new_field3 = BookWithSlug._meta.get_field("slug") with connection.schema_editor() as editor: editor.add_field(Book, new_field3) self.assertIn( "slug", self.get_uniques(Book._meta.db_table), ) # Remove the unique, check the index goes with it new_field4 = CharField(max_length=20, unique=False) 
new_field4.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True) self.assertNotIn( "slug", self.get_uniques(Book._meta.db_table), ) def test_text_field_with_db_index(self): with connection.schema_editor() as editor: editor.create_model(AuthorTextFieldWithIndex) # The text_field index is present if the database supports it. assertion = ( self.assertIn if connection.features.supports_index_on_text_field else self.assertNotIn ) assertion( "text_field", self.get_indexes(AuthorTextFieldWithIndex._meta.db_table) ) def _index_expressions_wrappers(self): index_expression = IndexExpression() index_expression.set_wrapper_classes(connection) return ", ".join( [ wrapper_cls.__qualname__ for wrapper_cls in index_expression.wrapper_classes ] ) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_multiple_wrapper_references(self): index = Index(OrderBy(F("name").desc(), descending=True), name="name") msg = ( "Multiple references to %s can't be used in an indexed expression." % self._index_expressions_wrappers() ) with connection.schema_editor() as editor: with self.assertRaisesMessage(ValueError, msg): editor.add_index(Author, index) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_invalid_topmost_expressions(self): index = Index(Upper(F("name").desc()), name="name") msg = ( "%s must be topmost expressions in an indexed expression." % self._index_expressions_wrappers() ) with connection.schema_editor() as editor: with self.assertRaisesMessage(ValueError, msg): editor.add_index(Author, index) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index(self): with connection.schema_editor() as editor: editor.create_model(Author) index = Index(Lower("name").desc(), name="func_lower_idx") # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, index.name, ["DESC"]) # SQL contains a database function. self.assertIs(sql.references_column(table, "name"), True) self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql)) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_f(self): with connection.schema_editor() as editor: editor.create_model(Tag) index = Index("slug", F("title").desc(), name="func_f_idx") # Add index. with connection.schema_editor() as editor: editor.add_index(Tag, index) sql = index.create_sql(Tag, editor) table = Tag._meta.db_table self.assertIn(index.name, self.get_constraints(table)) if connection.features.supports_index_column_ordering: self.assertIndexOrder(Tag._meta.db_table, index.name, ["ASC", "DESC"]) # SQL contains columns. self.assertIs(sql.references_column(table, "slug"), True) self.assertIs(sql.references_column(table, "title"), True) # Remove index. 
with connection.schema_editor() as editor: editor.remove_index(Tag, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_lookups(self): with connection.schema_editor() as editor: editor.create_model(Author) with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs): index = Index( F("name__lower"), F("weight__abs"), name="func_lower_abs_lookup_idx", ) # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table self.assertIn(index.name, self.get_constraints(table)) # SQL contains columns. self.assertIs(sql.references_column(table, "name"), True) self.assertIs(sql.references_column(table, "weight"), True) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_composite_func_index(self): with connection.schema_editor() as editor: editor.create_model(Author) index = Index(Lower("name"), Upper("name"), name="func_lower_upper_idx") # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table self.assertIn(index.name, self.get_constraints(table)) # SQL contains database functions. self.assertIs(sql.references_column(table, "name"), True) sql = str(sql) self.assertIn("LOWER(%s)" % editor.quote_name("name"), sql) self.assertIn("UPPER(%s)" % editor.quote_name("name"), sql) self.assertLess(sql.index("LOWER"), sql.index("UPPER")) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_composite_func_index_field_and_expression(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) index = Index( F("author").desc(), Lower("title").asc(), "pub_date", name="func_f_lower_field_idx", ) # Add index. with connection.schema_editor() as editor: editor.add_index(Book, index) sql = index.create_sql(Book, editor) table = Book._meta.db_table constraints = self.get_constraints(table) if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, index.name, ["DESC", "ASC", "ASC"]) self.assertEqual(len(constraints[index.name]["columns"]), 3) self.assertEqual(constraints[index.name]["columns"][2], "pub_date") # SQL contains database functions and columns. self.assertIs(sql.references_column(table, "author_id"), True) self.assertIs(sql.references_column(table, "title"), True) self.assertIs(sql.references_column(table, "pub_date"), True) self.assertIn("LOWER(%s)" % editor.quote_name("title"), str(sql)) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Book, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") @isolate_apps("schema") def test_func_index_f_decimalfield(self): class Node(Model): value = DecimalField(max_digits=5, decimal_places=2) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Node) index = Index(F("value"), name="func_f_decimalfield_idx") # Add index. 
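        # F("value") resolves to a plain column reference, so the generated index
        # SQL is expected to index the DecimalField column directly, without a
        # CAST (verified below).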
with connection.schema_editor() as editor: editor.add_index(Node, index) sql = index.create_sql(Node, editor) table = Node._meta.db_table self.assertIn(index.name, self.get_constraints(table)) self.assertIs(sql.references_column(table, "value"), True) # SQL doesn't contain casting. self.assertNotIn("CAST", str(sql)) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Node, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_cast(self): with connection.schema_editor() as editor: editor.create_model(Author) index = Index(Cast("weight", FloatField()), name="func_cast_idx") # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table self.assertIn(index.name, self.get_constraints(table)) self.assertIs(sql.references_column(table, "weight"), True) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_collate(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("This backend does not support case-insensitive collations.") with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(BookWithSlug) index = Index( Collate(F("title"), collation=collation).desc(), Collate("slug", collation=collation), name="func_collate_idx", ) # Add index. with connection.schema_editor() as editor: editor.add_index(BookWithSlug, index) sql = index.create_sql(BookWithSlug, editor) table = Book._meta.db_table self.assertIn(index.name, self.get_constraints(table)) if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, index.name, ["DESC", "ASC"]) # SQL contains columns and a collation. self.assertIs(sql.references_column(table, "title"), True) self.assertIs(sql.references_column(table, "slug"), True) self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql)) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Book, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") @skipIfDBFeature("collate_as_index_expression") def test_func_index_collate_f_ordered(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("This backend does not support case-insensitive collations.") with connection.schema_editor() as editor: editor.create_model(Author) index = Index( Collate(F("name").desc(), collation=collation), name="func_collate_f_desc_idx", ) # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table self.assertIn(index.name, self.get_constraints(table)) if connection.features.supports_index_column_ordering: self.assertIndexOrder(table, index.name, ["DESC"]) # SQL contains columns and a collation. self.assertIs(sql.references_column(table, "name"), True) self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql)) # Remove index. 
with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_calc(self): with connection.schema_editor() as editor: editor.create_model(Author) index = Index(F("height") / (F("weight") + Value(5)), name="func_calc_idx") # Add index. with connection.schema_editor() as editor: editor.add_index(Author, index) sql = index.create_sql(Author, editor) table = Author._meta.db_table self.assertIn(index.name, self.get_constraints(table)) # SQL contains columns and expressions. self.assertIs(sql.references_column(table, "height"), True) self.assertIs(sql.references_column(table, "weight"), True) sql = str(sql) self.assertIs( sql.index(editor.quote_name("height")) < sql.index("/") < sql.index(editor.quote_name("weight")) < sql.index("+") < sql.index("5"), True, ) # Remove index. with connection.schema_editor() as editor: editor.remove_index(Author, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes", "supports_json_field") @isolate_apps("schema") def test_func_index_json_key_transform(self): class JSONModel(Model): field = JSONField() class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(JSONModel) self.isolated_local_models = [JSONModel] index = Index("field__some_key", name="func_json_key_idx") with connection.schema_editor() as editor: editor.add_index(JSONModel, index) sql = index.create_sql(JSONModel, editor) table = JSONModel._meta.db_table self.assertIn(index.name, self.get_constraints(table)) self.assertIs(sql.references_column(table, "field"), True) with connection.schema_editor() as editor: editor.remove_index(JSONModel, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipUnlessDBFeature("supports_expression_indexes", "supports_json_field") @isolate_apps("schema") def test_func_index_json_key_transform_cast(self): class JSONModel(Model): field = JSONField() class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(JSONModel) self.isolated_local_models = [JSONModel] index = Index( Cast(KeyTextTransform("some_key", "field"), IntegerField()), name="func_json_key_cast_idx", ) with connection.schema_editor() as editor: editor.add_index(JSONModel, index) sql = index.create_sql(JSONModel, editor) table = JSONModel._meta.db_table self.assertIn(index.name, self.get_constraints(table)) self.assertIs(sql.references_column(table, "field"), True) with connection.schema_editor() as editor: editor.remove_index(JSONModel, index) self.assertNotIn(index.name, self.get_constraints(table)) @skipIfDBFeature("supports_expression_indexes") def test_func_index_unsupported(self): # Index is ignored on databases that don't support indexes on # expressions. with connection.schema_editor() as editor: editor.create_model(Author) index = Index(F("name"), name="random_idx") with connection.schema_editor() as editor, self.assertNumQueries(0): self.assertIsNone(editor.add_index(Author, index)) self.assertIsNone(editor.remove_index(Author, index)) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_nonexistent_field(self): index = Index(Lower("nonexistent"), name="func_nonexistent_idx") msg = ( "Cannot resolve keyword 'nonexistent' into field. 
Choices are: " "height, id, name, uuid, weight" ) with self.assertRaisesMessage(FieldError, msg): with connection.schema_editor() as editor: editor.add_index(Author, index) @skipUnlessDBFeature("supports_expression_indexes") def test_func_index_nondeterministic(self): with connection.schema_editor() as editor: editor.create_model(Author) index = Index(Random(), name="func_random_idx") with connection.schema_editor() as editor: with self.assertRaises(DatabaseError): editor.add_index(Author, index) def test_primary_key(self): """ Tests altering of the primary key """ # Create the table with connection.schema_editor() as editor: editor.create_model(Tag) # Ensure the table is there and has the right PK self.assertEqual(self.get_primary_key(Tag._meta.db_table), "id") # Alter to change the PK id_field = Tag._meta.get_field("id") old_field = Tag._meta.get_field("slug") new_field = SlugField(primary_key=True) new_field.set_attributes_from_name("slug") new_field.model = Tag with connection.schema_editor() as editor: editor.remove_field(Tag, id_field) editor.alter_field(Tag, old_field, new_field) # Ensure the PK changed self.assertNotIn( "id", self.get_indexes(Tag._meta.db_table), ) self.assertEqual(self.get_primary_key(Tag._meta.db_table), "slug") def test_alter_primary_key_the_same_name(self): with connection.schema_editor() as editor: editor.create_model(Thing) old_field = Thing._meta.get_field("when") new_field = CharField(max_length=2, primary_key=True) new_field.set_attributes_from_name("when") new_field.model = Thing with connection.schema_editor() as editor: editor.alter_field(Thing, old_field, new_field, strict=True) self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when") with connection.schema_editor() as editor: editor.alter_field(Thing, new_field, old_field, strict=True) self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when") def test_context_manager_exit(self): """ Ensures transaction is correctly closed when an error occurs inside a SchemaEditor context. """ class SomeError(Exception): pass try: with connection.schema_editor(): raise SomeError except SomeError: self.assertFalse(connection.in_atomic_block) @skipIfDBFeature("can_rollback_ddl") def test_unsupported_transactional_ddl_disallowed(self): message = ( "Executing DDL statements while in a transaction on databases " "that can't perform a rollback is prohibited." ) with atomic(), connection.schema_editor() as editor: with self.assertRaisesMessage(TransactionManagementError, message): editor.execute( editor.sql_create_table % {"table": "foo", "definition": ""} ) @skipUnlessDBFeature("supports_foreign_keys", "indexes_foreign_keys") def test_foreign_key_index_long_names_regression(self): """ Regression test for #21497. Only affects databases that supports foreign keys. """ # Create the table with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Find the properly shortened column name column_name = connection.ops.quote_name( "author_foreign_key_with_really_long_field_name_id" ) column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase # Ensure the table is there and has an index on the column self.assertIn( column_name, self.get_indexes(BookWithLongName._meta.db_table), ) @skipUnlessDBFeature("supports_foreign_keys") def test_add_foreign_key_long_names(self): """ Regression test for #23009. Only affects databases that supports foreign keys. 
""" # Create the initial tables with connection.schema_editor() as editor: editor.create_model(AuthorWithEvenLongerName) editor.create_model(BookWithLongName) # Add a second FK, this would fail due to long ref name before the fix new_field = ForeignKey( AuthorWithEvenLongerName, CASCADE, related_name="something" ) new_field.set_attributes_from_name( "author_other_really_long_named_i_mean_so_long_fk" ) with connection.schema_editor() as editor: editor.add_field(BookWithLongName, new_field) @isolate_apps("schema") @skipUnlessDBFeature("supports_foreign_keys") def test_add_foreign_key_quoted_db_table(self): class Author(Model): class Meta: db_table = '"table_author_double_quoted"' app_label = "schema" class Book(Model): author = ForeignKey(Author, CASCADE) class Meta: app_label = "schema" with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) self.isolated_local_models = [Author] if connection.vendor == "mysql": self.assertForeignKeyExists( Book, "author_id", '"table_author_double_quoted"' ) else: self.assertForeignKeyExists(Book, "author_id", "table_author_double_quoted") def test_add_foreign_object(self): with connection.schema_editor() as editor: editor.create_model(BookForeignObj) self.local_models = [BookForeignObj] new_field = ForeignObject( Author, on_delete=CASCADE, from_fields=["author_id"], to_fields=["id"] ) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.add_field(BookForeignObj, new_field) def test_creation_deletion_reserved_names(self): """ Tries creating a model's table, and then deleting it when it has a SQL reserved name. """ # Create the table with connection.schema_editor() as editor: try: editor.create_model(Thing) except OperationalError as e: self.fail( "Errors when applying initial migration for a model " "with a table named after an SQL reserved word: %s" % e ) # The table is there list(Thing.objects.all()) # Clean up that table with connection.schema_editor() as editor: editor.delete_model(Thing) # The table is gone with self.assertRaises(DatabaseError): list(Thing.objects.all()) def test_remove_constraints_capital_letters(self): """ #23065 - Constraint names must be quoted if they contain capital letters. 
""" def get_field(*args, field_class=IntegerField, **kwargs): kwargs["db_column"] = "CamelCase" field = field_class(*args, **kwargs) field.set_attributes_from_name("CamelCase") return field model = Author field = get_field() table = model._meta.db_table column = field.column identifier_converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: editor.create_model(model) editor.add_field(model, field) constraint_name = "CamelCaseIndex" expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_index % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "using": "", "columns": editor.quote_name(column), "extra": "", "condition": "", "include": "", } ) self.assertIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) editor.alter_field(model, get_field(db_index=True), field, strict=True) self.assertNotIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) constraint_name = "CamelCaseUniqConstraint" expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor._create_unique_sql(model, [field], constraint_name)) self.assertIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) editor.alter_field(model, get_field(unique=True), field, strict=True) self.assertNotIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) if editor.sql_create_fk and connection.features.can_introspect_foreign_keys: constraint_name = "CamelCaseFKConstraint" expected_constraint_name = identifier_converter(constraint_name) editor.execute( editor.sql_create_fk % { "table": editor.quote_name(table), "name": editor.quote_name(constraint_name), "column": editor.quote_name(column), "to_table": editor.quote_name(table), "to_column": editor.quote_name(model._meta.auto_field.column), "deferrable": connection.ops.deferrable_sql(), } ) self.assertIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) editor.alter_field( model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True, ) self.assertNotIn( expected_constraint_name, self.get_constraints(model._meta.db_table) ) def test_add_field_use_effective_default(self): """ #23987 - effective_default() should be used as the field default when adding a new field. 
""" # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name="Anonymous1") # Add new CharField to ensure default will be used from effective_default new_field = CharField(max_length=15, blank=True) new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual( item[0], None if connection.features.interprets_empty_strings_as_nulls else "", ) def test_add_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Ensure there's no surname field columns = self.column_classes(Author) self.assertNotIn("surname", columns) # Create a row Author.objects.create(name="Anonymous1") # Add new CharField with a default new_field = CharField(max_length=15, blank=True, default="surname default") new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) # Ensure field was added with the right default with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertEqual(item[0], "surname default") # And that the default is no longer set in the database. field = next( f for f in connection.introspection.get_table_description( cursor, "schema_author" ) if f.name == "surname" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) def test_add_field_default_nullable(self): with connection.schema_editor() as editor: editor.create_model(Author) # Add new nullable CharField with a default. new_field = CharField(max_length=15, blank=True, null=True, default="surname") new_field.set_attributes_from_name("surname") with connection.schema_editor() as editor: editor.add_field(Author, new_field) Author.objects.create(name="Anonymous1") with connection.cursor() as cursor: cursor.execute("SELECT surname FROM schema_author;") item = cursor.fetchall()[0] self.assertIsNone(item[0]) field = next( f for f in connection.introspection.get_table_description( cursor, "schema_author", ) if f.name == "surname" ) # Field is still nullable. self.assertTrue(field.null_ok) # The database default is no longer set. if connection.features.can_introspect_default: self.assertIn(field.default, ["NULL", None]) def test_add_textfield_default_nullable(self): with connection.schema_editor() as editor: editor.create_model(Author) # Add new nullable TextField with a default. new_field = TextField(blank=True, null=True, default="text") new_field.set_attributes_from_name("description") with connection.schema_editor() as editor: editor.add_field(Author, new_field) Author.objects.create(name="Anonymous1") with connection.cursor() as cursor: cursor.execute("SELECT description FROM schema_author;") item = cursor.fetchall()[0] self.assertIsNone(item[0]) field = next( f for f in connection.introspection.get_table_description( cursor, "schema_author", ) if f.name == "description" ) # Field is still nullable. self.assertTrue(field.null_ok) # The database default is no longer set. 
if connection.features.can_introspect_default: self.assertIn(field.default, ["NULL", None]) def test_alter_field_default_dropped(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name="Anonymous1") self.assertIsNone(Author.objects.get().height) old_field = Author._meta.get_field("height") # The default from the new field is used in updating existing rows. new_field = IntegerField(blank=True, default=42) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual(Author.objects.get().height, 42) # The database default should be removed. with connection.cursor() as cursor: field = next( f for f in connection.introspection.get_table_description( cursor, "schema_author" ) if f.name == "height" ) if connection.features.can_introspect_default: self.assertIsNone(field.default) def test_alter_field_default_doesnt_perform_queries(self): """ No queries are performed if a field default changes and the field's not changing from null to non-null. """ with connection.schema_editor() as editor: editor.create_model(AuthorWithDefaultHeight) old_field = AuthorWithDefaultHeight._meta.get_field("height") new_default = old_field.default * 2 new_field = PositiveIntegerField(null=True, blank=True, default=new_default) new_field.set_attributes_from_name("height") with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field( AuthorWithDefaultHeight, old_field, new_field, strict=True ) @skipUnlessDBFeature("supports_foreign_keys") def test_alter_field_fk_attributes_noop(self): """ No queries are performed when changing field attributes that don't affect the schema. 
""" with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) old_field = Book._meta.get_field("author") new_field = ForeignKey( Author, blank=True, editable=False, error_messages={"invalid": "error message"}, help_text="help text", limit_choices_to={"limit": "choice"}, on_delete=PROTECT, related_name="related_name", related_query_name="related_query_name", validators=[lambda x: x], verbose_name="verbose name", ) new_field.set_attributes_from_name("author") with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(Book, old_field, new_field, strict=True) with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(Book, new_field, old_field, strict=True) def test_alter_field_choices_noop(self): with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field("name") new_field = CharField( choices=(("Jane", "Jane"), ("Joe", "Joe")), max_length=255, ) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(Author, old_field, new_field, strict=True) with connection.schema_editor() as editor, self.assertNumQueries(0): editor.alter_field(Author, new_field, old_field, strict=True) def test_add_textfield_unhashable_default(self): # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Create a row Author.objects.create(name="Anonymous1") # Create a field that has an unhashable default new_field = TextField(default={}) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.add_field(Author, new_field) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_add_indexed_charfield(self): field = CharField(max_length=255, db_index=True) field.set_attributes_from_name("nom_de_plume") with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, "nom_de_plume"), [ "schema_author_nom_de_plume_7570a851", "schema_author_nom_de_plume_7570a851_like", ], ) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_add_unique_charfield(self): field = CharField(max_length=255, unique=True) field.set_attributes_from_name("nom_de_plume") with connection.schema_editor() as editor: editor.create_model(Author) editor.add_field(Author, field) # Should create two indexes; one for like operator. self.assertEqual( self.get_constraints_for_column(Author, "nom_de_plume"), [ "schema_author_nom_de_plume_7570a851_like", "schema_author_nom_de_plume_key", ], ) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_add_index_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, "name"), []) # Alter to add db_index=True and create 2 indexes. 
old_field = Author._meta.get_field("name") new_field = CharField(max_length=255, db_index=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, "name"), ["schema_author_name_1fbc5617", "schema_author_name_1fbc5617_like"], ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, "name"), []) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_add_unique_to_charfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, "name"), []) # Alter to add unique=True and create 2 indexes. old_field = Author._meta.get_field("name") new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name("name") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, "name"), ["schema_author_name_1fbc5617_like", "schema_author_name_1fbc5617_uniq"], ) # Remove unique=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, "name"), []) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_add_index_to_textfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Note) self.assertEqual(self.get_constraints_for_column(Note, "info"), []) # Alter to add db_index=True and create 2 indexes. old_field = Note._meta.get_field("info") new_field = TextField(db_index=True) new_field.set_attributes_from_name("info") with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Note, "info"), ["schema_note_info_4b0ea695", "schema_note_info_4b0ea695_like"], ) # Remove db_index=True to drop both indexes. with connection.schema_editor() as editor: editor.alter_field(Note, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Note, "info"), []) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_add_unique_to_charfield_with_db_index(self): # Create the table and verify initial indexes. 
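        # BookWithoutAuthor.title is declared with db_index=True, so the table is
        # created with the regular index and its PostgreSQL "_like" companion.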
with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"], ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field("title") new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"], ) # Alter to remove unique=True (should drop unique index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"], ) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_remove_unique_and_db_index_from_charfield(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"], ) # Alter to add unique=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field("title") new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"], ) # Alter to remove both unique=True and db_index=True (should drop all indexes) new_field2 = CharField(max_length=100) new_field2.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), [] ) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_swap_unique_and_db_index_with_charfield(self): # Create the table and verify initial indexes. 
with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"], ) # Alter to set unique=True and remove db_index=True (should replace the index) old_field = BookWithoutAuthor._meta.get_field("title") new_field = CharField(max_length=100, unique=True) new_field.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"], ) # Alter to set db_index=True and remove unique=True (should restore index) new_field2 = CharField(max_length=100, db_index=True) new_field2.set_attributes_from_name("title") with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, "title"), ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"], ) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific") def test_alter_field_add_db_index_to_charfield_with_unique(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(Tag) self.assertEqual( self.get_constraints_for_column(Tag, "slug"), ["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"], ) # Alter to add db_index=True old_field = Tag._meta.get_field("slug") new_field = SlugField(db_index=True, unique=True) new_field.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, "slug"), ["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"], ) # Alter to remove db_index=True new_field2 = SlugField(unique=True) new_field2.set_attributes_from_name("slug") with connection.schema_editor() as editor: editor.alter_field(Tag, new_field, new_field2, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, "slug"), ["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"], ) def test_alter_field_add_index_to_integerfield(self): # Create the table and verify no initial indexes. with connection.schema_editor() as editor: editor.create_model(Author) self.assertEqual(self.get_constraints_for_column(Author, "weight"), []) # Alter to add db_index=True and create index. old_field = Author._meta.get_field("weight") new_field = IntegerField(null=True, db_index=True) new_field.set_attributes_from_name("weight") with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Author, "weight"), ["schema_author_weight_587740f9"], ) # Remove db_index=True to drop index. with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertEqual(self.get_constraints_for_column(Author, "weight"), []) def test_alter_pk_with_self_referential_field(self): """ Changing the primary key field name of a model with a self-referential foreign key (#26384). 
""" with connection.schema_editor() as editor: editor.create_model(Node) old_field = Node._meta.get_field("node_id") new_field = AutoField(primary_key=True) new_field.set_attributes_from_name("id") with connection.schema_editor() as editor: editor.alter_field(Node, old_field, new_field, strict=True) self.assertForeignKeyExists(Node, "parent_id", Node._meta.db_table) @mock.patch("django.db.backends.base.schema.datetime") @mock.patch("django.db.backends.base.schema.timezone") def test_add_datefield_and_datetimefield_use_effective_default( self, mocked_datetime, mocked_tz ): """ effective_default() should be used for DateField, DateTimeField, and TimeField if auto_now or auto_now_add is set (#25005). """ now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1) now_tz = datetime.datetime( month=1, day=1, year=2000, hour=1, minute=1, tzinfo=datetime.timezone.utc ) mocked_datetime.now = mock.MagicMock(return_value=now) mocked_tz.now = mock.MagicMock(return_value=now_tz) # Create the table with connection.schema_editor() as editor: editor.create_model(Author) # Check auto_now/auto_now_add attributes are not defined columns = self.column_classes(Author) self.assertNotIn("dob_auto_now", columns) self.assertNotIn("dob_auto_now_add", columns) self.assertNotIn("dtob_auto_now", columns) self.assertNotIn("dtob_auto_now_add", columns) self.assertNotIn("tob_auto_now", columns) self.assertNotIn("tob_auto_now_add", columns) # Create a row Author.objects.create(name="Anonymous1") # Ensure fields were added with the correct defaults dob_auto_now = DateField(auto_now=True) dob_auto_now.set_attributes_from_name("dob_auto_now") self.check_added_field_default( editor, Author, dob_auto_now, "dob_auto_now", now.date(), cast_function=lambda x: x.date(), ) dob_auto_now_add = DateField(auto_now_add=True) dob_auto_now_add.set_attributes_from_name("dob_auto_now_add") self.check_added_field_default( editor, Author, dob_auto_now_add, "dob_auto_now_add", now.date(), cast_function=lambda x: x.date(), ) dtob_auto_now = DateTimeField(auto_now=True) dtob_auto_now.set_attributes_from_name("dtob_auto_now") self.check_added_field_default( editor, Author, dtob_auto_now, "dtob_auto_now", now, ) dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True) dt_tm_of_birth_auto_now_add.set_attributes_from_name("dtob_auto_now_add") self.check_added_field_default( editor, Author, dt_tm_of_birth_auto_now_add, "dtob_auto_now_add", now, ) tob_auto_now = TimeField(auto_now=True) tob_auto_now.set_attributes_from_name("tob_auto_now") self.check_added_field_default( editor, Author, tob_auto_now, "tob_auto_now", now.time(), cast_function=lambda x: x.time(), ) tob_auto_now_add = TimeField(auto_now_add=True) tob_auto_now_add.set_attributes_from_name("tob_auto_now_add") self.check_added_field_default( editor, Author, tob_auto_now_add, "tob_auto_now_add", now.time(), cast_function=lambda x: x.time(), ) def test_namespaced_db_table_create_index_name(self): """ Table names are stripped of their namespace/schema before being used to generate index names. 
""" with connection.schema_editor() as editor: max_name_length = connection.ops.max_name_length() or 200 namespace = "n" * max_name_length table_name = "t" * max_name_length namespaced_table_name = '"%s"."%s"' % (namespace, table_name) self.assertEqual( editor._create_index_name(table_name, []), editor._create_index_name(namespaced_table_name, []), ) @unittest.skipUnless( connection.vendor == "oracle", "Oracle specific db_table syntax" ) def test_creation_with_db_table_double_quotes(self): oracle_user = connection.creation._test_database_user() class Student(Model): name = CharField(max_length=30) class Meta: app_label = "schema" apps = new_apps db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user class Document(Model): name = CharField(max_length=30) students = ManyToManyField(Student) class Meta: app_label = "schema" apps = new_apps db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user self.isolated_local_models = [Student, Document] with connection.schema_editor() as editor: editor.create_model(Student) editor.create_model(Document) doc = Document.objects.create(name="Test Name") student = Student.objects.create(name="Some man") doc.students.add(student) @isolate_apps("schema") @unittest.skipUnless( connection.vendor == "postgresql", "PostgreSQL specific db_table syntax." ) def test_namespaced_db_table_foreign_key_reference(self): with connection.cursor() as cursor: cursor.execute("CREATE SCHEMA django_schema_tests") def delete_schema(): with connection.cursor() as cursor: cursor.execute("DROP SCHEMA django_schema_tests CASCADE") self.addCleanup(delete_schema) class Author(Model): class Meta: app_label = "schema" class Book(Model): class Meta: app_label = "schema" db_table = '"django_schema_tests"."schema_book"' author = ForeignKey(Author, CASCADE) author.set_attributes_from_name("author") with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) editor.add_field(Book, author) def test_rename_table_renames_deferred_sql_references(self): atomic_rename = connection.features.supports_atomic_references_rename with connection.schema_editor(atomic=atomic_rename) as editor: editor.create_model(Author) editor.create_model(Book) editor.alter_db_table(Author, "schema_author", "schema_renamed_author") editor.alter_db_table(Author, "schema_book", "schema_renamed_book") try: self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_table("schema_author"), False) self.assertIs(statement.references_table("schema_book"), False) finally: editor.alter_db_table(Author, "schema_renamed_author", "schema_author") editor.alter_db_table(Author, "schema_renamed_book", "schema_book") def test_rename_column_renames_deferred_sql_references(self): with connection.schema_editor() as editor: editor.create_model(Author) editor.create_model(Book) old_title = Book._meta.get_field("title") new_title = CharField(max_length=100, db_index=True) new_title.set_attributes_from_name("renamed_title") editor.alter_field(Book, old_title, new_title) old_author = Book._meta.get_field("author") new_author = ForeignKey(Author, CASCADE) new_author.set_attributes_from_name("renamed_author") editor.alter_field(Book, old_author, new_author) self.assertGreater(len(editor.deferred_sql), 0) for statement in editor.deferred_sql: self.assertIs(statement.references_column("book", "title"), False) self.assertIs(statement.references_column("book", "author_id"), False) @isolate_apps("schema") def 
test_referenced_field_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database level constraint don't prevent the field they reference from being renamed in an atomic block. """ class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = "schema" class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False) class Meta: app_label = "schema" self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name("renamed") with connection.schema_editor(atomic=True) as editor: editor.alter_field(Foo, Foo._meta.get_field("field"), new_field) @isolate_apps("schema") def test_referenced_table_without_constraint_rename_inside_atomic_block(self): """ Foreign keys without database level constraint don't prevent the table they reference from being renamed in an atomic block. """ class Foo(Model): field = CharField(max_length=255, unique=True) class Meta: app_label = "schema" class Bar(Model): foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False) class Meta: app_label = "schema" self.isolated_local_models = [Foo, Bar] with connection.schema_editor() as editor: editor.create_model(Foo) editor.create_model(Bar) new_field = CharField(max_length=255, unique=True) new_field.set_attributes_from_name("renamed") with connection.schema_editor(atomic=True) as editor: editor.alter_db_table(Foo, Foo._meta.db_table, "renamed_table") Foo._meta.db_table = "renamed_table" @isolate_apps("schema") @skipUnlessDBFeature("supports_collation_on_charfield") def test_db_collation_charfield(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") class Foo(Model): field = CharField(max_length=255, db_collation=collation) class Meta: app_label = "schema" self.isolated_local_models = [Foo] with connection.schema_editor() as editor: editor.create_model(Foo) self.assertEqual( self.get_column_collation(Foo._meta.db_table, "field"), collation, ) @isolate_apps("schema") @skipUnlessDBFeature("supports_collation_on_textfield") def test_db_collation_textfield(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") class Foo(Model): field = TextField(db_collation=collation) class Meta: app_label = "schema" self.isolated_local_models = [Foo] with connection.schema_editor() as editor: editor.create_model(Foo) self.assertEqual( self.get_column_collation(Foo._meta.db_table, "field"), collation, ) @skipUnlessDBFeature("supports_collation_on_charfield") def test_add_field_db_collation(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") with connection.schema_editor() as editor: editor.create_model(Author) new_field = CharField(max_length=255, db_collation=collation) new_field.set_attributes_from_name("alias") with connection.schema_editor() as editor: editor.add_field(Author, new_field) columns = self.column_classes(Author) self.assertEqual( columns["alias"][0], connection.features.introspected_field_types["CharField"], ) self.assertEqual(columns["alias"][1][8], collation) @skipUnlessDBFeature("supports_collation_on_charfield") def test_alter_field_db_collation(self): collation = 
connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") with connection.schema_editor() as editor: editor.create_model(Author) old_field = Author._meta.get_field("name") new_field = CharField(max_length=255, db_collation=collation) new_field.set_attributes_from_name("name") new_field.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field, strict=True) self.assertEqual( self.get_column_collation(Author._meta.db_table, "name"), collation, ) with connection.schema_editor() as editor: editor.alter_field(Author, new_field, old_field, strict=True) self.assertIsNone(self.get_column_collation(Author._meta.db_table, "name")) @skipUnlessDBFeature("supports_collation_on_charfield") def test_alter_primary_key_db_collation(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") with connection.schema_editor() as editor: editor.create_model(Thing) old_field = Thing._meta.get_field("when") new_field = CharField(max_length=1, db_collation=collation, primary_key=True) new_field.set_attributes_from_name("when") new_field.model = Thing with connection.schema_editor() as editor: editor.alter_field(Thing, old_field, new_field, strict=True) self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when") self.assertEqual( self.get_column_collation(Thing._meta.db_table, "when"), collation, ) with connection.schema_editor() as editor: editor.alter_field(Thing, new_field, old_field, strict=True) self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when") self.assertIsNone(self.get_column_collation(Thing._meta.db_table, "when")) @skipUnlessDBFeature( "supports_collation_on_charfield", "supports_collation_on_textfield" ) def test_alter_field_type_and_db_collation(self): collation = connection.features.test_collations.get("non_default") if not collation: self.skipTest("Language collations are not supported.") with connection.schema_editor() as editor: editor.create_model(Note) old_field = Note._meta.get_field("info") new_field = CharField(max_length=255, db_collation=collation) new_field.set_attributes_from_name("info") new_field.model = Note with connection.schema_editor() as editor: editor.alter_field(Note, old_field, new_field, strict=True) columns = self.column_classes(Note) self.assertEqual( columns["info"][0], connection.features.introspected_field_types["CharField"], ) self.assertEqual(columns["info"][1][8], collation) with connection.schema_editor() as editor: editor.alter_field(Note, new_field, old_field, strict=True) columns = self.column_classes(Note) self.assertEqual(columns["info"][0], "TextField") self.assertIsNone(columns["info"][1][8]) @skipUnlessDBFeature( "supports_collation_on_charfield", "supports_non_deterministic_collations", ) def test_ci_cs_db_collation(self): cs_collation = connection.features.test_collations.get("cs") ci_collation = connection.features.test_collations.get("ci") try: if connection.vendor == "mysql": cs_collation = "latin1_general_cs" elif connection.vendor == "postgresql": cs_collation = "en-x-icu" with connection.cursor() as cursor: cursor.execute( "CREATE COLLATION IF NOT EXISTS case_insensitive " "(provider = icu, locale = 'und-u-ks-level2', " "deterministic = false)" ) ci_collation = "case_insensitive" # Create the table. with connection.schema_editor() as editor: editor.create_model(Author) # Case-insensitive collation. 
old_field = Author._meta.get_field("name") new_field_ci = CharField(max_length=255, db_collation=ci_collation) new_field_ci.set_attributes_from_name("name") new_field_ci.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, old_field, new_field_ci, strict=True) Author.objects.create(name="ANDREW") self.assertIs(Author.objects.filter(name="Andrew").exists(), True) # Case-sensitive collation. new_field_cs = CharField(max_length=255, db_collation=cs_collation) new_field_cs.set_attributes_from_name("name") new_field_cs.model = Author with connection.schema_editor() as editor: editor.alter_field(Author, new_field_ci, new_field_cs, strict=True) self.assertIs(Author.objects.filter(name="Andrew").exists(), False) finally: if connection.vendor == "postgresql": with connection.cursor() as cursor: cursor.execute("DROP COLLATION IF EXISTS case_insensitive")
7e5b8f837e080b14adda9e1af5a13139edcb4242a0693bfcb10aba03d36c32cf
from unittest.mock import ANY from django.core import checks from django.core.checks.migrations import check_migration_operations from django.db import migrations from django.db.migrations.operations.base import Operation from django.test import TestCase from django.test.utils import override_settings class DeprecatedMigrationOperationTests(TestCase): def test_default_operation(self): class MyOperation(Operation): system_check_deprecated_details = {} my_operation = MyOperation() class Migration(migrations.Migration): operations = [my_operation] self.assertEqual( Migration("name", "app_label").check(), [ checks.Warning( msg="MyOperation has been deprecated.", obj=my_operation, id="migrations.WXXX", ) ], ) def test_user_specified_details(self): class MyOperation(Operation): system_check_deprecated_details = { "msg": "This operation is deprecated and will be removed soon.", "hint": "Use something else.", "id": "migrations.W999", } my_operation = MyOperation() class Migration(migrations.Migration): operations = [my_operation] self.assertEqual( Migration("name", "app_label").check(), [ checks.Warning( msg="This operation is deprecated and will be removed soon.", obj=my_operation, hint="Use something else.", id="migrations.W999", ) ], ) @override_settings( INSTALLED_APPS=["check_framework.migrations_test_apps.index_together_app"] ) def tests_check_alter_index_together(self): errors = check_migration_operations() self.assertEqual( errors, [ checks.Warning( "AlterIndexTogether is deprecated. Support for it (except in " "historical migrations) will be removed in Django 5.1.", obj=ANY, id="migrations.W001", ) ], ) class RemovedMigrationOperationTests(TestCase): def test_default_operation(self): class MyOperation(Operation): system_check_removed_details = {} my_operation = MyOperation() class Migration(migrations.Migration): operations = [my_operation] self.assertEqual( Migration("name", "app_label").check(), [ checks.Error( msg=( "MyOperation has been removed except for support in historical " "migrations." ), obj=my_operation, id="migrations.EXXX", ) ], ) def test_user_specified_details(self): class MyOperation(Operation): system_check_removed_details = { "msg": "Support for this operation is gone.", "hint": "Use something else.", "id": "migrations.E999", } my_operation = MyOperation() class Migration(migrations.Migration): operations = [my_operation] self.assertEqual( Migration("name", "app_label").check(), [ checks.Error( msg="Support for this operation is gone.", obj=my_operation, hint="Use something else.", id="migrations.E999", ) ], )
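# --- Illustrative sketch; not part of the original check-framework tests ----
# The tests above exercise the deprecation/removal hooks that custom migration
# operations can opt into. A third-party operation would typically declare the
# metadata directly on the class, roughly as below; the class name and the
# check id "myapp.W001" are placeholders of our own, reusing the ``Operation``
# base class imported at the top of this module.
class SketchLegacyOperation(Operation):
    # When a migration containing this operation is checked, Django builds a
    # checks.Warning from these keys (msg/hint/id), as asserted in
    # DeprecatedMigrationOperationTests above.
    system_check_deprecated_details = {
        "msg": "SketchLegacyOperation is deprecated.",
        "hint": "Use a built-in operation such as RunPython instead.",
        "id": "myapp.W001",
    }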
f27a1b4224823c983f8952f37b3ad09cd845dc42c4c9c483908bd9b7f09f7631
from django.contrib import admin from django.contrib.auth.models import User as AuthUser from django.contrib.contenttypes.models import ContentType from django.core import checks, management from django.db import DEFAULT_DB_ALIAS, models from django.db.models import signals from django.test import TestCase, override_settings from django.test.utils import isolate_apps from django.urls import reverse from .admin import admin as force_admin_model_registration # NOQA from .models import ( Abstract, BaseUser, Bug, Country, Improvement, Issue, LowerStatusPerson, MultiUserProxy, MyPerson, MyPersonProxy, OtherPerson, Person, ProxyBug, ProxyImprovement, ProxyProxyBug, ProxyTrackerUser, State, StateProxy, StatusPerson, TrackerUser, User, UserProxy, UserProxyProxy, ) class ProxyModelTests(TestCase): def test_same_manager_queries(self): """ The MyPerson model should be generating the same database queries as the Person model (when the same manager is used in each case). """ my_person_sql = ( MyPerson.other.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql() ) person_sql = ( Person.objects.order_by("name") .query.get_compiler(DEFAULT_DB_ALIAS) .as_sql() ) self.assertEqual(my_person_sql, person_sql) def test_inheritance_new_table(self): """ The StatusPerson models should have its own table (it's using ORM-level inheritance). """ sp_sql = ( StatusPerson.objects.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql() ) p_sql = Person.objects.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql() self.assertNotEqual(sp_sql, p_sql) def test_basic_proxy(self): """ Creating a Person makes them accessible through the MyPerson proxy. """ person = Person.objects.create(name="Foo McBar") self.assertEqual(len(Person.objects.all()), 1) self.assertEqual(len(MyPerson.objects.all()), 1) self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id) self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name()) def test_no_proxy(self): """ Person is not proxied by StatusPerson subclass. """ Person.objects.create(name="Foo McBar") self.assertEqual(list(StatusPerson.objects.all()), []) def test_basic_proxy_reverse(self): """ A new MyPerson also shows up as a standard Person. 
""" MyPerson.objects.create(name="Bazza del Frob") self.assertEqual(len(MyPerson.objects.all()), 1) self.assertEqual(len(Person.objects.all()), 1) LowerStatusPerson.objects.create(status="low", name="homer") lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()] self.assertEqual(lsps, ["homer"]) def test_correct_type_proxy_of_proxy(self): """ Correct type when querying a proxy of proxy """ Person.objects.create(name="Foo McBar") MyPerson.objects.create(name="Bazza del Frob") LowerStatusPerson.objects.create(status="low", name="homer") pp = sorted(mpp.name for mpp in MyPersonProxy.objects.all()) self.assertEqual(pp, ["Bazza del Frob", "Foo McBar", "homer"]) def test_proxy_included_in_ancestors(self): """ Proxy models are included in the ancestors for a model's DoesNotExist and MultipleObjectsReturned """ Person.objects.create(name="Foo McBar") MyPerson.objects.create(name="Bazza del Frob") LowerStatusPerson.objects.create(status="low", name="homer") max_id = Person.objects.aggregate(max_id=models.Max("id"))["max_id"] with self.assertRaises(Person.DoesNotExist): MyPersonProxy.objects.get(name="Zathras") with self.assertRaises(Person.MultipleObjectsReturned): MyPersonProxy.objects.get(id__lt=max_id + 1) with self.assertRaises(Person.DoesNotExist): StatusPerson.objects.get(name="Zathras") StatusPerson.objects.create(name="Bazza Jr.") StatusPerson.objects.create(name="Foo Jr.") max_id = Person.objects.aggregate(max_id=models.Max("id"))["max_id"] with self.assertRaises(Person.MultipleObjectsReturned): StatusPerson.objects.get(id__lt=max_id + 1) def test_abstract_base_with_model_fields(self): msg = ( "Abstract base class containing model fields not permitted for proxy model " "'NoAbstract'." ) with self.assertRaisesMessage(TypeError, msg): class NoAbstract(Abstract): class Meta: proxy = True def test_too_many_concrete_classes(self): msg = ( "Proxy model 'TooManyBases' has more than one non-abstract model base " "class." ) with self.assertRaisesMessage(TypeError, msg): class TooManyBases(User, Person): class Meta: proxy = True def test_no_base_classes(self): msg = "Proxy model 'NoBaseClasses' has no non-abstract model base class." 
with self.assertRaisesMessage(TypeError, msg): class NoBaseClasses(models.Model): class Meta: proxy = True @isolate_apps("proxy_models") def test_new_fields(self): class NoNewFields(Person): newfield = models.BooleanField() class Meta: proxy = True errors = NoNewFields.check() expected = [ checks.Error( "Proxy model 'NoNewFields' contains model fields.", id="models.E017", ) ] self.assertEqual(errors, expected) @override_settings(TEST_SWAPPABLE_MODEL="proxy_models.AlternateModel") @isolate_apps("proxy_models") def test_swappable(self): class SwappableModel(models.Model): class Meta: swappable = "TEST_SWAPPABLE_MODEL" class AlternateModel(models.Model): pass # You can't proxy a swapped model with self.assertRaises(TypeError): class ProxyModel(SwappableModel): class Meta: proxy = True def test_myperson_manager(self): Person.objects.create(name="fred") Person.objects.create(name="wilma") Person.objects.create(name="barney") resp = [p.name for p in MyPerson.objects.all()] self.assertEqual(resp, ["barney", "fred"]) resp = [p.name for p in MyPerson._default_manager.all()] self.assertEqual(resp, ["barney", "fred"]) def test_otherperson_manager(self): Person.objects.create(name="fred") Person.objects.create(name="wilma") Person.objects.create(name="barney") resp = [p.name for p in OtherPerson.objects.all()] self.assertEqual(resp, ["barney", "wilma"]) resp = [p.name for p in OtherPerson.excluder.all()] self.assertEqual(resp, ["barney", "fred"]) resp = [p.name for p in OtherPerson._default_manager.all()] self.assertEqual(resp, ["barney", "wilma"]) def test_permissions_created(self): from django.contrib.auth.models import Permission Permission.objects.get(name="May display users information") def test_proxy_model_signals(self): """ Test save signals for proxy models """ output = [] def make_handler(model, event): def _handler(*args, **kwargs): output.append("%s %s save" % (model, event)) return _handler h1 = make_handler("MyPerson", "pre") h2 = make_handler("MyPerson", "post") h3 = make_handler("Person", "pre") h4 = make_handler("Person", "post") signals.pre_save.connect(h1, sender=MyPerson) signals.post_save.connect(h2, sender=MyPerson) signals.pre_save.connect(h3, sender=Person) signals.post_save.connect(h4, sender=Person) MyPerson.objects.create(name="dino") self.assertEqual(output, ["MyPerson pre save", "MyPerson post save"]) output = [] h5 = make_handler("MyPersonProxy", "pre") h6 = make_handler("MyPersonProxy", "post") signals.pre_save.connect(h5, sender=MyPersonProxy) signals.post_save.connect(h6, sender=MyPersonProxy) MyPersonProxy.objects.create(name="pebbles") self.assertEqual(output, ["MyPersonProxy pre save", "MyPersonProxy post save"]) signals.pre_save.disconnect(h1, sender=MyPerson) signals.post_save.disconnect(h2, sender=MyPerson) signals.pre_save.disconnect(h3, sender=Person) signals.post_save.disconnect(h4, sender=Person) signals.pre_save.disconnect(h5, sender=MyPersonProxy) signals.post_save.disconnect(h6, sender=MyPersonProxy) def test_content_type(self): ctype = ContentType.objects.get_for_model self.assertIs(ctype(Person), ctype(OtherPerson)) def test_user_proxy_models(self): User.objects.create(name="Bruce") resp = [u.name for u in User.objects.all()] self.assertEqual(resp, ["Bruce"]) resp = [u.name for u in UserProxy.objects.all()] self.assertEqual(resp, ["Bruce"]) resp = [u.name for u in UserProxyProxy.objects.all()] self.assertEqual(resp, ["Bruce"]) self.assertEqual([u.name for u in MultiUserProxy.objects.all()], ["Bruce"]) def test_proxy_for_model(self): 
self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model) def test_concrete_model(self): self.assertEqual(User, UserProxyProxy._meta.concrete_model) def test_proxy_delete(self): """ Proxy objects can be deleted """ User.objects.create(name="Bruce") u2 = UserProxy.objects.create(name="George") resp = [u.name for u in UserProxy.objects.all()] self.assertEqual(resp, ["Bruce", "George"]) u2.delete() resp = [u.name for u in UserProxy.objects.all()] self.assertEqual(resp, ["Bruce"]) def test_proxy_update(self): user = User.objects.create(name="Bruce") with self.assertNumQueries(1): UserProxy.objects.filter(id=user.id).update(name="George") user.refresh_from_db() self.assertEqual(user.name, "George") def test_select_related(self): """ We can still use `select_related()` to include related models in our querysets. """ country = Country.objects.create(name="Australia") State.objects.create(name="New South Wales", country=country) resp = [s.name for s in State.objects.select_related()] self.assertEqual(resp, ["New South Wales"]) resp = [s.name for s in StateProxy.objects.select_related()] self.assertEqual(resp, ["New South Wales"]) self.assertEqual( StateProxy.objects.get(name="New South Wales").name, "New South Wales" ) resp = StateProxy.objects.select_related().get(name="New South Wales") self.assertEqual(resp.name, "New South Wales") def test_filter_proxy_relation_reverse(self): tu = TrackerUser.objects.create(name="Contributor", status="contrib") ptu = ProxyTrackerUser.objects.get() issue = Issue.objects.create(assignee=tu) self.assertEqual(tu.issues.get(), issue) self.assertEqual(ptu.issues.get(), issue) self.assertSequenceEqual(TrackerUser.objects.filter(issues=issue), [tu]) self.assertSequenceEqual(ProxyTrackerUser.objects.filter(issues=issue), [ptu]) def test_proxy_bug(self): contributor = ProxyTrackerUser.objects.create( name="Contributor", status="contrib" ) someone = BaseUser.objects.create(name="Someone") Bug.objects.create( summary="fix this", version="1.1beta", assignee=contributor, reporter=someone, ) pcontributor = ProxyTrackerUser.objects.create( name="OtherContributor", status="proxy" ) Improvement.objects.create( summary="improve that", version="1.1beta", assignee=contributor, reporter=pcontributor, associated_bug=ProxyProxyBug.objects.all()[0], ) # Related field filter on proxy resp = ProxyBug.objects.get(version__icontains="beta") self.assertEqual(repr(resp), "<ProxyBug: ProxyBug:fix this>") # Select related + filter on proxy resp = ProxyBug.objects.select_related().get(version__icontains="beta") self.assertEqual(repr(resp), "<ProxyBug: ProxyBug:fix this>") # Proxy of proxy, select_related + filter resp = ProxyProxyBug.objects.select_related().get(version__icontains="beta") self.assertEqual(repr(resp), "<ProxyProxyBug: ProxyProxyBug:fix this>") # Select related + filter on a related proxy field resp = ProxyImprovement.objects.select_related().get( reporter__name__icontains="butor" ) self.assertEqual( repr(resp), "<ProxyImprovement: ProxyImprovement:improve that>" ) # Select related + filter on a related proxy of proxy field resp = ProxyImprovement.objects.select_related().get( associated_bug__summary__icontains="fix" ) self.assertEqual( repr(resp), "<ProxyImprovement: ProxyImprovement:improve that>" ) def test_proxy_load_from_fixture(self): management.call_command("loaddata", "mypeople.json", verbosity=0) p = MyPerson.objects.get(pk=100) self.assertEqual(p.name, "Elvis Presley") def test_select_related_only(self): user = ProxyTrackerUser.objects.create(name="Joe Doe", 
status="test") issue = Issue.objects.create(summary="New issue", assignee=user) qs = Issue.objects.select_related("assignee").only("assignee__status") self.assertEqual(qs.get(), issue) def test_eq(self): self.assertEqual(MyPerson(id=100), Person(id=100)) @override_settings(ROOT_URLCONF="proxy_models.urls") class ProxyModelAdminTests(TestCase): @classmethod def setUpTestData(cls): cls.superuser = AuthUser.objects.create(is_superuser=True, is_staff=True) cls.tu1 = ProxyTrackerUser.objects.create(name="Django Pony", status="emperor") cls.i1 = Issue.objects.create(summary="Pony's Issue", assignee=cls.tu1) def test_cascade_delete_proxy_model_admin_warning(self): """ Test if admin gives warning about cascade deleting models referenced to concrete model by deleting proxy object. """ tracker_user = TrackerUser.objects.all()[0] base_user = BaseUser.objects.all()[0] issue = Issue.objects.all()[0] with self.assertNumQueries(6): collector = admin.utils.NestedObjects("default") collector.collect(ProxyTrackerUser.objects.all()) self.assertIn(tracker_user, collector.edges.get(None, ())) self.assertIn(base_user, collector.edges.get(None, ())) self.assertIn(issue, collector.edges.get(tracker_user, ())) def test_delete_str_in_model_admin(self): """ Test if the admin delete page shows the correct string representation for a proxy model. """ user = TrackerUser.objects.get(name="Django Pony") proxy = ProxyTrackerUser.objects.get(name="Django Pony") user_str = 'Tracker user: <a href="%s">%s</a>' % ( reverse("admin_proxy:proxy_models_trackeruser_change", args=(user.pk,)), user, ) proxy_str = 'Proxy tracker user: <a href="%s">%s</a>' % ( reverse( "admin_proxy:proxy_models_proxytrackeruser_change", args=(proxy.pk,) ), proxy, ) self.client.force_login(self.superuser) response = self.client.get( reverse("admin_proxy:proxy_models_trackeruser_delete", args=(user.pk,)) ) delete_str = response.context["deleted_objects"][0] self.assertEqual(delete_str, user_str) response = self.client.get( reverse( "admin_proxy:proxy_models_proxytrackeruser_delete", args=(proxy.pk,) ) ) delete_str = response.context["deleted_objects"][0] self.assertEqual(delete_str, proxy_str)
645009788763f468ad424a52c347664aed449fbb7675ff08b2e2424269cd18a3
import unittest from django.core.checks import Error, Warning from django.core.checks.model_checks import _check_lazy_references from django.db import connection, connections, models from django.db.models.functions import Abs, Lower, Round from django.db.models.signals import post_init from django.test import SimpleTestCase, TestCase, ignore_warnings, skipUnlessDBFeature from django.test.utils import isolate_apps, override_settings, register_lookup from django.utils.deprecation import RemovedInDjango51Warning class EmptyRouter: pass def get_max_column_name_length(): allowed_len = None db_alias = None for db in ("default", "other"): connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is not None and not connection.features.truncates_names: if allowed_len is None or max_name_length < allowed_len: allowed_len = max_name_length db_alias = db return (allowed_len, db_alias) @isolate_apps("invalid_models_tests") @ignore_warnings(category=RemovedInDjango51Warning) class IndexTogetherTests(SimpleTestCase): def test_non_iterable(self): class Model(models.Model): class Meta: index_together = 42 self.assertEqual( Model.check(), [ Error( "'index_together' must be a list or tuple.", obj=Model, id="models.E008", ), ], ) def test_non_list(self): class Model(models.Model): class Meta: index_together = "not-a-list" self.assertEqual( Model.check(), [ Error( "'index_together' must be a list or tuple.", obj=Model, id="models.E008", ), ], ) def test_list_containing_non_iterable(self): class Model(models.Model): class Meta: index_together = [("a", "b"), 42] self.assertEqual( Model.check(), [ Error( "All 'index_together' elements must be lists or tuples.", obj=Model, id="models.E009", ), ], ) def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: index_together = [["missing_field"]] self.assertEqual( Model.check(), [ Error( "'index_together' refers to the nonexistent field 'missing_field'.", obj=Model, id="models.E012", ), ], ) def test_pointing_to_non_local_field(self): class Foo(models.Model): field1 = models.IntegerField() class Bar(Foo): field2 = models.IntegerField() class Meta: index_together = [["field2", "field1"]] self.assertEqual( Bar.check(), [ Error( "'index_together' refers to field 'field1' which is not " "local to model 'Bar'.", hint="This issue may be caused by multi-table inheritance.", obj=Bar, id="models.E016", ), ], ) def test_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField("self") class Meta: index_together = [["m2m"]] self.assertEqual( Model.check(), [ Error( "'index_together' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'index_together'.", obj=Model, id="models.E013", ), ], ) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey( Foo, on_delete=models.CASCADE, related_name="bar_1" ) foo_2 = models.ForeignKey( Foo, on_delete=models.CASCADE, related_name="bar_2" ) class Meta: index_together = [["foo_1_id", "foo_2"]] self.assertEqual(Bar.check(), []) # unique_together tests are very similar to index_together tests. 
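# --- Illustrative sketch; not part of the original invalid_models_tests -----
# IndexTogetherTests above covers the deprecated Meta.index_together option
# (hence the RemovedInDjango51Warning filter). The non-deprecated way to
# declare the same composite index is Meta.indexes with models.Index, roughly
# as below; the model, field, and index names are placeholders of our own, and
# ``models`` is the module already imported at the top of this file.
class SketchArticle(models.Model):
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    class Meta:
        app_label = "invalid_models_tests"
        # Equivalent to the legacy index_together = [["headline", "pub_date"]].
        indexes = [
            models.Index(
                fields=["headline", "pub_date"], name="sketch_headline_pub_idx"
            ),
        ]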
@isolate_apps("invalid_models_tests") class UniqueTogetherTests(SimpleTestCase): def test_non_iterable(self): class Model(models.Model): class Meta: unique_together = 42 self.assertEqual( Model.check(), [ Error( "'unique_together' must be a list or tuple.", obj=Model, id="models.E010", ), ], ) def test_list_containing_non_iterable(self): class Model(models.Model): one = models.IntegerField() two = models.IntegerField() class Meta: unique_together = [("a", "b"), 42] self.assertEqual( Model.check(), [ Error( "All 'unique_together' elements must be lists or tuples.", obj=Model, id="models.E011", ), ], ) def test_non_list(self): class Model(models.Model): class Meta: unique_together = "not-a-list" self.assertEqual( Model.check(), [ Error( "'unique_together' must be a list or tuple.", obj=Model, id="models.E010", ), ], ) def test_valid_model(self): class Model(models.Model): one = models.IntegerField() two = models.IntegerField() class Meta: # unique_together can be a simple tuple unique_together = ("one", "two") self.assertEqual(Model.check(), []) def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: unique_together = [["missing_field"]] self.assertEqual( Model.check(), [ Error( "'unique_together' refers to the nonexistent field " "'missing_field'.", obj=Model, id="models.E012", ), ], ) def test_pointing_to_m2m(self): class Model(models.Model): m2m = models.ManyToManyField("self") class Meta: unique_together = [["m2m"]] self.assertEqual( Model.check(), [ Error( "'unique_together' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'unique_together'.", obj=Model, id="models.E013", ), ], ) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey( Foo, on_delete=models.CASCADE, related_name="bar_1" ) foo_2 = models.ForeignKey( Foo, on_delete=models.CASCADE, related_name="bar_2" ) class Meta: unique_together = [["foo_1_id", "foo_2"]] self.assertEqual(Bar.check(), []) @isolate_apps("invalid_models_tests") class IndexesTests(TestCase): def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: indexes = [models.Index(fields=["missing_field"], name="name")] self.assertEqual( Model.check(), [ Error( "'indexes' refers to the nonexistent field 'missing_field'.", obj=Model, id="models.E012", ), ], ) def test_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField("self") class Meta: indexes = [models.Index(fields=["m2m"], name="name")] self.assertEqual( Model.check(), [ Error( "'indexes' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'indexes'.", obj=Model, id="models.E013", ), ], ) def test_pointing_to_non_local_field(self): class Foo(models.Model): field1 = models.IntegerField() class Bar(Foo): field2 = models.IntegerField() class Meta: indexes = [models.Index(fields=["field2", "field1"], name="name")] self.assertEqual( Bar.check(), [ Error( "'indexes' refers to field 'field1' which is not local to " "model 'Bar'.", hint="This issue may be caused by multi-table inheritance.", obj=Bar, id="models.E016", ), ], ) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey( Foo, on_delete=models.CASCADE, related_name="bar_1" ) foo_2 = models.ForeignKey( Foo, on_delete=models.CASCADE, related_name="bar_2" ) class Meta: indexes = [ models.Index(fields=["foo_1_id", "foo_2"], name="index_name") ] self.assertEqual(Bar.check(), []) def test_name_constraints(self): class 
Model(models.Model): class Meta: indexes = [ models.Index(fields=["id"], name="_index_name"), models.Index(fields=["id"], name="5index_name"), ] self.assertEqual( Model.check(), [ Error( "The index name '%sindex_name' cannot start with an " "underscore or a number." % prefix, obj=Model, id="models.E033", ) for prefix in ("_", "5") ], ) def test_max_name_length(self): index_name = "x" * 31 class Model(models.Model): class Meta: indexes = [models.Index(fields=["id"], name=index_name)] self.assertEqual( Model.check(), [ Error( "The index name '%s' cannot be longer than 30 characters." % index_name, obj=Model, id="models.E034", ), ], ) def test_index_with_condition(self): class Model(models.Model): age = models.IntegerField() class Meta: indexes = [ models.Index( fields=["age"], name="index_age_gte_10", condition=models.Q(age__gte=10), ), ] errors = Model.check(databases=self.databases) expected = ( [] if connection.features.supports_partial_indexes else [ Warning( "%s does not support indexes with conditions." % connection.display_name, hint=( "Conditions will be ignored. Silence this warning if you " "don't care about it." ), obj=Model, id="models.W037", ) ] ) self.assertEqual(errors, expected) def test_index_with_condition_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {"supports_partial_indexes"} indexes = [ models.Index( fields=["age"], name="index_age_gte_10", condition=models.Q(age__gte=10), ), ] self.assertEqual(Model.check(databases=self.databases), []) def test_index_with_include(self): class Model(models.Model): age = models.IntegerField() class Meta: indexes = [ models.Index( fields=["age"], name="index_age_include_id", include=["id"], ), ] errors = Model.check(databases=self.databases) expected = ( [] if connection.features.supports_covering_indexes else [ Warning( "%s does not support indexes with non-key columns." % connection.display_name, hint=( "Non-key columns will be ignored. Silence this warning if " "you don't care about it." 
), obj=Model, id="models.W040", ) ] ) self.assertEqual(errors, expected) def test_index_with_include_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {"supports_covering_indexes"} indexes = [ models.Index( fields=["age"], name="index_age_include_id", include=["id"], ), ] self.assertEqual(Model.check(databases=self.databases), []) @skipUnlessDBFeature("supports_covering_indexes") def test_index_include_pointing_to_missing_field(self): class Model(models.Model): class Meta: indexes = [ models.Index(fields=["id"], include=["missing_field"], name="name"), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'indexes' refers to the nonexistent field 'missing_field'.", obj=Model, id="models.E012", ), ], ) @skipUnlessDBFeature("supports_covering_indexes") def test_index_include_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField("self") class Meta: indexes = [models.Index(fields=["id"], include=["m2m"], name="name")] self.assertEqual( Model.check(databases=self.databases), [ Error( "'indexes' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'indexes'.", obj=Model, id="models.E013", ), ], ) @skipUnlessDBFeature("supports_covering_indexes") def test_index_include_pointing_to_non_local_field(self): class Parent(models.Model): field1 = models.IntegerField() class Child(Parent): field2 = models.IntegerField() class Meta: indexes = [ models.Index(fields=["field2"], include=["field1"], name="name"), ] self.assertEqual( Child.check(databases=self.databases), [ Error( "'indexes' refers to field 'field1' which is not local to " "model 'Child'.", hint="This issue may be caused by multi-table inheritance.", obj=Child, id="models.E016", ), ], ) @skipUnlessDBFeature("supports_covering_indexes") def test_index_include_pointing_to_fk(self): class Target(models.Model): pass class Model(models.Model): fk_1 = models.ForeignKey(Target, models.CASCADE, related_name="target_1") fk_2 = models.ForeignKey(Target, models.CASCADE, related_name="target_2") class Meta: constraints = [ models.Index( fields=["id"], include=["fk_1_id", "fk_2"], name="name", ), ] self.assertEqual(Model.check(databases=self.databases), []) def test_func_index(self): class Model(models.Model): name = models.CharField(max_length=10) class Meta: indexes = [models.Index(Lower("name"), name="index_lower_name")] warn = Warning( "%s does not support indexes on expressions." % connection.display_name, hint=( "An index won't be created. Silence this warning if you don't " "care about it." 
), obj=Model, id="models.W043", ) expected = [] if connection.features.supports_expression_indexes else [warn] self.assertEqual(Model.check(databases=self.databases), expected) def test_func_index_required_db_features(self): class Model(models.Model): name = models.CharField(max_length=10) class Meta: indexes = [models.Index(Lower("name"), name="index_lower_name")] required_db_features = {"supports_expression_indexes"} self.assertEqual(Model.check(databases=self.databases), []) def test_func_index_complex_expression_custom_lookup(self): class Model(models.Model): height = models.IntegerField() weight = models.IntegerField() class Meta: indexes = [ models.Index( models.F("height") / (models.F("weight__abs") + models.Value(5)), name="name", ), ] with register_lookup(models.IntegerField, Abs): self.assertEqual(Model.check(), []) def test_func_index_pointing_to_missing_field(self): class Model(models.Model): class Meta: indexes = [models.Index(Lower("missing_field").desc(), name="name")] self.assertEqual( Model.check(), [ Error( "'indexes' refers to the nonexistent field 'missing_field'.", obj=Model, id="models.E012", ), ], ) def test_func_index_pointing_to_missing_field_nested(self): class Model(models.Model): class Meta: indexes = [ models.Index(Abs(Round("missing_field")), name="name"), ] self.assertEqual( Model.check(), [ Error( "'indexes' refers to the nonexistent field 'missing_field'.", obj=Model, id="models.E012", ), ], ) def test_func_index_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField("self") class Meta: indexes = [models.Index(Lower("m2m"), name="name")] self.assertEqual( Model.check(), [ Error( "'indexes' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'indexes'.", obj=Model, id="models.E013", ), ], ) def test_func_index_pointing_to_non_local_field(self): class Foo(models.Model): field1 = models.CharField(max_length=15) class Bar(Foo): class Meta: indexes = [models.Index(Lower("field1"), name="name")] self.assertEqual( Bar.check(), [ Error( "'indexes' refers to field 'field1' which is not local to " "model 'Bar'.", hint="This issue may be caused by multi-table inheritance.", obj=Bar, id="models.E016", ), ], ) def test_func_index_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey(Foo, models.CASCADE, related_name="bar_1") foo_2 = models.ForeignKey(Foo, models.CASCADE, related_name="bar_2") class Meta: indexes = [ models.Index(Lower("foo_1_id"), Lower("foo_2"), name="index_name"), ] self.assertEqual(Bar.check(), []) @isolate_apps("invalid_models_tests") class FieldNamesTests(TestCase): databases = {"default", "other"} def test_ending_with_underscore(self): class Model(models.Model): field_ = models.CharField(max_length=10) m2m_ = models.ManyToManyField("self") self.assertEqual( Model.check(), [ Error( "Field names must not end with an underscore.", obj=Model._meta.get_field("field_"), id="fields.E001", ), Error( "Field names must not end with an underscore.", obj=Model._meta.get_field("m2m_"), id="fields.E001", ), ], ) max_column_name_length, column_limit_db_alias = get_max_column_name_length() @unittest.skipIf( max_column_name_length is None, "The database doesn't have a column name length limit.", ) def test_M2M_long_column_name(self): """ #13711 -- Model check for long M2M column names when database has column name length limits. """ # A model with very long name which will be used to set relations to. 
class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz( models.Model ): title = models.CharField(max_length=11) # Main model for which checks will be performed. class ModelWithLongField(models.Model): m2m_field = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name="rn1", ) m2m_field2 = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name="rn2", through="m2msimple", ) m2m_field3 = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name="rn3", through="m2mcomplex", ) fk = models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, related_name="rn4", ) # Models used for setting `through` in M2M field. class m2msimple(models.Model): id2 = models.ForeignKey(ModelWithLongField, models.CASCADE) class m2mcomplex(models.Model): id2 = models.ForeignKey(ModelWithLongField, models.CASCADE) long_field_name = "a" * (self.max_column_name_length + 1) models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, ).contribute_to_class(m2msimple, long_field_name) models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, db_column=long_field_name, ).contribute_to_class(m2mcomplex, long_field_name) errors = ModelWithLongField.check(databases=("default", "other")) # First error because of M2M field set on the model with long name. m2m_long_name = ( "verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id" ) if self.max_column_name_length > len(m2m_long_name): # Some databases support names longer than the test name. expected = [] else: expected = [ Error( 'Autogenerated column name too long for M2M field "%s". ' 'Maximum length is "%s" for database "%s".' % ( m2m_long_name, self.max_column_name_length, self.column_limit_db_alias, ), hint="Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'.", obj=ModelWithLongField, id="models.E019", ) ] # Second error because the FK specified in the `through` model # `m2msimple` has auto-generated name longer than allowed. # There will be no check errors in the other M2M because it # specifies db_column for the FK in `through` model even if the actual # name is longer than the limits of the database. expected.append( Error( 'Autogenerated column name too long for M2M field "%s_id". ' 'Maximum length is "%s" for database "%s".' % ( long_field_name, self.max_column_name_length, self.column_limit_db_alias, ), hint="Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'.", obj=ModelWithLongField, id="models.E019", ) ) self.assertEqual(errors, expected) # Check for long column names is called only for specified database # aliases. self.assertEqual(ModelWithLongField.check(databases=None), []) @unittest.skipIf( max_column_name_length is None, "The database doesn't have a column name length limit.", ) def test_local_field_long_column_name(self): """ #13711 -- Model check for long column names when database does not support long names. 
""" class ModelWithLongField(models.Model): title = models.CharField(max_length=11) long_field_name = "a" * (self.max_column_name_length + 1) long_field_name2 = "b" * (self.max_column_name_length + 1) models.CharField(max_length=11).contribute_to_class( ModelWithLongField, long_field_name ) models.CharField(max_length=11, db_column="vlmn").contribute_to_class( ModelWithLongField, long_field_name2 ) self.assertEqual( ModelWithLongField.check(databases=("default", "other")), [ Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % ( long_field_name, self.max_column_name_length, self.column_limit_db_alias, ), hint="Set the column name manually using 'db_column'.", obj=ModelWithLongField, id="models.E018", ) ], ) # Check for long column names is called only for specified database # aliases. self.assertEqual(ModelWithLongField.check(databases=None), []) def test_including_separator(self): class Model(models.Model): some__field = models.IntegerField() self.assertEqual( Model.check(), [ Error( 'Field names must not contain "__".', obj=Model._meta.get_field("some__field"), id="fields.E002", ) ], ) def test_pk(self): class Model(models.Model): pk = models.IntegerField() self.assertEqual( Model.check(), [ Error( "'pk' is a reserved word that cannot be used as a field name.", obj=Model._meta.get_field("pk"), id="fields.E003", ) ], ) def test_db_column_clash(self): class Model(models.Model): foo = models.IntegerField() bar = models.IntegerField(db_column="foo") self.assertEqual( Model.check(), [ Error( "Field 'bar' has column name 'foo' that is used by " "another field.", hint="Specify a 'db_column' for the field.", obj=Model, id="models.E007", ) ], ) @isolate_apps("invalid_models_tests") class ShadowingFieldsTests(SimpleTestCase): def test_field_name_clash_with_child_accessor(self): class Parent(models.Model): pass class Child(Parent): child = models.CharField(max_length=100) self.assertEqual( Child.check(), [ Error( "The field 'child' clashes with the field " "'child' from model 'invalid_models_tests.parent'.", obj=Child._meta.get_field("child"), id="models.E006", ) ], ) def test_field_name_clash_with_m2m_through(self): class Parent(models.Model): clash_id = models.IntegerField() class Child(Parent): clash = models.ForeignKey("Child", models.CASCADE) class Model(models.Model): parents = models.ManyToManyField( to=Parent, through="Through", through_fields=["parent", "model"], ) class Through(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) model = models.ForeignKey(Model, models.CASCADE) self.assertEqual( Child.check(), [ Error( "The field 'clash' clashes with the field 'clash_id' from " "model 'invalid_models_tests.parent'.", obj=Child._meta.get_field("clash"), id="models.E006", ) ], ) def test_multiinheritance_clash(self): class Mother(models.Model): clash = models.IntegerField() class Father(models.Model): clash = models.IntegerField() class Child(Mother, Father): # Here we have two clashed: id (automatic field) and clash, because # both parents define these fields. 
pass self.assertEqual( Child.check(), [ Error( "The field 'id' from parent model " "'invalid_models_tests.mother' clashes with the field 'id' " "from parent model 'invalid_models_tests.father'.", obj=Child, id="models.E005", ), Error( "The field 'clash' from parent model " "'invalid_models_tests.mother' clashes with the field 'clash' " "from parent model 'invalid_models_tests.father'.", obj=Child, id="models.E005", ), ], ) def test_inheritance_clash(self): class Parent(models.Model): f_id = models.IntegerField() class Target(models.Model): # This field doesn't result in a clash. f_id = models.IntegerField() class Child(Parent): # This field clashes with parent "f_id" field. f = models.ForeignKey(Target, models.CASCADE) self.assertEqual( Child.check(), [ Error( "The field 'f' clashes with the field 'f_id' " "from model 'invalid_models_tests.parent'.", obj=Child._meta.get_field("f"), id="models.E006", ) ], ) def test_multigeneration_inheritance(self): class GrandParent(models.Model): clash = models.IntegerField() class Parent(GrandParent): pass class Child(Parent): pass class GrandChild(Child): clash = models.IntegerField() self.assertEqual( GrandChild.check(), [ Error( "The field 'clash' clashes with the field 'clash' " "from model 'invalid_models_tests.grandparent'.", obj=GrandChild._meta.get_field("clash"), id="models.E006", ) ], ) def test_id_clash(self): class Target(models.Model): pass class Model(models.Model): fk = models.ForeignKey(Target, models.CASCADE) fk_id = models.IntegerField() self.assertEqual( Model.check(), [ Error( "The field 'fk_id' clashes with the field 'fk' from model " "'invalid_models_tests.model'.", obj=Model._meta.get_field("fk_id"), id="models.E006", ) ], ) @isolate_apps("invalid_models_tests") class OtherModelTests(SimpleTestCase): def test_unique_primary_key(self): invalid_id = models.IntegerField(primary_key=False) class Model(models.Model): id = invalid_id self.assertEqual( Model.check(), [ Error( "'id' can only be used as a field name if the field also sets " "'primary_key=True'.", obj=Model, id="models.E004", ), ], ) def test_ordering_non_iterable(self): class Model(models.Model): class Meta: ordering = "missing_field" self.assertEqual( Model.check(), [ Error( "'ordering' must be a tuple or list " "(even if you want to order by only one field).", obj=Model, id="models.E014", ), ], ) def test_just_ordering_no_errors(self): class Model(models.Model): order = models.PositiveIntegerField() class Meta: ordering = ["order"] self.assertEqual(Model.check(), []) def test_just_order_with_respect_to_no_errors(self): class Question(models.Model): pass class Answer(models.Model): question = models.ForeignKey(Question, models.CASCADE) class Meta: order_with_respect_to = "question" self.assertEqual(Answer.check(), []) def test_ordering_with_order_with_respect_to(self): class Question(models.Model): pass class Answer(models.Model): question = models.ForeignKey(Question, models.CASCADE) order = models.IntegerField() class Meta: order_with_respect_to = "question" ordering = ["order"] self.assertEqual( Answer.check(), [ Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=Answer, id="models.E021", ), ], ) def test_non_valid(self): class RelationModel(models.Model): pass class Model(models.Model): relation = models.ManyToManyField(RelationModel) class Meta: ordering = ["relation"] self.assertEqual( Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'relation'.", obj=Model, id="models.E015", ), ], ) def 
test_ordering_pointing_to_missing_field(self): class Model(models.Model): class Meta: ordering = ("missing_field",) self.assertEqual( Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_field'.", obj=Model, id="models.E015", ) ], ) def test_ordering_pointing_to_missing_foreignkey_field(self): class Model(models.Model): missing_fk_field = models.IntegerField() class Meta: ordering = ("missing_fk_field_id",) self.assertEqual( Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_fk_field_id'.", obj=Model, id="models.E015", ) ], ) def test_ordering_pointing_to_missing_related_field(self): class Model(models.Model): test = models.IntegerField() class Meta: ordering = ("missing_related__id",) self.assertEqual( Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_related__id'.", obj=Model, id="models.E015", ) ], ) def test_ordering_pointing_to_missing_related_model_field(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) class Meta: ordering = ("parent__missing_field",) self.assertEqual( Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent__missing_field'.", obj=Child, id="models.E015", ) ], ) def test_ordering_pointing_to_non_related_field(self): class Child(models.Model): parent = models.IntegerField() class Meta: ordering = ("parent__missing_field",) self.assertEqual( Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent__missing_field'.", obj=Child, id="models.E015", ) ], ) def test_ordering_pointing_to_two_related_model_field(self): class Parent2(models.Model): pass class Parent1(models.Model): parent2 = models.ForeignKey(Parent2, models.CASCADE) class Child(models.Model): parent1 = models.ForeignKey(Parent1, models.CASCADE) class Meta: ordering = ("parent1__parent2__missing_field",) self.assertEqual( Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent1__parent2__missing_field'.", obj=Child, id="models.E015", ) ], ) def test_ordering_pointing_multiple_times_to_model_fields(self): class Parent(models.Model): field1 = models.CharField(max_length=100) field2 = models.CharField(max_length=100) class Child(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) class Meta: ordering = ("parent__field1__field2",) self.assertEqual( Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent__field1__field2'.", obj=Child, id="models.E015", ) ], ) def test_ordering_allows_registered_lookups(self): class Model(models.Model): test = models.CharField(max_length=100) class Meta: ordering = ("test__lower",) with register_lookup(models.CharField, Lower): self.assertEqual(Model.check(), []) def test_ordering_pointing_to_lookup_not_transform(self): class Model(models.Model): test = models.CharField(max_length=100) class Meta: ordering = ("test__isnull",) self.assertEqual(Model.check(), []) def test_ordering_pointing_to_related_model_pk(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) class Meta: ordering = ("parent__pk",) self.assertEqual(Child.check(), []) def test_ordering_pointing_to_foreignkey_field(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, 
models.CASCADE) class Meta: ordering = ("parent_id",) self.assertFalse(Child.check()) def test_name_beginning_with_underscore(self): class _Model(models.Model): pass self.assertEqual( _Model.check(), [ Error( "The model name '_Model' cannot start or end with an underscore " "as it collides with the query lookup syntax.", obj=_Model, id="models.E023", ) ], ) def test_name_ending_with_underscore(self): class Model_(models.Model): pass self.assertEqual( Model_.check(), [ Error( "The model name 'Model_' cannot start or end with an underscore " "as it collides with the query lookup syntax.", obj=Model_, id="models.E023", ) ], ) def test_name_contains_double_underscores(self): class Test__Model(models.Model): pass self.assertEqual( Test__Model.check(), [ Error( "The model name 'Test__Model' cannot contain double underscores " "as it collides with the query lookup syntax.", obj=Test__Model, id="models.E024", ) ], ) def test_property_and_related_field_accessor_clash(self): class Model(models.Model): fk = models.ForeignKey("self", models.CASCADE) # Override related field accessor. Model.fk_id = property(lambda self: "ERROR") self.assertEqual( Model.check(), [ Error( "The property 'fk_id' clashes with a related field accessor.", obj=Model, id="models.E025", ) ], ) def test_single_primary_key(self): class Model(models.Model): foo = models.IntegerField(primary_key=True) bar = models.IntegerField(primary_key=True) self.assertEqual( Model.check(), [ Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=Model, id="models.E026", ) ], ) @override_settings(TEST_SWAPPED_MODEL_BAD_VALUE="not-a-model") def test_swappable_missing_app_name(self): class Model(models.Model): class Meta: swappable = "TEST_SWAPPED_MODEL_BAD_VALUE" self.assertEqual( Model.check(), [ Error( "'TEST_SWAPPED_MODEL_BAD_VALUE' is not of the form " "'app_label.app_name'.", id="models.E001", ), ], ) @override_settings(TEST_SWAPPED_MODEL_BAD_MODEL="not_an_app.Target") def test_swappable_missing_app(self): class Model(models.Model): class Meta: swappable = "TEST_SWAPPED_MODEL_BAD_MODEL" self.assertEqual( Model.check(), [ Error( "'TEST_SWAPPED_MODEL_BAD_MODEL' references 'not_an_app.Target', " "which has not been installed, or is abstract.", id="models.E002", ), ], ) def test_two_m2m_through_same_relationship(self): class Person(models.Model): pass class Group(models.Model): primary = models.ManyToManyField( Person, through="Membership", related_name="primary" ) secondary = models.ManyToManyField( Person, through="Membership", related_name="secondary" ) class Membership(models.Model): person = models.ForeignKey(Person, models.CASCADE) group = models.ForeignKey(Group, models.CASCADE) self.assertEqual( Group.check(), [ Error( "The model has two identical many-to-many relations through " "the intermediate model 'invalid_models_tests.Membership'.", obj=Group, id="models.E003", ) ], ) def test_two_m2m_through_same_model_with_different_through_fields(self): class Country(models.Model): pass class ShippingMethod(models.Model): to_countries = models.ManyToManyField( Country, through="ShippingMethodPrice", through_fields=("method", "to_country"), ) from_countries = models.ManyToManyField( Country, through="ShippingMethodPrice", through_fields=("method", "from_country"), related_name="+", ) class ShippingMethodPrice(models.Model): method = models.ForeignKey(ShippingMethod, models.CASCADE) to_country = models.ForeignKey(Country, models.CASCADE) from_country = models.ForeignKey(Country, models.CASCADE) 
self.assertEqual(ShippingMethod.check(), []) def test_onetoone_with_parent_model(self): class Place(models.Model): pass class ParkingLot(Place): other_place = models.OneToOneField( Place, models.CASCADE, related_name="other_parking" ) self.assertEqual(ParkingLot.check(), []) def test_onetoone_with_explicit_parent_link_parent_model(self): class Place(models.Model): pass class ParkingLot(Place): place = models.OneToOneField( Place, models.CASCADE, parent_link=True, primary_key=True ) other_place = models.OneToOneField( Place, models.CASCADE, related_name="other_parking" ) self.assertEqual(ParkingLot.check(), []) def test_m2m_table_name_clash(self): class Foo(models.Model): bar = models.ManyToManyField("Bar", db_table="myapp_bar") class Meta: db_table = "myapp_foo" class Bar(models.Model): class Meta: db_table = "myapp_bar" self.assertEqual( Foo.check(), [ Error( "The field's intermediary table 'myapp_bar' clashes with the " "table name of 'invalid_models_tests.Bar'.", obj=Foo._meta.get_field("bar"), id="fields.E340", ) ], ) @override_settings( DATABASE_ROUTERS=["invalid_models_tests.test_models.EmptyRouter"] ) def test_m2m_table_name_clash_database_routers_installed(self): class Foo(models.Model): bar = models.ManyToManyField("Bar", db_table="myapp_bar") class Meta: db_table = "myapp_foo" class Bar(models.Model): class Meta: db_table = "myapp_bar" self.assertEqual( Foo.check(), [ Warning( "The field's intermediary table 'myapp_bar' clashes with the " "table name of 'invalid_models_tests.Bar'.", obj=Foo._meta.get_field("bar"), hint=( "You have configured settings.DATABASE_ROUTERS. Verify " "that the table of 'invalid_models_tests.Bar' is " "correctly routed to a separate database." ), id="fields.W344", ), ], ) def test_m2m_field_table_name_clash(self): class Foo(models.Model): pass class Bar(models.Model): foos = models.ManyToManyField(Foo, db_table="clash") class Baz(models.Model): foos = models.ManyToManyField(Foo, db_table="clash") self.assertEqual( Bar.check() + Baz.check(), [ Error( "The field's intermediary table 'clash' clashes with the " "table name of 'invalid_models_tests.Baz.foos'.", obj=Bar._meta.get_field("foos"), id="fields.E340", ), Error( "The field's intermediary table 'clash' clashes with the " "table name of 'invalid_models_tests.Bar.foos'.", obj=Baz._meta.get_field("foos"), id="fields.E340", ), ], ) @override_settings( DATABASE_ROUTERS=["invalid_models_tests.test_models.EmptyRouter"] ) def test_m2m_field_table_name_clash_database_routers_installed(self): class Foo(models.Model): pass class Bar(models.Model): foos = models.ManyToManyField(Foo, db_table="clash") class Baz(models.Model): foos = models.ManyToManyField(Foo, db_table="clash") self.assertEqual( Bar.check() + Baz.check(), [ Warning( "The field's intermediary table 'clash' clashes with the " "table name of 'invalid_models_tests.%s.foos'." % clashing_model, obj=model_cls._meta.get_field("foos"), hint=( "You have configured settings.DATABASE_ROUTERS. Verify " "that the table of 'invalid_models_tests.%s.foos' is " "correctly routed to a separate database." % clashing_model ), id="fields.W344", ) for model_cls, clashing_model in [(Bar, "Baz"), (Baz, "Bar")] ], ) def test_m2m_autogenerated_table_name_clash(self): class Foo(models.Model): class Meta: db_table = "bar_foos" class Bar(models.Model): # The autogenerated `db_table` will be bar_foos. 
foos = models.ManyToManyField(Foo) class Meta: db_table = "bar" self.assertEqual( Bar.check(), [ Error( "The field's intermediary table 'bar_foos' clashes with the " "table name of 'invalid_models_tests.Foo'.", obj=Bar._meta.get_field("foos"), id="fields.E340", ) ], ) @override_settings( DATABASE_ROUTERS=["invalid_models_tests.test_models.EmptyRouter"] ) def test_m2m_autogenerated_table_name_clash_database_routers_installed(self): class Foo(models.Model): class Meta: db_table = "bar_foos" class Bar(models.Model): # The autogenerated db_table is bar_foos. foos = models.ManyToManyField(Foo) class Meta: db_table = "bar" self.assertEqual( Bar.check(), [ Warning( "The field's intermediary table 'bar_foos' clashes with the " "table name of 'invalid_models_tests.Foo'.", obj=Bar._meta.get_field("foos"), hint=( "You have configured settings.DATABASE_ROUTERS. Verify " "that the table of 'invalid_models_tests.Foo' is " "correctly routed to a separate database." ), id="fields.W344", ), ], ) def test_m2m_unmanaged_shadow_models_not_checked(self): class A1(models.Model): pass class C1(models.Model): mm_a = models.ManyToManyField(A1, db_table="d1") # Unmanaged models that shadow the above models. Reused table names # shouldn't be flagged by any checks. class A2(models.Model): class Meta: managed = False class C2(models.Model): mm_a = models.ManyToManyField(A2, through="Intermediate") class Meta: managed = False class Intermediate(models.Model): a2 = models.ForeignKey(A2, models.CASCADE, db_column="a1_id") c2 = models.ForeignKey(C2, models.CASCADE, db_column="c1_id") class Meta: db_table = "d1" managed = False self.assertEqual(C1.check(), []) self.assertEqual(C2.check(), []) def test_m2m_to_concrete_and_proxy_allowed(self): class A(models.Model): pass class Through(models.Model): a = models.ForeignKey("A", models.CASCADE) c = models.ForeignKey("C", models.CASCADE) class ThroughProxy(Through): class Meta: proxy = True class C(models.Model): mm_a = models.ManyToManyField(A, through=Through) mm_aproxy = models.ManyToManyField( A, through=ThroughProxy, related_name="proxied_m2m" ) self.assertEqual(C.check(), []) @isolate_apps("django.contrib.auth", kwarg_name="apps") def test_lazy_reference_checks(self, apps): class DummyModel(models.Model): author = models.ForeignKey("Author", models.CASCADE) class Meta: app_label = "invalid_models_tests" class DummyClass: def __call__(self, **kwargs): pass def dummy_method(self): pass def dummy_function(*args, **kwargs): pass apps.lazy_model_operation(dummy_function, ("auth", "imaginarymodel")) apps.lazy_model_operation(dummy_function, ("fanciful_app", "imaginarymodel")) post_init.connect(dummy_function, sender="missing-app.Model", apps=apps) post_init.connect(DummyClass(), sender="missing-app.Model", apps=apps) post_init.connect( DummyClass().dummy_method, sender="missing-app.Model", apps=apps ) self.assertEqual( _check_lazy_references(apps), [ Error( "%r contains a lazy reference to auth.imaginarymodel, " "but app 'auth' doesn't provide model 'imaginarymodel'." % dummy_function, obj=dummy_function, id="models.E022", ), Error( "%r contains a lazy reference to fanciful_app.imaginarymodel, " "but app 'fanciful_app' isn't installed." 
% dummy_function, obj=dummy_function, id="models.E022", ), Error( "An instance of class 'DummyClass' was connected to " "the 'post_init' signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj="invalid_models_tests.test_models", id="signals.E001", ), Error( "Bound method 'DummyClass.dummy_method' was connected to the " "'post_init' signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj="invalid_models_tests.test_models", id="signals.E001", ), Error( "The field invalid_models_tests.DummyModel.author was declared " "with a lazy reference to 'invalid_models_tests.author', but app " "'invalid_models_tests' isn't installed.", hint=None, obj=DummyModel.author.field, id="fields.E307", ), Error( "The function 'dummy_function' was connected to the 'post_init' " "signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj="invalid_models_tests.test_models", id="signals.E001", ), ], ) class MultipleAutoFieldsTests(TestCase): def test_multiple_autofields(self): msg = ( "Model invalid_models_tests.MultipleAutoFields can't have more " "than one auto-generated field." ) with self.assertRaisesMessage(ValueError, msg): class MultipleAutoFields(models.Model): auto1 = models.AutoField(primary_key=True) auto2 = models.AutoField(primary_key=True) @isolate_apps("invalid_models_tests") class JSONFieldTests(TestCase): @skipUnlessDBFeature("supports_json_field") def test_ordering_pointing_to_json_field_value(self): class Model(models.Model): field = models.JSONField() class Meta: ordering = ["field__value"] self.assertEqual(Model.check(databases=self.databases), []) def test_check_jsonfield(self): class Model(models.Model): field = models.JSONField() error = Error( "%s does not support JSONFields." % connection.display_name, obj=Model, id="fields.E180", ) expected = [] if connection.features.supports_json_field else [error] self.assertEqual(Model.check(databases=self.databases), expected) def test_check_jsonfield_required_db_features(self): class Model(models.Model): field = models.JSONField() class Meta: required_db_features = {"supports_json_field"} self.assertEqual(Model.check(databases=self.databases), []) @isolate_apps("invalid_models_tests") class ConstraintsTests(TestCase): def test_check_constraints(self): class Model(models.Model): age = models.IntegerField() class Meta: constraints = [ models.CheckConstraint(check=models.Q(age__gte=18), name="is_adult") ] errors = Model.check(databases=self.databases) warn = Warning( "%s does not support check constraints." % connection.display_name, hint=( "A constraint won't be created. Silence this warning if you " "don't care about it." 
), obj=Model, id="models.W027", ) expected = ( [] if connection.features.supports_table_check_constraints else [warn] ) self.assertCountEqual(errors, expected) def test_check_constraints_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {"supports_table_check_constraints"} constraints = [ models.CheckConstraint(check=models.Q(age__gte=18), name="is_adult") ] self.assertEqual(Model.check(databases=self.databases), []) def test_check_constraint_pointing_to_missing_field(self): class Model(models.Model): class Meta: required_db_features = {"supports_table_check_constraints"} constraints = [ models.CheckConstraint( name="name", check=models.Q(missing_field=2), ), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field 'missing_field'.", obj=Model, id="models.E012", ), ] if connection.features.supports_table_check_constraints else [], ) @skipUnlessDBFeature("supports_table_check_constraints") def test_check_constraint_pointing_to_reverse_fk(self): class Model(models.Model): parent = models.ForeignKey("self", models.CASCADE, related_name="parents") class Meta: constraints = [ models.CheckConstraint(name="name", check=models.Q(parents=3)), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field 'parents'.", obj=Model, id="models.E012", ), ], ) @skipUnlessDBFeature("supports_table_check_constraints") def test_check_constraint_pointing_to_reverse_o2o(self): class Model(models.Model): parent = models.OneToOneField("self", models.CASCADE) class Meta: constraints = [ models.CheckConstraint( name="name", check=models.Q(model__isnull=True), ), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field 'model'.", obj=Model, id="models.E012", ), ], ) @skipUnlessDBFeature("supports_table_check_constraints") def test_check_constraint_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField("self") class Meta: constraints = [ models.CheckConstraint(name="name", check=models.Q(m2m=2)), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'constraints'.", obj=Model, id="models.E013", ), ], ) @skipUnlessDBFeature("supports_table_check_constraints") def test_check_constraint_pointing_to_fk(self): class Target(models.Model): pass class Model(models.Model): fk_1 = models.ForeignKey(Target, models.CASCADE, related_name="target_1") fk_2 = models.ForeignKey(Target, models.CASCADE, related_name="target_2") class Meta: constraints = [ models.CheckConstraint( name="name", check=models.Q(fk_1_id=2) | models.Q(fk_2=2), ), ] self.assertEqual(Model.check(databases=self.databases), []) @skipUnlessDBFeature("supports_table_check_constraints") def test_check_constraint_pointing_to_pk(self): class Model(models.Model): age = models.SmallIntegerField() class Meta: constraints = [ models.CheckConstraint( name="name", check=models.Q(pk__gt=5) & models.Q(age__gt=models.F("pk")), ), ] self.assertEqual(Model.check(databases=self.databases), []) @skipUnlessDBFeature("supports_table_check_constraints") def test_check_constraint_pointing_to_non_local_field(self): class Parent(models.Model): field1 = models.IntegerField() class Child(Parent): pass class Meta: constraints = [ models.CheckConstraint(name="name", check=models.Q(field1=1)), ] self.assertEqual( 
Child.check(databases=self.databases), [ Error( "'constraints' refers to field 'field1' which is not local to " "model 'Child'.", hint="This issue may be caused by multi-table inheritance.", obj=Child, id="models.E016", ), ], ) @skipUnlessDBFeature("supports_table_check_constraints") def test_check_constraint_pointing_to_joined_fields(self): class Model(models.Model): name = models.CharField(max_length=10) field1 = models.PositiveSmallIntegerField() field2 = models.PositiveSmallIntegerField() field3 = models.PositiveSmallIntegerField() parent = models.ForeignKey("self", models.CASCADE) previous = models.OneToOneField("self", models.CASCADE, related_name="next") class Meta: constraints = [ models.CheckConstraint( name="name1", check=models.Q( field1__lt=models.F("parent__field1") + models.F("parent__field2") ), ), models.CheckConstraint( name="name2", check=models.Q(name=Lower("parent__name")) ), models.CheckConstraint( name="name3", check=models.Q(parent__field3=models.F("field1")) ), models.CheckConstraint( name="name4", check=models.Q(name=Lower("previous__name")), ), ] joined_fields = [ "parent__field1", "parent__field2", "parent__field3", "parent__name", "previous__name", ] errors = Model.check(databases=self.databases) expected_errors = [ Error( "'constraints' refers to the joined field '%s'." % field_name, obj=Model, id="models.E041", ) for field_name in joined_fields ] self.assertCountEqual(errors, expected_errors) @skipUnlessDBFeature("supports_table_check_constraints") def test_check_constraint_pointing_to_joined_fields_complex_check(self): class Model(models.Model): name = models.PositiveSmallIntegerField() field1 = models.PositiveSmallIntegerField() field2 = models.PositiveSmallIntegerField() parent = models.ForeignKey("self", models.CASCADE) class Meta: constraints = [ models.CheckConstraint( name="name", check=models.Q( ( models.Q(name="test") & models.Q(field1__lt=models.F("parent__field1")) ) | ( models.Q(name__startswith=Lower("parent__name")) & models.Q( field1__gte=( models.F("parent__field1") + models.F("parent__field2") ) ) ) ) | (models.Q(name="test1")), ), ] joined_fields = ["parent__field1", "parent__field2", "parent__name"] errors = Model.check(databases=self.databases) expected_errors = [ Error( "'constraints' refers to the joined field '%s'." 
% field_name, obj=Model, id="models.E041", ) for field_name in joined_fields ] self.assertCountEqual(errors, expected_errors) def test_check_constraint_raw_sql_check(self): class Model(models.Model): class Meta: required_db_features = {"supports_table_check_constraints"} constraints = [ models.CheckConstraint(check=models.Q(id__gt=0), name="q_check"), models.CheckConstraint( check=models.ExpressionWrapper( models.Q(price__gt=20), output_field=models.BooleanField(), ), name="expression_wrapper_check", ), models.CheckConstraint( check=models.expressions.RawSQL( "id = 0", params=(), output_field=models.BooleanField(), ), name="raw_sql_check", ), models.CheckConstraint( check=models.Q( models.ExpressionWrapper( models.Q( models.expressions.RawSQL( "id = 0", params=(), output_field=models.BooleanField(), ) ), output_field=models.BooleanField(), ) ), name="nested_raw_sql_check", ), ] expected_warnings = ( [ Warning( "Check constraint 'raw_sql_check' contains RawSQL() expression and " "won't be validated during the model full_clean().", hint="Silence this warning if you don't care about it.", obj=Model, id="models.W045", ), Warning( "Check constraint 'nested_raw_sql_check' contains RawSQL() " "expression and won't be validated during the model full_clean().", hint="Silence this warning if you don't care about it.", obj=Model, id="models.W045", ), ] if connection.features.supports_table_check_constraints else [] ) self.assertEqual(Model.check(databases=self.databases), expected_warnings) def test_unique_constraint_with_condition(self): class Model(models.Model): age = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint( fields=["age"], name="unique_age_gte_100", condition=models.Q(age__gte=100), ), ] errors = Model.check(databases=self.databases) expected = ( [] if connection.features.supports_partial_indexes else [ Warning( "%s does not support unique constraints with conditions." % connection.display_name, hint=( "A constraint won't be created. Silence this warning if " "you don't care about it." 
), obj=Model, id="models.W036", ), ] ) self.assertEqual(errors, expected) def test_unique_constraint_with_condition_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {"supports_partial_indexes"} constraints = [ models.UniqueConstraint( fields=["age"], name="unique_age_gte_100", condition=models.Q(age__gte=100), ), ] self.assertEqual(Model.check(databases=self.databases), []) def test_unique_constraint_condition_pointing_to_missing_field(self): class Model(models.Model): age = models.SmallIntegerField() class Meta: required_db_features = {"supports_partial_indexes"} constraints = [ models.UniqueConstraint( name="name", fields=["age"], condition=models.Q(missing_field=2), ), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field 'missing_field'.", obj=Model, id="models.E012", ), ] if connection.features.supports_partial_indexes else [], ) def test_unique_constraint_condition_pointing_to_joined_fields(self): class Model(models.Model): age = models.SmallIntegerField() parent = models.ForeignKey("self", models.CASCADE) class Meta: required_db_features = {"supports_partial_indexes"} constraints = [ models.UniqueConstraint( name="name", fields=["age"], condition=models.Q(parent__age__lt=2), ), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to the joined field 'parent__age__lt'.", obj=Model, id="models.E041", ) ] if connection.features.supports_partial_indexes else [], ) def test_unique_constraint_pointing_to_reverse_o2o(self): class Model(models.Model): parent = models.OneToOneField("self", models.CASCADE) class Meta: required_db_features = {"supports_partial_indexes"} constraints = [ models.UniqueConstraint( fields=["parent"], name="name", condition=models.Q(model__isnull=True), ), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field 'model'.", obj=Model, id="models.E012", ), ] if connection.features.supports_partial_indexes else [], ) def test_deferrable_unique_constraint(self): class Model(models.Model): age = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint( fields=["age"], name="unique_age_deferrable", deferrable=models.Deferrable.DEFERRED, ), ] errors = Model.check(databases=self.databases) expected = ( [] if connection.features.supports_deferrable_unique_constraints else [ Warning( "%s does not support deferrable unique constraints." % connection.display_name, hint=( "A constraint won't be created. Silence this warning if " "you don't care about it." 
), obj=Model, id="models.W038", ), ] ) self.assertEqual(errors, expected) def test_deferrable_unique_constraint_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {"supports_deferrable_unique_constraints"} constraints = [ models.UniqueConstraint( fields=["age"], name="unique_age_deferrable", deferrable=models.Deferrable.IMMEDIATE, ), ] self.assertEqual(Model.check(databases=self.databases), []) def test_unique_constraint_pointing_to_missing_field(self): class Model(models.Model): class Meta: constraints = [ models.UniqueConstraint(fields=["missing_field"], name="name") ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field 'missing_field'.", obj=Model, id="models.E012", ), ], ) def test_unique_constraint_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField("self") class Meta: constraints = [models.UniqueConstraint(fields=["m2m"], name="name")] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'constraints'.", obj=Model, id="models.E013", ), ], ) def test_unique_constraint_pointing_to_non_local_field(self): class Parent(models.Model): field1 = models.IntegerField() class Child(Parent): field2 = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint(fields=["field2", "field1"], name="name"), ] self.assertEqual( Child.check(databases=self.databases), [ Error( "'constraints' refers to field 'field1' which is not local to " "model 'Child'.", hint="This issue may be caused by multi-table inheritance.", obj=Child, id="models.E016", ), ], ) def test_unique_constraint_pointing_to_fk(self): class Target(models.Model): pass class Model(models.Model): fk_1 = models.ForeignKey(Target, models.CASCADE, related_name="target_1") fk_2 = models.ForeignKey(Target, models.CASCADE, related_name="target_2") class Meta: constraints = [ models.UniqueConstraint(fields=["fk_1_id", "fk_2"], name="name"), ] self.assertEqual(Model.check(databases=self.databases), []) def test_unique_constraint_with_include(self): class Model(models.Model): age = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint( fields=["age"], name="unique_age_include_id", include=["id"], ), ] errors = Model.check(databases=self.databases) expected = ( [] if connection.features.supports_covering_indexes else [ Warning( "%s does not support unique constraints with non-key columns." % connection.display_name, hint=( "A constraint won't be created. Silence this warning if " "you don't care about it." 
), obj=Model, id="models.W039", ), ] ) self.assertEqual(errors, expected) def test_unique_constraint_with_include_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {"supports_covering_indexes"} constraints = [ models.UniqueConstraint( fields=["age"], name="unique_age_include_id", include=["id"], ), ] self.assertEqual(Model.check(databases=self.databases), []) @skipUnlessDBFeature("supports_covering_indexes") def test_unique_constraint_include_pointing_to_missing_field(self): class Model(models.Model): class Meta: constraints = [ models.UniqueConstraint( fields=["id"], include=["missing_field"], name="name", ), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field 'missing_field'.", obj=Model, id="models.E012", ), ], ) @skipUnlessDBFeature("supports_covering_indexes") def test_unique_constraint_include_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField("self") class Meta: constraints = [ models.UniqueConstraint( fields=["id"], include=["m2m"], name="name", ), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'constraints'.", obj=Model, id="models.E013", ), ], ) @skipUnlessDBFeature("supports_covering_indexes") def test_unique_constraint_include_pointing_to_non_local_field(self): class Parent(models.Model): field1 = models.IntegerField() class Child(Parent): field2 = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint( fields=["field2"], include=["field1"], name="name", ), ] self.assertEqual( Child.check(databases=self.databases), [ Error( "'constraints' refers to field 'field1' which is not local to " "model 'Child'.", hint="This issue may be caused by multi-table inheritance.", obj=Child, id="models.E016", ), ], ) @skipUnlessDBFeature("supports_covering_indexes") def test_unique_constraint_include_pointing_to_fk(self): class Target(models.Model): pass class Model(models.Model): fk_1 = models.ForeignKey(Target, models.CASCADE, related_name="target_1") fk_2 = models.ForeignKey(Target, models.CASCADE, related_name="target_2") class Meta: constraints = [ models.UniqueConstraint( fields=["id"], include=["fk_1_id", "fk_2"], name="name", ), ] self.assertEqual(Model.check(databases=self.databases), []) def test_func_unique_constraint(self): class Model(models.Model): name = models.CharField(max_length=10) class Meta: constraints = [ models.UniqueConstraint(Lower("name"), name="lower_name_uq"), ] warn = Warning( "%s does not support unique constraints on expressions." % connection.display_name, hint=( "A constraint won't be created. Silence this warning if you " "don't care about it." 
), obj=Model, id="models.W044", ) expected = [] if connection.features.supports_expression_indexes else [warn] self.assertEqual(Model.check(databases=self.databases), expected) def test_func_unique_constraint_required_db_features(self): class Model(models.Model): name = models.CharField(max_length=10) class Meta: constraints = [ models.UniqueConstraint(Lower("name"), name="lower_name_unq"), ] required_db_features = {"supports_expression_indexes"} self.assertEqual(Model.check(databases=self.databases), []) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_expression_custom_lookup(self): class Model(models.Model): height = models.IntegerField() weight = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint( models.F("height") / (models.F("weight__abs") + models.Value(5)), name="name", ), ] with register_lookup(models.IntegerField, Abs): self.assertEqual(Model.check(databases=self.databases), []) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_pointing_to_missing_field(self): class Model(models.Model): class Meta: constraints = [ models.UniqueConstraint(Lower("missing_field").desc(), name="name"), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field 'missing_field'.", obj=Model, id="models.E012", ), ], ) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_pointing_to_missing_field_nested(self): class Model(models.Model): class Meta: constraints = [ models.UniqueConstraint(Abs(Round("missing_field")), name="name"), ] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field 'missing_field'.", obj=Model, id="models.E012", ), ], ) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField("self") class Meta: constraints = [models.UniqueConstraint(Lower("m2m"), name="name")] self.assertEqual( Model.check(databases=self.databases), [ Error( "'constraints' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'constraints'.", obj=Model, id="models.E013", ), ], ) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_pointing_to_non_local_field(self): class Foo(models.Model): field1 = models.CharField(max_length=15) class Bar(Foo): class Meta: constraints = [models.UniqueConstraint(Lower("field1"), name="name")] self.assertEqual( Bar.check(databases=self.databases), [ Error( "'constraints' refers to field 'field1' which is not local to " "model 'Bar'.", hint="This issue may be caused by multi-table inheritance.", obj=Bar, id="models.E016", ), ], ) @skipUnlessDBFeature("supports_expression_indexes") def test_func_unique_constraint_pointing_to_fk(self): class Foo(models.Model): id = models.CharField(primary_key=True, max_length=255) class Bar(models.Model): foo_1 = models.ForeignKey(Foo, models.CASCADE, related_name="bar_1") foo_2 = models.ForeignKey(Foo, models.CASCADE, related_name="bar_2") class Meta: constraints = [ models.UniqueConstraint( Lower("foo_1_id"), Lower("foo_2"), name="name", ), ] self.assertEqual(Bar.check(databases=self.databases), [])
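

# An illustrative sketch, not part of the test suite above: a minimal,
# hypothetical test class showing how the Meta options exercised throughout
# this module surface as system-check messages. It reuses the module's own
# imports (isolate_apps, SimpleTestCase, models) and follows the same pattern
# as OtherModelTests; the model and field names are invented for the example.
@isolate_apps("invalid_models_tests")
class SketchOrderingCheckTests(SimpleTestCase):
    def test_ordering_pointing_to_missing_field_reports_e015(self):
        class Order(models.Model):
            total = models.IntegerField()

            class Meta:
                # 'missing_field' is not a field on Order, so Model.check()
                # reports models.E015, as in the ordering tests above.
                ordering = ["missing_field"]

        self.assertEqual([error.id for error in Order.check()], ["models.E015"])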
6cedf14c718f8f0d2b4db6c1f2ceacc04bf4f3c4c734749ebbb53959d9c9b71b
from unittest import mock from django.core.exceptions import ValidationError from django.db import IntegrityError, connection, models from django.db.models import F from django.db.models.constraints import BaseConstraint from django.db.models.functions import Lower from django.db.transaction import atomic from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from .models import ( ChildModel, ChildUniqueConstraintProduct, Product, UniqueConstraintConditionProduct, UniqueConstraintDeferrable, UniqueConstraintInclude, UniqueConstraintProduct, ) def get_constraints(table): with connection.cursor() as cursor: return connection.introspection.get_constraints(cursor, table) class BaseConstraintTests(SimpleTestCase): def test_constraint_sql(self): c = BaseConstraint("name") msg = "This method must be implemented by a subclass." with self.assertRaisesMessage(NotImplementedError, msg): c.constraint_sql(None, None) def test_contains_expressions(self): c = BaseConstraint("name") self.assertIs(c.contains_expressions, False) def test_create_sql(self): c = BaseConstraint("name") msg = "This method must be implemented by a subclass." with self.assertRaisesMessage(NotImplementedError, msg): c.create_sql(None, None) def test_remove_sql(self): c = BaseConstraint("name") msg = "This method must be implemented by a subclass." with self.assertRaisesMessage(NotImplementedError, msg): c.remove_sql(None, None) def test_validate(self): c = BaseConstraint("name") msg = "This method must be implemented by a subclass." with self.assertRaisesMessage(NotImplementedError, msg): c.validate(None, None) def test_default_violation_error_message(self): c = BaseConstraint("name") self.assertEqual( c.get_violation_error_message(), "Constraint “name” is violated." ) def test_custom_violation_error_message(self): c = BaseConstraint( "base_name", violation_error_message="custom %(name)s message" ) self.assertEqual(c.get_violation_error_message(), "custom base_name message") def test_custom_violation_error_message_clone(self): constraint = BaseConstraint( "base_name", violation_error_message="custom %(name)s message", ).clone() self.assertEqual( constraint.get_violation_error_message(), "custom base_name message", ) def test_deconstruction(self): constraint = BaseConstraint( "base_name", violation_error_message="custom %(name)s message", ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, "django.db.models.BaseConstraint") self.assertEqual(args, ()) self.assertEqual( kwargs, {"name": "base_name", "violation_error_message": "custom %(name)s message"}, ) class CheckConstraintTests(TestCase): def test_eq(self): check1 = models.Q(price__gt=models.F("discounted_price")) check2 = models.Q(price__lt=models.F("discounted_price")) self.assertEqual( models.CheckConstraint(check=check1, name="price"), models.CheckConstraint(check=check1, name="price"), ) self.assertEqual(models.CheckConstraint(check=check1, name="price"), mock.ANY) self.assertNotEqual( models.CheckConstraint(check=check1, name="price"), models.CheckConstraint(check=check1, name="price2"), ) self.assertNotEqual( models.CheckConstraint(check=check1, name="price"), models.CheckConstraint(check=check2, name="price"), ) self.assertNotEqual(models.CheckConstraint(check=check1, name="price"), 1) self.assertNotEqual( models.CheckConstraint(check=check1, name="price"), models.CheckConstraint( check=check1, name="price", violation_error_message="custom error" ), ) self.assertNotEqual( models.CheckConstraint( check=check1, name="price", 
violation_error_message="custom error" ), models.CheckConstraint( check=check1, name="price", violation_error_message="other custom error" ), ) self.assertEqual( models.CheckConstraint( check=check1, name="price", violation_error_message="custom error" ), models.CheckConstraint( check=check1, name="price", violation_error_message="custom error" ), ) def test_repr(self): constraint = models.CheckConstraint( check=models.Q(price__gt=models.F("discounted_price")), name="price_gt_discounted_price", ) self.assertEqual( repr(constraint), "<CheckConstraint: check=(AND: ('price__gt', F(discounted_price))) " "name='price_gt_discounted_price'>", ) def test_invalid_check_types(self): msg = "CheckConstraint.check must be a Q instance or boolean expression." with self.assertRaisesMessage(TypeError, msg): models.CheckConstraint(check=models.F("discounted_price"), name="check") def test_deconstruction(self): check = models.Q(price__gt=models.F("discounted_price")) name = "price_gt_discounted_price" constraint = models.CheckConstraint(check=check, name=name) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, "django.db.models.CheckConstraint") self.assertEqual(args, ()) self.assertEqual(kwargs, {"check": check, "name": name}) @skipUnlessDBFeature("supports_table_check_constraints") def test_database_constraint(self): Product.objects.create(price=10, discounted_price=5) with self.assertRaises(IntegrityError): Product.objects.create(price=10, discounted_price=20) @skipUnlessDBFeature("supports_table_check_constraints") def test_database_constraint_unicode(self): Product.objects.create(price=10, discounted_price=5, unit="μg/mL") with self.assertRaises(IntegrityError): Product.objects.create(price=10, discounted_price=7, unit="l") @skipUnlessDBFeature( "supports_table_check_constraints", "can_introspect_check_constraints" ) def test_name(self): constraints = get_constraints(Product._meta.db_table) for expected_name in ( "price_gt_discounted_price", "constraints_product_price_gt_0", ): with self.subTest(expected_name): self.assertIn(expected_name, constraints) @skipUnlessDBFeature( "supports_table_check_constraints", "can_introspect_check_constraints" ) def test_abstract_name(self): constraints = get_constraints(ChildModel._meta.db_table) self.assertIn("constraints_childmodel_adult", constraints) def test_validate(self): check = models.Q(price__gt=models.F("discounted_price")) constraint = models.CheckConstraint(check=check, name="price") # Invalid product. invalid_product = Product(price=10, discounted_price=42) with self.assertRaises(ValidationError): constraint.validate(Product, invalid_product) with self.assertRaises(ValidationError): constraint.validate(Product, invalid_product, exclude={"unit"}) # Fields used by the check constraint are excluded. constraint.validate(Product, invalid_product, exclude={"price"}) constraint.validate(Product, invalid_product, exclude={"discounted_price"}) constraint.validate( Product, invalid_product, exclude={"discounted_price", "price"}, ) # Valid product. constraint.validate(Product, Product(price=10, discounted_price=5)) def test_validate_boolean_expressions(self): constraint = models.CheckConstraint( check=models.expressions.ExpressionWrapper( models.Q(price__gt=500) | models.Q(price__lt=500), output_field=models.BooleanField(), ), name="price_neq_500_wrap", ) msg = f"Constraint “{constraint.name}” is violated." 
with self.assertRaisesMessage(ValidationError, msg): constraint.validate(Product, Product(price=500, discounted_price=5)) constraint.validate(Product, Product(price=501, discounted_price=5)) constraint.validate(Product, Product(price=499, discounted_price=5)) def test_validate_rawsql_expressions_noop(self): constraint = models.CheckConstraint( check=models.expressions.RawSQL( "price < %s OR price > %s", (500, 500), output_field=models.BooleanField(), ), name="price_neq_500_raw", ) # RawSQL can not be checked and is always considered valid. constraint.validate(Product, Product(price=500, discounted_price=5)) constraint.validate(Product, Product(price=501, discounted_price=5)) constraint.validate(Product, Product(price=499, discounted_price=5)) class UniqueConstraintTests(TestCase): @classmethod def setUpTestData(cls): cls.p1 = UniqueConstraintProduct.objects.create(name="p1", color="red") cls.p2 = UniqueConstraintProduct.objects.create(name="p2") def test_eq(self): self.assertEqual( models.UniqueConstraint(fields=["foo", "bar"], name="unique"), models.UniqueConstraint(fields=["foo", "bar"], name="unique"), ) self.assertEqual( models.UniqueConstraint(fields=["foo", "bar"], name="unique"), mock.ANY, ) self.assertNotEqual( models.UniqueConstraint(fields=["foo", "bar"], name="unique"), models.UniqueConstraint(fields=["foo", "bar"], name="unique2"), ) self.assertNotEqual( models.UniqueConstraint(fields=["foo", "bar"], name="unique"), models.UniqueConstraint(fields=["foo", "baz"], name="unique"), ) self.assertNotEqual( models.UniqueConstraint(fields=["foo", "bar"], name="unique"), 1 ) self.assertNotEqual( models.UniqueConstraint(fields=["foo", "bar"], name="unique"), models.UniqueConstraint( fields=["foo", "bar"], name="unique", violation_error_message="custom error", ), ) self.assertNotEqual( models.UniqueConstraint( fields=["foo", "bar"], name="unique", violation_error_message="custom error", ), models.UniqueConstraint( fields=["foo", "bar"], name="unique", violation_error_message="other custom error", ), ) self.assertEqual( models.UniqueConstraint( fields=["foo", "bar"], name="unique", violation_error_message="custom error", ), models.UniqueConstraint( fields=["foo", "bar"], name="unique", violation_error_message="custom error", ), ) def test_eq_with_condition(self): self.assertEqual( models.UniqueConstraint( fields=["foo", "bar"], name="unique", condition=models.Q(foo=models.F("bar")), ), models.UniqueConstraint( fields=["foo", "bar"], name="unique", condition=models.Q(foo=models.F("bar")), ), ) self.assertNotEqual( models.UniqueConstraint( fields=["foo", "bar"], name="unique", condition=models.Q(foo=models.F("bar")), ), models.UniqueConstraint( fields=["foo", "bar"], name="unique", condition=models.Q(foo=models.F("baz")), ), ) def test_eq_with_deferrable(self): constraint_1 = models.UniqueConstraint( fields=["foo", "bar"], name="unique", deferrable=models.Deferrable.DEFERRED, ) constraint_2 = models.UniqueConstraint( fields=["foo", "bar"], name="unique", deferrable=models.Deferrable.IMMEDIATE, ) self.assertEqual(constraint_1, constraint_1) self.assertNotEqual(constraint_1, constraint_2) def test_eq_with_include(self): constraint_1 = models.UniqueConstraint( fields=["foo", "bar"], name="include", include=["baz_1"], ) constraint_2 = models.UniqueConstraint( fields=["foo", "bar"], name="include", include=["baz_2"], ) self.assertEqual(constraint_1, constraint_1) self.assertNotEqual(constraint_1, constraint_2) def test_eq_with_opclasses(self): constraint_1 = models.UniqueConstraint( 
fields=["foo", "bar"], name="opclasses", opclasses=["text_pattern_ops", "varchar_pattern_ops"], ) constraint_2 = models.UniqueConstraint( fields=["foo", "bar"], name="opclasses", opclasses=["varchar_pattern_ops", "text_pattern_ops"], ) self.assertEqual(constraint_1, constraint_1) self.assertNotEqual(constraint_1, constraint_2) def test_eq_with_expressions(self): constraint = models.UniqueConstraint( Lower("title"), F("author"), name="book_func_uq", ) same_constraint = models.UniqueConstraint( Lower("title"), "author", name="book_func_uq", ) another_constraint = models.UniqueConstraint( Lower("title"), name="book_func_uq", ) self.assertEqual(constraint, same_constraint) self.assertEqual(constraint, mock.ANY) self.assertNotEqual(constraint, another_constraint) def test_repr(self): fields = ["foo", "bar"] name = "unique_fields" constraint = models.UniqueConstraint(fields=fields, name=name) self.assertEqual( repr(constraint), "<UniqueConstraint: fields=('foo', 'bar') name='unique_fields'>", ) def test_repr_with_condition(self): constraint = models.UniqueConstraint( fields=["foo", "bar"], name="unique_fields", condition=models.Q(foo=models.F("bar")), ) self.assertEqual( repr(constraint), "<UniqueConstraint: fields=('foo', 'bar') name='unique_fields' " "condition=(AND: ('foo', F(bar)))>", ) def test_repr_with_deferrable(self): constraint = models.UniqueConstraint( fields=["foo", "bar"], name="unique_fields", deferrable=models.Deferrable.IMMEDIATE, ) self.assertEqual( repr(constraint), "<UniqueConstraint: fields=('foo', 'bar') name='unique_fields' " "deferrable=Deferrable.IMMEDIATE>", ) def test_repr_with_include(self): constraint = models.UniqueConstraint( fields=["foo", "bar"], name="include_fields", include=["baz_1", "baz_2"], ) self.assertEqual( repr(constraint), "<UniqueConstraint: fields=('foo', 'bar') name='include_fields' " "include=('baz_1', 'baz_2')>", ) def test_repr_with_opclasses(self): constraint = models.UniqueConstraint( fields=["foo", "bar"], name="opclasses_fields", opclasses=["text_pattern_ops", "varchar_pattern_ops"], ) self.assertEqual( repr(constraint), "<UniqueConstraint: fields=('foo', 'bar') name='opclasses_fields' " "opclasses=['text_pattern_ops', 'varchar_pattern_ops']>", ) def test_repr_with_expressions(self): constraint = models.UniqueConstraint( Lower("title"), F("author"), name="book_func_uq", ) self.assertEqual( repr(constraint), "<UniqueConstraint: expressions=(Lower(F(title)), F(author)) " "name='book_func_uq'>", ) def test_deconstruction(self): fields = ["foo", "bar"] name = "unique_fields" constraint = models.UniqueConstraint(fields=fields, name=name) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, "django.db.models.UniqueConstraint") self.assertEqual(args, ()) self.assertEqual(kwargs, {"fields": tuple(fields), "name": name}) def test_deconstruction_with_condition(self): fields = ["foo", "bar"] name = "unique_fields" condition = models.Q(foo=models.F("bar")) constraint = models.UniqueConstraint( fields=fields, name=name, condition=condition ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, "django.db.models.UniqueConstraint") self.assertEqual(args, ()) self.assertEqual( kwargs, {"fields": tuple(fields), "name": name, "condition": condition} ) def test_deconstruction_with_deferrable(self): fields = ["foo"] name = "unique_fields" constraint = models.UniqueConstraint( fields=fields, name=name, deferrable=models.Deferrable.DEFERRED, ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, 
"django.db.models.UniqueConstraint") self.assertEqual(args, ()) self.assertEqual( kwargs, { "fields": tuple(fields), "name": name, "deferrable": models.Deferrable.DEFERRED, }, ) def test_deconstruction_with_include(self): fields = ["foo", "bar"] name = "unique_fields" include = ["baz_1", "baz_2"] constraint = models.UniqueConstraint(fields=fields, name=name, include=include) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, "django.db.models.UniqueConstraint") self.assertEqual(args, ()) self.assertEqual( kwargs, { "fields": tuple(fields), "name": name, "include": tuple(include), }, ) def test_deconstruction_with_opclasses(self): fields = ["foo", "bar"] name = "unique_fields" opclasses = ["varchar_pattern_ops", "text_pattern_ops"] constraint = models.UniqueConstraint( fields=fields, name=name, opclasses=opclasses ) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, "django.db.models.UniqueConstraint") self.assertEqual(args, ()) self.assertEqual( kwargs, { "fields": tuple(fields), "name": name, "opclasses": opclasses, }, ) def test_deconstruction_with_expressions(self): name = "unique_fields" constraint = models.UniqueConstraint(Lower("title"), name=name) path, args, kwargs = constraint.deconstruct() self.assertEqual(path, "django.db.models.UniqueConstraint") self.assertEqual(args, (Lower("title"),)) self.assertEqual(kwargs, {"name": name}) def test_database_constraint(self): with self.assertRaises(IntegrityError): UniqueConstraintProduct.objects.create( name=self.p1.name, color=self.p1.color ) @skipUnlessDBFeature("supports_partial_indexes") def test_database_constraint_with_condition(self): UniqueConstraintConditionProduct.objects.create(name="p1") UniqueConstraintConditionProduct.objects.create(name="p2") with self.assertRaises(IntegrityError): UniqueConstraintConditionProduct.objects.create(name="p1") def test_model_validation(self): msg = "Unique constraint product with this Name and Color already exists." with self.assertRaisesMessage(ValidationError, msg): UniqueConstraintProduct( name=self.p1.name, color=self.p1.color ).validate_constraints() @skipUnlessDBFeature("supports_partial_indexes") def test_model_validation_with_condition(self): """ Partial unique constraints are not ignored by Model.validate_constraints(). """ obj1 = UniqueConstraintConditionProduct.objects.create(name="p1", color="red") obj2 = UniqueConstraintConditionProduct.objects.create(name="p2") UniqueConstraintConditionProduct( name=obj1.name, color="blue" ).validate_constraints() msg = "Constraint “name_without_color_uniq” is violated." with self.assertRaisesMessage(ValidationError, msg): UniqueConstraintConditionProduct(name=obj2.name).validate_constraints() def test_validate(self): constraint = UniqueConstraintProduct._meta.constraints[0] msg = "Unique constraint product with this Name and Color already exists." non_unique_product = UniqueConstraintProduct( name=self.p1.name, color=self.p1.color ) with self.assertRaisesMessage(ValidationError, msg): constraint.validate(UniqueConstraintProduct, non_unique_product) # Null values are ignored. constraint.validate( UniqueConstraintProduct, UniqueConstraintProduct(name=self.p2.name, color=None), ) # Existing instances have their existing row excluded. constraint.validate(UniqueConstraintProduct, self.p1) # Unique fields are excluded. 
constraint.validate( UniqueConstraintProduct, non_unique_product, exclude={"name"}, ) constraint.validate( UniqueConstraintProduct, non_unique_product, exclude={"color"}, ) constraint.validate( UniqueConstraintProduct, non_unique_product, exclude={"name", "color"}, ) # Validation on a child instance. with self.assertRaisesMessage(ValidationError, msg): constraint.validate( UniqueConstraintProduct, ChildUniqueConstraintProduct(name=self.p1.name, color=self.p1.color), ) @skipUnlessDBFeature("supports_partial_indexes") def test_validate_condition(self): p1 = UniqueConstraintConditionProduct.objects.create(name="p1") constraint = UniqueConstraintConditionProduct._meta.constraints[0] msg = "Constraint “name_without_color_uniq” is violated." with self.assertRaisesMessage(ValidationError, msg): constraint.validate( UniqueConstraintConditionProduct, UniqueConstraintConditionProduct(name=p1.name, color=None), ) # Values not matching condition are ignored. constraint.validate( UniqueConstraintConditionProduct, UniqueConstraintConditionProduct(name=p1.name, color="anything-but-none"), ) # Existing instances have their existing row excluded. constraint.validate(UniqueConstraintConditionProduct, p1) # Unique field is excluded. constraint.validate( UniqueConstraintConditionProduct, UniqueConstraintConditionProduct(name=p1.name, color=None), exclude={"name"}, ) def test_validate_expression(self): constraint = models.UniqueConstraint(Lower("name"), name="name_lower_uniq") msg = "Constraint “name_lower_uniq” is violated." with self.assertRaisesMessage(ValidationError, msg): constraint.validate( UniqueConstraintProduct, UniqueConstraintProduct(name=self.p1.name.upper()), ) constraint.validate( UniqueConstraintProduct, UniqueConstraintProduct(name="another-name"), ) # Existing instances have their existing row excluded. constraint.validate(UniqueConstraintProduct, self.p1) # Unique field is excluded. constraint.validate( UniqueConstraintProduct, UniqueConstraintProduct(name=self.p1.name.upper()), exclude={"name"}, ) def test_validate_expression_condition(self): constraint = models.UniqueConstraint( Lower("name"), name="name_lower_without_color_uniq", condition=models.Q(color__isnull=True), ) non_unique_product = UniqueConstraintProduct(name=self.p2.name.upper()) msg = "Constraint “name_lower_without_color_uniq” is violated." with self.assertRaisesMessage(ValidationError, msg): constraint.validate(UniqueConstraintProduct, non_unique_product) # Values not matching condition are ignored. constraint.validate( UniqueConstraintProduct, UniqueConstraintProduct(name=self.p1.name, color=self.p1.color), ) # Existing instances have their existing row excluded. constraint.validate(UniqueConstraintProduct, self.p2) # Unique field is excluded. constraint.validate( UniqueConstraintProduct, non_unique_product, exclude={"name"}, ) # Field from a condition is excluded. constraint.validate( UniqueConstraintProduct, non_unique_product, exclude={"color"}, ) def test_name(self): constraints = get_constraints(UniqueConstraintProduct._meta.db_table) expected_name = "name_color_uniq" self.assertIn(expected_name, constraints) def test_condition_must_be_q(self): with self.assertRaisesMessage( ValueError, "UniqueConstraint.condition must be a Q instance." 
): models.UniqueConstraint(name="uniq", fields=["name"], condition="invalid") @skipUnlessDBFeature("supports_deferrable_unique_constraints") def test_initially_deferred_database_constraint(self): obj_1 = UniqueConstraintDeferrable.objects.create(name="p1", shelf="front") obj_2 = UniqueConstraintDeferrable.objects.create(name="p2", shelf="back") def swap(): obj_1.name, obj_2.name = obj_2.name, obj_1.name obj_1.save() obj_2.save() swap() # Behavior can be changed with SET CONSTRAINTS. with self.assertRaises(IntegrityError): with atomic(), connection.cursor() as cursor: constraint_name = connection.ops.quote_name("name_init_deferred_uniq") cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % constraint_name) swap() @skipUnlessDBFeature("supports_deferrable_unique_constraints") def test_initially_immediate_database_constraint(self): obj_1 = UniqueConstraintDeferrable.objects.create(name="p1", shelf="front") obj_2 = UniqueConstraintDeferrable.objects.create(name="p2", shelf="back") obj_1.shelf, obj_2.shelf = obj_2.shelf, obj_1.shelf with self.assertRaises(IntegrityError), atomic(): obj_1.save() # Behavior can be changed with SET CONSTRAINTS. with connection.cursor() as cursor: constraint_name = connection.ops.quote_name("sheld_init_immediate_uniq") cursor.execute("SET CONSTRAINTS %s DEFERRED" % constraint_name) obj_1.save() obj_2.save() def test_deferrable_with_condition(self): message = "UniqueConstraint with conditions cannot be deferred." with self.assertRaisesMessage(ValueError, message): models.UniqueConstraint( fields=["name"], name="name_without_color_unique", condition=models.Q(color__isnull=True), deferrable=models.Deferrable.DEFERRED, ) def test_deferrable_with_include(self): message = "UniqueConstraint with include fields cannot be deferred." with self.assertRaisesMessage(ValueError, message): models.UniqueConstraint( fields=["name"], name="name_inc_color_color_unique", include=["color"], deferrable=models.Deferrable.DEFERRED, ) def test_deferrable_with_opclasses(self): message = "UniqueConstraint with opclasses cannot be deferred." with self.assertRaisesMessage(ValueError, message): models.UniqueConstraint( fields=["name"], name="name_text_pattern_ops_unique", opclasses=["text_pattern_ops"], deferrable=models.Deferrable.DEFERRED, ) def test_deferrable_with_expressions(self): message = "UniqueConstraint with expressions cannot be deferred." with self.assertRaisesMessage(ValueError, message): models.UniqueConstraint( Lower("name"), name="deferred_expression_unique", deferrable=models.Deferrable.DEFERRED, ) def test_invalid_defer_argument(self): message = "UniqueConstraint.deferrable must be a Deferrable instance." with self.assertRaisesMessage(ValueError, message): models.UniqueConstraint( fields=["name"], name="name_invalid", deferrable="invalid", ) @skipUnlessDBFeature( "supports_table_check_constraints", "supports_covering_indexes", ) def test_include_database_constraint(self): UniqueConstraintInclude.objects.create(name="p1", color="red") with self.assertRaises(IntegrityError): UniqueConstraintInclude.objects.create(name="p1", color="blue") def test_invalid_include_argument(self): msg = "UniqueConstraint.include must be a list or tuple." with self.assertRaisesMessage(ValueError, msg): models.UniqueConstraint( name="uniq_include", fields=["field"], include="other", ) def test_invalid_opclasses_argument(self): msg = "UniqueConstraint.opclasses must be a list or tuple." 
with self.assertRaisesMessage(ValueError, msg): models.UniqueConstraint( name="uniq_opclasses", fields=["field"], opclasses="jsonb_path_ops", ) def test_opclasses_and_fields_same_length(self): msg = ( "UniqueConstraint.fields and UniqueConstraint.opclasses must have " "the same number of elements." ) with self.assertRaisesMessage(ValueError, msg): models.UniqueConstraint( name="uniq_opclasses", fields=["field"], opclasses=["foo", "bar"], ) def test_requires_field_or_expression(self): msg = ( "At least one field or expression is required to define a unique " "constraint." ) with self.assertRaisesMessage(ValueError, msg): models.UniqueConstraint(name="name") def test_expressions_and_fields_mutually_exclusive(self): msg = "UniqueConstraint.fields and expressions are mutually exclusive." with self.assertRaisesMessage(ValueError, msg): models.UniqueConstraint(Lower("field_1"), fields=["field_2"], name="name") def test_expressions_with_opclasses(self): msg = ( "UniqueConstraint.opclasses cannot be used with expressions. Use " "django.contrib.postgres.indexes.OpClass() instead." ) with self.assertRaisesMessage(ValueError, msg): models.UniqueConstraint( Lower("field"), name="test_func_opclass", opclasses=["jsonb_path_ops"], ) def test_requires_name(self): msg = "A unique constraint must be named." with self.assertRaisesMessage(ValueError, msg): models.UniqueConstraint(fields=["field"])
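
# Illustrative sketch (not part of the upstream test suite): standalone
# construction of the kinds of UniqueConstraint definitions exercised above,
# using the ``models`` and ``Lower`` names this module already imports. The
# constraint names are hypothetical placeholders.
_example_unique_constraints = [
    # Plain multi-column uniqueness.
    models.UniqueConstraint(fields=["name", "color"], name="example_name_color_uniq"),
    # Partial (conditional) uniqueness: only rows without a color take part.
    models.UniqueConstraint(
        fields=["name"],
        name="example_name_without_color_uniq",
        condition=models.Q(color__isnull=True),
    ),
    # Expression-based uniqueness on a case-insensitive value.
    models.UniqueConstraint(Lower("name"), name="example_name_lower_uniq"),
]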
ec245fd437dc7d563e762f23f9d767368e3f14d79ab05abdde2b049113f7c445
from django.db import models from django.test import TestCase from django.utils.deprecation import RemovedInDjango51Warning class IndexTogetherDeprecationTests(TestCase): def test_warning(self): msg = ( "'index_together' is deprecated. Use 'Meta.indexes' in " "'model_options.MyModel' instead." ) with self.assertRaisesMessage(RemovedInDjango51Warning, msg): class MyModel(models.Model): field_1 = models.IntegerField() field_2 = models.IntegerField() class Meta: app_label = "model_options" index_together = ["field_1", "field_2"]
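
# Illustrative note (not part of the upstream tests): the replacement that the
# deprecation message above points to is an explicit Meta.indexes declaration,
# roughly:
#
#     class Meta:
#         app_label = "model_options"
#         indexes = [models.Index(fields=["field_1", "field_2"])]
#
# The index name can be given explicitly via Index(name=...) or left for
# Django to autogenerate.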
751bf4e0df10a1296401cdcc4ec089997125756b5f26b5209d2f92ed161831c4
from django.db import models class City(models.Model): id = models.BigAutoField(primary_key=True) name = models.CharField(max_length=50) class Country(models.Model): id = models.SmallAutoField(primary_key=True) name = models.CharField(max_length=50) class District(models.Model): city = models.ForeignKey(City, models.CASCADE, primary_key=True) name = models.CharField(max_length=50) class Reporter(models.Model): first_name = models.CharField(max_length=30) last_name = models.CharField(max_length=30) email = models.EmailField() facebook_user_id = models.BigIntegerField(null=True) raw_data = models.BinaryField(null=True) small_int = models.SmallIntegerField() interval = models.DurationField() class Meta: unique_together = ("first_name", "last_name") class Article(models.Model): headline = models.CharField(max_length=100) pub_date = models.DateField() body = models.TextField(default="") reporter = models.ForeignKey(Reporter, models.CASCADE) response_to = models.ForeignKey("self", models.SET_NULL, null=True) unmanaged_reporters = models.ManyToManyField( Reporter, through="ArticleReporter", related_name="+" ) class Meta: ordering = ("headline",) indexes = [ models.Index(fields=["headline", "pub_date"]), models.Index(fields=["headline", "response_to", "pub_date", "reporter"]), ] class ArticleReporter(models.Model): article = models.ForeignKey(Article, models.CASCADE) reporter = models.ForeignKey(Reporter, models.CASCADE) class Meta: managed = False class Comment(models.Model): ref = models.UUIDField(unique=True) article = models.ForeignKey(Article, models.CASCADE, db_index=True) email = models.EmailField() pub_date = models.DateTimeField() body = models.TextField() class Meta: constraints = [ models.UniqueConstraint( fields=["article", "email", "pub_date"], name="article_email_pub_date_uniq", ), ] indexes = [ models.Index(fields=["email", "pub_date"], name="email_pub_date_idx"), ] class CheckConstraintModel(models.Model): up_votes = models.PositiveIntegerField() voting_number = models.PositiveIntegerField(unique=True) class Meta: required_db_features = { "supports_table_check_constraints", } constraints = [ models.CheckConstraint( name="up_votes_gte_0_check", check=models.Q(up_votes__gte=0) ), ] class UniqueConstraintConditionModel(models.Model): name = models.CharField(max_length=255) color = models.CharField(max_length=32, null=True) class Meta: required_db_features = {"supports_partial_indexes"} constraints = [ models.UniqueConstraint( fields=["name"], name="cond_name_without_color_uniq", condition=models.Q(color__isnull=True), ), ]
2afba383fa514ebdc453b84f1118e8bd244b730075c30bdb24ee6686909c29c0
import time import unittest from datetime import date, datetime from django.core.exceptions import FieldError from django.db import connection, models from django.test import SimpleTestCase, TestCase, override_settings from django.test.utils import register_lookup from django.utils import timezone from .models import Article, Author, MySQLUnixTimestamp class Div3Lookup(models.Lookup): lookup_name = "div3" def as_sql(self, compiler, connection): lhs, params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params.extend(rhs_params) return "(%s) %%%% 3 = %s" % (lhs, rhs), params def as_oracle(self, compiler, connection): lhs, params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params.extend(rhs_params) return "mod(%s, 3) = %s" % (lhs, rhs), params class Div3Transform(models.Transform): lookup_name = "div3" def as_sql(self, compiler, connection): lhs, lhs_params = compiler.compile(self.lhs) return "(%s) %%%% 3" % lhs, lhs_params def as_oracle(self, compiler, connection, **extra_context): lhs, lhs_params = compiler.compile(self.lhs) return "mod(%s, 3)" % lhs, lhs_params class Div3BilateralTransform(Div3Transform): bilateral = True class Mult3BilateralTransform(models.Transform): bilateral = True lookup_name = "mult3" def as_sql(self, compiler, connection): lhs, lhs_params = compiler.compile(self.lhs) return "3 * (%s)" % lhs, lhs_params class LastDigitTransform(models.Transform): lookup_name = "lastdigit" def as_sql(self, compiler, connection): lhs, lhs_params = compiler.compile(self.lhs) return "SUBSTR(CAST(%s AS CHAR(2)), 2, 1)" % lhs, lhs_params class UpperBilateralTransform(models.Transform): bilateral = True lookup_name = "upper" def as_sql(self, compiler, connection): lhs, lhs_params = compiler.compile(self.lhs) return "UPPER(%s)" % lhs, lhs_params class YearTransform(models.Transform): # Use a name that avoids collision with the built-in year lookup. lookup_name = "testyear" def as_sql(self, compiler, connection): lhs_sql, params = compiler.compile(self.lhs) return connection.ops.date_extract_sql("year", lhs_sql, params) @property def output_field(self): return models.IntegerField() @YearTransform.register_lookup class YearExact(models.lookups.Lookup): lookup_name = "exact" def as_sql(self, compiler, connection): # We will need to skip the extract part, and instead go # directly with the originating field, that is self.lhs.lhs lhs_sql, lhs_params = self.process_lhs(compiler, connection, self.lhs.lhs) rhs_sql, rhs_params = self.process_rhs(compiler, connection) # Note that we must be careful so that we have params in the # same order as we have the parts in the SQL. params = lhs_params + rhs_params + lhs_params + rhs_params # We use PostgreSQL specific SQL here. Note that we must do the # conversions in SQL instead of in Python to support F() references. return ( "%(lhs)s >= (%(rhs)s || '-01-01')::date " "AND %(lhs)s <= (%(rhs)s || '-12-31')::date" % {"lhs": lhs_sql, "rhs": rhs_sql}, params, ) @YearTransform.register_lookup class YearLte(models.lookups.LessThanOrEqual): """ The purpose of this lookup is to efficiently compare the year of the field. """ def as_sql(self, compiler, connection): # Skip the YearTransform above us (no possibility for efficient # lookup otherwise). 
real_lhs = self.lhs.lhs lhs_sql, params = self.process_lhs(compiler, connection, real_lhs) rhs_sql, rhs_params = self.process_rhs(compiler, connection) params.extend(rhs_params) # Build SQL where the integer year is concatenated with last month # and day, then convert that to date. (We try to have SQL like: # WHERE somecol <= '2013-12-31') # but also make it work if the rhs_sql is field reference. return "%s <= (%s || '-12-31')::date" % (lhs_sql, rhs_sql), params class Exactly(models.lookups.Exact): """ This lookup is used to test lookup registration. """ lookup_name = "exactly" def get_rhs_op(self, connection, rhs): return connection.operators["exact"] % rhs class SQLFuncMixin: def as_sql(self, compiler, connection): return "%s()" % self.name, [] @property def output_field(self): return CustomField() class SQLFuncLookup(SQLFuncMixin, models.Lookup): def __init__(self, name, *args, **kwargs): super().__init__(*args, **kwargs) self.name = name class SQLFuncTransform(SQLFuncMixin, models.Transform): def __init__(self, name, *args, **kwargs): super().__init__(*args, **kwargs) self.name = name class SQLFuncFactory: def __init__(self, key, name): self.key = key self.name = name def __call__(self, *args, **kwargs): if self.key == "lookupfunc": return SQLFuncLookup(self.name, *args, **kwargs) return SQLFuncTransform(self.name, *args, **kwargs) class CustomField(models.TextField): def get_lookup(self, lookup_name): if lookup_name.startswith("lookupfunc_"): key, name = lookup_name.split("_", 1) return SQLFuncFactory(key, name) return super().get_lookup(lookup_name) def get_transform(self, lookup_name): if lookup_name.startswith("transformfunc_"): key, name = lookup_name.split("_", 1) return SQLFuncFactory(key, name) return super().get_transform(lookup_name) class CustomModel(models.Model): field = CustomField() # We will register this class temporarily in the test method. class InMonth(models.lookups.Lookup): """ InMonth matches if the column's month is the same as value's month. """ lookup_name = "inmonth" def as_sql(self, compiler, connection): lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) # We need to be careful so that we get the params in right # places. params = lhs_params + rhs_params + lhs_params + rhs_params return ( "%s >= date_trunc('month', %s) and " "%s < date_trunc('month', %s) + interval '1 months'" % (lhs, rhs, lhs, rhs), params, ) class DateTimeTransform(models.Transform): lookup_name = "as_datetime" @property def output_field(self): return models.DateTimeField() def as_sql(self, compiler, connection): lhs, params = compiler.compile(self.lhs) return "from_unixtime({})".format(lhs), params class LookupTests(TestCase): def test_custom_name_lookup(self): a1 = Author.objects.create(name="a1", birthdate=date(1981, 2, 16)) Author.objects.create(name="a2", birthdate=date(2012, 2, 29)) with register_lookup(models.DateField, YearTransform), register_lookup( models.DateField, YearTransform, lookup_name="justtheyear" ), register_lookup(YearTransform, Exactly), register_lookup( YearTransform, Exactly, lookup_name="isactually" ): qs1 = Author.objects.filter(birthdate__testyear__exactly=1981) qs2 = Author.objects.filter(birthdate__justtheyear__isactually=1981) self.assertSequenceEqual(qs1, [a1]) self.assertSequenceEqual(qs2, [a1]) def test_custom_exact_lookup_none_rhs(self): """ __exact=None is transformed to __isnull=True if a custom lookup class with lookup_name != 'exact' is registered as the `exact` lookup. 
""" field = Author._meta.get_field("birthdate") OldExactLookup = field.get_lookup("exact") author = Author.objects.create(name="author", birthdate=None) try: field.register_lookup(Exactly, "exact") self.assertEqual(Author.objects.get(birthdate__exact=None), author) finally: field.register_lookup(OldExactLookup, "exact") def test_basic_lookup(self): a1 = Author.objects.create(name="a1", age=1) a2 = Author.objects.create(name="a2", age=2) a3 = Author.objects.create(name="a3", age=3) a4 = Author.objects.create(name="a4", age=4) with register_lookup(models.IntegerField, Div3Lookup): self.assertSequenceEqual(Author.objects.filter(age__div3=0), [a3]) self.assertSequenceEqual( Author.objects.filter(age__div3=1).order_by("age"), [a1, a4] ) self.assertSequenceEqual(Author.objects.filter(age__div3=2), [a2]) self.assertSequenceEqual(Author.objects.filter(age__div3=3), []) @unittest.skipUnless( connection.vendor == "postgresql", "PostgreSQL specific SQL used" ) def test_birthdate_month(self): a1 = Author.objects.create(name="a1", birthdate=date(1981, 2, 16)) a2 = Author.objects.create(name="a2", birthdate=date(2012, 2, 29)) a3 = Author.objects.create(name="a3", birthdate=date(2012, 1, 31)) a4 = Author.objects.create(name="a4", birthdate=date(2012, 3, 1)) with register_lookup(models.DateField, InMonth): self.assertSequenceEqual( Author.objects.filter(birthdate__inmonth=date(2012, 1, 15)), [a3] ) self.assertSequenceEqual( Author.objects.filter(birthdate__inmonth=date(2012, 2, 1)), [a2] ) self.assertSequenceEqual( Author.objects.filter(birthdate__inmonth=date(1981, 2, 28)), [a1] ) self.assertSequenceEqual( Author.objects.filter(birthdate__inmonth=date(2012, 3, 12)), [a4] ) self.assertSequenceEqual( Author.objects.filter(birthdate__inmonth=date(2012, 4, 1)), [] ) def test_div3_extract(self): with register_lookup(models.IntegerField, Div3Transform): a1 = Author.objects.create(name="a1", age=1) a2 = Author.objects.create(name="a2", age=2) a3 = Author.objects.create(name="a3", age=3) a4 = Author.objects.create(name="a4", age=4) baseqs = Author.objects.order_by("name") self.assertSequenceEqual(baseqs.filter(age__div3=2), [a2]) self.assertSequenceEqual(baseqs.filter(age__div3__lte=3), [a1, a2, a3, a4]) self.assertSequenceEqual(baseqs.filter(age__div3__in=[0, 2]), [a2, a3]) self.assertSequenceEqual(baseqs.filter(age__div3__in=[2, 4]), [a2]) self.assertSequenceEqual(baseqs.filter(age__div3__gte=3), []) self.assertSequenceEqual( baseqs.filter(age__div3__range=(1, 2)), [a1, a2, a4] ) def test_foreignobject_lookup_registration(self): field = Article._meta.get_field("author") with register_lookup(models.ForeignObject, Exactly): self.assertIs(field.get_lookup("exactly"), Exactly) # ForeignObject should ignore regular Field lookups with register_lookup(models.Field, Exactly): self.assertIsNone(field.get_lookup("exactly")) def test_lookups_caching(self): field = Article._meta.get_field("author") # clear and re-cache field.get_lookups.cache_clear() self.assertNotIn("exactly", field.get_lookups()) # registration should bust the cache with register_lookup(models.ForeignObject, Exactly): # getting the lookups again should re-cache self.assertIn("exactly", field.get_lookups()) # Unregistration should bust the cache. 
self.assertNotIn("exactly", field.get_lookups()) class BilateralTransformTests(TestCase): def test_bilateral_upper(self): with register_lookup(models.CharField, UpperBilateralTransform): author1 = Author.objects.create(name="Doe") author2 = Author.objects.create(name="doe") author3 = Author.objects.create(name="Foo") self.assertCountEqual( Author.objects.filter(name__upper="doe"), [author1, author2], ) self.assertSequenceEqual( Author.objects.filter(name__upper__contains="f"), [author3], ) def test_bilateral_inner_qs(self): with register_lookup(models.CharField, UpperBilateralTransform): msg = "Bilateral transformations on nested querysets are not implemented." with self.assertRaisesMessage(NotImplementedError, msg): Author.objects.filter( name__upper__in=Author.objects.values_list("name") ) def test_bilateral_multi_value(self): with register_lookup(models.CharField, UpperBilateralTransform): Author.objects.bulk_create( [ Author(name="Foo"), Author(name="Bar"), Author(name="Ray"), ] ) self.assertQuerysetEqual( Author.objects.filter(name__upper__in=["foo", "bar", "doe"]).order_by( "name" ), ["Bar", "Foo"], lambda a: a.name, ) def test_div3_bilateral_extract(self): with register_lookup(models.IntegerField, Div3BilateralTransform): a1 = Author.objects.create(name="a1", age=1) a2 = Author.objects.create(name="a2", age=2) a3 = Author.objects.create(name="a3", age=3) a4 = Author.objects.create(name="a4", age=4) baseqs = Author.objects.order_by("name") self.assertSequenceEqual(baseqs.filter(age__div3=2), [a2]) self.assertSequenceEqual(baseqs.filter(age__div3__lte=3), [a3]) self.assertSequenceEqual(baseqs.filter(age__div3__in=[0, 2]), [a2, a3]) self.assertSequenceEqual(baseqs.filter(age__div3__in=[2, 4]), [a1, a2, a4]) self.assertSequenceEqual(baseqs.filter(age__div3__gte=3), [a1, a2, a3, a4]) self.assertSequenceEqual( baseqs.filter(age__div3__range=(1, 2)), [a1, a2, a4] ) def test_bilateral_order(self): with register_lookup( models.IntegerField, Mult3BilateralTransform, Div3BilateralTransform ): a1 = Author.objects.create(name="a1", age=1) a2 = Author.objects.create(name="a2", age=2) a3 = Author.objects.create(name="a3", age=3) a4 = Author.objects.create(name="a4", age=4) baseqs = Author.objects.order_by("name") # mult3__div3 always leads to 0 self.assertSequenceEqual( baseqs.filter(age__mult3__div3=42), [a1, a2, a3, a4] ) self.assertSequenceEqual(baseqs.filter(age__div3__mult3=42), [a3]) def test_transform_order_by(self): with register_lookup(models.IntegerField, LastDigitTransform): a1 = Author.objects.create(name="a1", age=11) a2 = Author.objects.create(name="a2", age=23) a3 = Author.objects.create(name="a3", age=32) a4 = Author.objects.create(name="a4", age=40) qs = Author.objects.order_by("age__lastdigit") self.assertSequenceEqual(qs, [a4, a1, a3, a2]) def test_bilateral_fexpr(self): with register_lookup(models.IntegerField, Mult3BilateralTransform): a1 = Author.objects.create(name="a1", age=1, average_rating=3.2) a2 = Author.objects.create(name="a2", age=2, average_rating=0.5) a3 = Author.objects.create(name="a3", age=3, average_rating=1.5) a4 = Author.objects.create(name="a4", age=4) baseqs = Author.objects.order_by("name") self.assertSequenceEqual( baseqs.filter(age__mult3=models.F("age")), [a1, a2, a3, a4] ) # Same as age >= average_rating self.assertSequenceEqual( baseqs.filter(age__mult3__gte=models.F("average_rating")), [a2, a3] ) @override_settings(USE_TZ=True) class DateTimeLookupTests(TestCase): @unittest.skipUnless(connection.vendor == "mysql", "MySQL specific SQL used") def 
test_datetime_output_field(self): with register_lookup(models.PositiveIntegerField, DateTimeTransform): ut = MySQLUnixTimestamp.objects.create(timestamp=time.time()) y2k = timezone.make_aware(datetime(2000, 1, 1)) self.assertSequenceEqual( MySQLUnixTimestamp.objects.filter(timestamp__as_datetime__gt=y2k), [ut] ) class YearLteTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name="a1", birthdate=date(1981, 2, 16)) cls.a2 = Author.objects.create(name="a2", birthdate=date(2012, 2, 29)) cls.a3 = Author.objects.create(name="a3", birthdate=date(2012, 1, 31)) cls.a4 = Author.objects.create(name="a4", birthdate=date(2012, 3, 1)) def setUp(self): models.DateField.register_lookup(YearTransform) def tearDown(self): models.DateField._unregister_lookup(YearTransform) @unittest.skipUnless( connection.vendor == "postgresql", "PostgreSQL specific SQL used" ) def test_year_lte(self): baseqs = Author.objects.order_by("name") self.assertSequenceEqual( baseqs.filter(birthdate__testyear__lte=2012), [self.a1, self.a2, self.a3, self.a4], ) self.assertSequenceEqual( baseqs.filter(birthdate__testyear=2012), [self.a2, self.a3, self.a4] ) self.assertNotIn("BETWEEN", str(baseqs.filter(birthdate__testyear=2012).query)) self.assertSequenceEqual( baseqs.filter(birthdate__testyear__lte=2011), [self.a1] ) # The non-optimized version works, too. self.assertSequenceEqual(baseqs.filter(birthdate__testyear__lt=2012), [self.a1]) @unittest.skipUnless( connection.vendor == "postgresql", "PostgreSQL specific SQL used" ) def test_year_lte_fexpr(self): self.a2.age = 2011 self.a2.save() self.a3.age = 2012 self.a3.save() self.a4.age = 2013 self.a4.save() baseqs = Author.objects.order_by("name") self.assertSequenceEqual( baseqs.filter(birthdate__testyear__lte=models.F("age")), [self.a3, self.a4] ) self.assertSequenceEqual( baseqs.filter(birthdate__testyear__lt=models.F("age")), [self.a4] ) def test_year_lte_sql(self): # This test will just check the generated SQL for __lte. This # doesn't require running on PostgreSQL and spots the most likely # error - not running YearLte SQL at all. baseqs = Author.objects.order_by("name") self.assertIn( "<= (2011 || ", str(baseqs.filter(birthdate__testyear__lte=2011).query) ) self.assertIn("-12-31", str(baseqs.filter(birthdate__testyear__lte=2011).query)) def test_postgres_year_exact(self): baseqs = Author.objects.order_by("name") self.assertIn("= (2011 || ", str(baseqs.filter(birthdate__testyear=2011).query)) self.assertIn("-12-31", str(baseqs.filter(birthdate__testyear=2011).query)) def test_custom_implementation_year_exact(self): try: # Two ways to add a customized implementation for different backends: # First is MonkeyPatch of the class. def as_custom_sql(self, compiler, connection): lhs_sql, lhs_params = self.process_lhs( compiler, connection, self.lhs.lhs ) rhs_sql, rhs_params = self.process_rhs(compiler, connection) params = lhs_params + rhs_params + lhs_params + rhs_params return ( "%(lhs)s >= " "str_to_date(concat(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') " "AND %(lhs)s <= " "str_to_date(concat(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" % {"lhs": lhs_sql, "rhs": rhs_sql}, params, ) setattr(YearExact, "as_" + connection.vendor, as_custom_sql) self.assertIn( "concat(", str(Author.objects.filter(birthdate__testyear=2012).query) ) finally: delattr(YearExact, "as_" + connection.vendor) try: # The other way is to subclass the original lookup and register the # subclassed lookup instead of the original. 
class CustomYearExact(YearExact): # This method should be named "as_mysql" for MySQL, # "as_postgresql" for postgres and so on, but as we don't know # which DB we are running on, we need to use setattr. def as_custom_sql(self, compiler, connection): lhs_sql, lhs_params = self.process_lhs( compiler, connection, self.lhs.lhs ) rhs_sql, rhs_params = self.process_rhs(compiler, connection) params = lhs_params + rhs_params + lhs_params + rhs_params return ( "%(lhs)s >= " "str_to_date(CONCAT(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') " "AND %(lhs)s <= " "str_to_date(CONCAT(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" % {"lhs": lhs_sql, "rhs": rhs_sql}, params, ) setattr( CustomYearExact, "as_" + connection.vendor, CustomYearExact.as_custom_sql, ) YearTransform.register_lookup(CustomYearExact) self.assertIn( "CONCAT(", str(Author.objects.filter(birthdate__testyear=2012).query) ) finally: YearTransform._unregister_lookup(CustomYearExact) YearTransform.register_lookup(YearExact) class TrackCallsYearTransform(YearTransform): # Use a name that avoids collision with the built-in year lookup. lookup_name = "testyear" call_order = [] def as_sql(self, compiler, connection): lhs_sql, params = compiler.compile(self.lhs) return connection.ops.date_extract_sql("year", lhs_sql), params @property def output_field(self): return models.IntegerField() def get_lookup(self, lookup_name): self.call_order.append("lookup") return super().get_lookup(lookup_name) def get_transform(self, lookup_name): self.call_order.append("transform") return super().get_transform(lookup_name) class LookupTransformCallOrderTests(SimpleTestCase): def test_call_order(self): with register_lookup(models.DateField, TrackCallsYearTransform): # junk lookup - tries lookup, then transform, then fails msg = ( "Unsupported lookup 'junk' for IntegerField or join on the field not " "permitted." 
) with self.assertRaisesMessage(FieldError, msg): Author.objects.filter(birthdate__testyear__junk=2012) self.assertEqual( TrackCallsYearTransform.call_order, ["lookup", "transform"] ) TrackCallsYearTransform.call_order = [] # junk transform - tries transform only, then fails with self.assertRaisesMessage(FieldError, msg): Author.objects.filter(birthdate__testyear__junk__more_junk=2012) self.assertEqual(TrackCallsYearTransform.call_order, ["transform"]) TrackCallsYearTransform.call_order = [] # Just getting the year (implied __exact) - lookup only Author.objects.filter(birthdate__testyear=2012) self.assertEqual(TrackCallsYearTransform.call_order, ["lookup"]) TrackCallsYearTransform.call_order = [] # Just getting the year (explicit __exact) - lookup only Author.objects.filter(birthdate__testyear__exact=2012) self.assertEqual(TrackCallsYearTransform.call_order, ["lookup"]) class CustomisedMethodsTests(SimpleTestCase): def test_overridden_get_lookup(self): q = CustomModel.objects.filter(field__lookupfunc_monkeys=3) self.assertIn("monkeys()", str(q.query)) def test_overridden_get_transform(self): q = CustomModel.objects.filter(field__transformfunc_banana=3) self.assertIn("banana()", str(q.query)) def test_overridden_get_lookup_chain(self): q = CustomModel.objects.filter( field__transformfunc_banana__lookupfunc_elephants=3 ) self.assertIn("elephants()", str(q.query)) def test_overridden_get_transform_chain(self): q = CustomModel.objects.filter( field__transformfunc_banana__transformfunc_pear=3 ) self.assertIn("pear()", str(q.query)) class SubqueryTransformTests(TestCase): def test_subquery_usage(self): with register_lookup(models.IntegerField, Div3Transform): Author.objects.create(name="a1", age=1) a2 = Author.objects.create(name="a2", age=2) Author.objects.create(name="a3", age=3) Author.objects.create(name="a4", age=4) qs = Author.objects.order_by("name").filter( id__in=Author.objects.filter(age__div3=2) ) self.assertSequenceEqual(qs, [a2])
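
# Illustrative sketch (not part of the upstream tests): the registration
# pattern these tests rely on. A Lookup or Transform subclass is attached to a
# field class either temporarily, with the register_lookup() context manager
# used throughout this module, or permanently via the register_lookup()
# classmethod, e.g.:
#
#     with register_lookup(models.IntegerField, Div3Transform):
#         Author.objects.filter(age__div3=0)
#
#     models.DateField.register_lookup(YearTransform)  # permanent registration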
005e887ac55dc3c446b8427584423f37d90c484cdf685f148813824b020bd4b6
from django.db import connection, migrations from django.db.migrations.operations.base import Operation class DummyOperation(Operation): def state_forwards(self, app_label, state): pass def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass try: from django.contrib.postgres.operations import ( BloomExtension, BtreeGinExtension, BtreeGistExtension, CITextExtension, CreateExtension, CryptoExtension, HStoreExtension, TrigramExtension, UnaccentExtension, ) except ImportError: BloomExtension = DummyOperation BtreeGinExtension = DummyOperation BtreeGistExtension = DummyOperation CITextExtension = DummyOperation CreateExtension = DummyOperation HStoreExtension = DummyOperation TrigramExtension = DummyOperation UnaccentExtension = DummyOperation needs_crypto_extension = False else: needs_crypto_extension = ( connection.vendor == "postgresql" and not connection.features.is_postgresql_13 ) class Migration(migrations.Migration): operations = [ BloomExtension(), BtreeGinExtension(), BtreeGistExtension(), CITextExtension(), # Ensure CreateExtension quotes extension names by creating one with a # dash in its name. CreateExtension("uuid-ossp"), # CryptoExtension is required for RandomUUID() on PostgreSQL < 13. CryptoExtension() if needs_crypto_extension else DummyOperation(), HStoreExtension(), TrigramExtension(), UnaccentExtension(), ]
7cdae198260bf4939e36c1bfd03ed9c6e4112da7e6a95ad385212e3c39a5a7d5
import inspect import os from functools import partial, wraps from asgiref.local import Local from django.template import Context, Template, TemplateSyntaxError from django.template.base import Token, TokenType from django.templatetags.i18n import BlockTranslateNode from django.test import SimpleTestCase, override_settings from django.utils import translation from django.utils.safestring import mark_safe from django.utils.translation import trans_real from ...utils import setup as base_setup from .base import MultipleLocaleActivationTestCase, extended_locale_paths, here def setup(templates, *args, **kwargs): blocktranslate_setup = base_setup(templates, *args, **kwargs) blocktrans_setup = base_setup( { name: template.replace("{% blocktranslate ", "{% blocktrans ").replace( "{% endblocktranslate %}", "{% endblocktrans %}" ) for name, template in templates.items() } ) tags = { "blocktrans": blocktrans_setup, "blocktranslate": blocktranslate_setup, } def decorator(func): @wraps(func) def inner(self, *args): signature = inspect.signature(func) for tag_name, setup_func in tags.items(): if "tag_name" in signature.parameters: setup_func(partial(func, tag_name=tag_name))(self) else: setup_func(func)(self) return inner return decorator class I18nBlockTransTagTests(SimpleTestCase): libraries = {"i18n": "django.templatetags.i18n"} @setup( { "i18n03": ( "{% load i18n %}{% blocktranslate %}{{ anton }}{% endblocktranslate %}" ) } ) def test_i18n03(self): """simple translation of a variable""" output = self.engine.render_to_string("i18n03", {"anton": "Å"}) self.assertEqual(output, "Å") @setup( { "i18n04": ( "{% load i18n %}{% blocktranslate with berta=anton|lower %}{{ berta }}" "{% endblocktranslate %}" ) } ) def test_i18n04(self): """simple translation of a variable and filter""" output = self.engine.render_to_string("i18n04", {"anton": "Å"}) self.assertEqual(output, "å") @setup( { "legacyi18n04": ( "{% load i18n %}" "{% blocktranslate with anton|lower as berta %}{{ berta }}" "{% endblocktranslate %}" ) } ) def test_legacyi18n04(self): """simple translation of a variable and filter""" output = self.engine.render_to_string("legacyi18n04", {"anton": "Å"}) self.assertEqual(output, "å") @setup( { "i18n05": ( "{% load i18n %}{% blocktranslate %}xxx{{ anton }}xxx" "{% endblocktranslate %}" ) } ) def test_i18n05(self): """simple translation of a string with interpolation""" output = self.engine.render_to_string("i18n05", {"anton": "yyy"}) self.assertEqual(output, "xxxyyyxxx") @setup( { "i18n07": "{% load i18n %}" "{% blocktranslate count counter=number %}singular{% plural %}" "{{ counter }} plural{% endblocktranslate %}" } ) def test_i18n07(self): """translation of singular form""" output = self.engine.render_to_string("i18n07", {"number": 1}) self.assertEqual(output, "singular") @setup( { "legacyi18n07": "{% load i18n %}" "{% blocktranslate count number as counter %}singular{% plural %}" "{{ counter }} plural{% endblocktranslate %}" } ) def test_legacyi18n07(self): """translation of singular form""" output = self.engine.render_to_string("legacyi18n07", {"number": 1}) self.assertEqual(output, "singular") @setup( { "i18n08": "{% load i18n %}" "{% blocktranslate count number as counter %}singular{% plural %}" "{{ counter }} plural{% endblocktranslate %}" } ) def test_i18n08(self): """translation of plural form""" output = self.engine.render_to_string("i18n08", {"number": 2}) self.assertEqual(output, "2 plural") @setup( { "legacyi18n08": "{% load i18n %}" "{% blocktranslate count counter=number %}singular{% plural 
%}" "{{ counter }} plural{% endblocktranslate %}" } ) def test_legacyi18n08(self): """translation of plural form""" output = self.engine.render_to_string("legacyi18n08", {"number": 2}) self.assertEqual(output, "2 plural") @setup( { "i18n17": ( "{% load i18n %}" "{% blocktranslate with berta=anton|escape %}{{ berta }}" "{% endblocktranslate %}" ) } ) def test_i18n17(self): """ Escaping inside blocktranslate and translate works as if it was directly in the template. """ output = self.engine.render_to_string("i18n17", {"anton": "α & β"}) self.assertEqual(output, "α &amp; β") @setup( { "i18n18": ( "{% load i18n %}" "{% blocktranslate with berta=anton|force_escape %}{{ berta }}" "{% endblocktranslate %}" ) } ) def test_i18n18(self): output = self.engine.render_to_string("i18n18", {"anton": "α & β"}) self.assertEqual(output, "α &amp; β") @setup( { "i18n19": ( "{% load i18n %}{% blocktranslate %}{{ andrew }}{% endblocktranslate %}" ) } ) def test_i18n19(self): output = self.engine.render_to_string("i18n19", {"andrew": "a & b"}) self.assertEqual(output, "a &amp; b") @setup( { "i18n21": ( "{% load i18n %}{% blocktranslate %}{{ andrew }}{% endblocktranslate %}" ) } ) def test_i18n21(self): output = self.engine.render_to_string("i18n21", {"andrew": mark_safe("a & b")}) self.assertEqual(output, "a & b") @setup( { "legacyi18n17": ( "{% load i18n %}" "{% blocktranslate with anton|escape as berta %}{{ berta }}" "{% endblocktranslate %}" ) } ) def test_legacyi18n17(self): output = self.engine.render_to_string("legacyi18n17", {"anton": "α & β"}) self.assertEqual(output, "α &amp; β") @setup( { "legacyi18n18": "{% load i18n %}" "{% blocktranslate with anton|force_escape as berta %}" "{{ berta }}{% endblocktranslate %}" } ) def test_legacyi18n18(self): output = self.engine.render_to_string("legacyi18n18", {"anton": "α & β"}) self.assertEqual(output, "α &amp; β") @setup( { "i18n26": "{% load i18n %}" "{% blocktranslate with extra_field=myextra_field count counter=number %}" "singular {{ extra_field }}{% plural %}plural{% endblocktranslate %}" } ) def test_i18n26(self): """ translation of plural form with extra field in singular form (#13568) """ output = self.engine.render_to_string( "i18n26", {"myextra_field": "test", "number": 1} ) self.assertEqual(output, "singular test") @setup( { "legacyi18n26": ( "{% load i18n %}" "{% blocktranslate with myextra_field as extra_field " "count number as counter %}singular {{ extra_field }}{% plural %}plural" "{% endblocktranslate %}" ) } ) def test_legacyi18n26(self): output = self.engine.render_to_string( "legacyi18n26", {"myextra_field": "test", "number": 1} ) self.assertEqual(output, "singular test") @setup( { "i18n27": "{% load i18n %}{% blocktranslate count counter=number %}" "{{ counter }} result{% plural %}{{ counter }} results" "{% endblocktranslate %}" } ) def test_i18n27(self): """translation of singular form in Russian (#14126)""" with translation.override("ru"): output = self.engine.render_to_string("i18n27", {"number": 1}) self.assertEqual( output, "1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442" ) @setup( { "legacyi18n27": "{% load i18n %}" "{% blocktranslate count number as counter %}{{ counter }} result" "{% plural %}{{ counter }} results{% endblocktranslate %}" } ) def test_legacyi18n27(self): with translation.override("ru"): output = self.engine.render_to_string("legacyi18n27", {"number": 1}) self.assertEqual( output, "1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442" ) @setup( { "i18n28": ( "{% load i18n %}" "{% blocktranslate with 
a=anton b=berta %}{{ a }} + {{ b }}" "{% endblocktranslate %}" ) } ) def test_i18n28(self): """simple translation of multiple variables""" output = self.engine.render_to_string("i18n28", {"anton": "α", "berta": "β"}) self.assertEqual(output, "α + β") @setup( { "legacyi18n28": "{% load i18n %}" "{% blocktranslate with anton as a and berta as b %}" "{{ a }} + {{ b }}{% endblocktranslate %}" } ) def test_legacyi18n28(self): output = self.engine.render_to_string( "legacyi18n28", {"anton": "α", "berta": "β"} ) self.assertEqual(output, "α + β") # blocktranslate handling of variables which are not in the context. # this should work as if blocktranslate was not there (#19915) @setup( { "i18n34": ( "{% load i18n %}{% blocktranslate %}{{ missing }}" "{% endblocktranslate %}" ) } ) def test_i18n34(self): output = self.engine.render_to_string("i18n34") if self.engine.string_if_invalid: self.assertEqual(output, "INVALID") else: self.assertEqual(output, "") @setup( { "i18n34_2": ( "{% load i18n %}{% blocktranslate with a='α' %}{{ missing }}" "{% endblocktranslate %}" ) } ) def test_i18n34_2(self): output = self.engine.render_to_string("i18n34_2") if self.engine.string_if_invalid: self.assertEqual(output, "INVALID") else: self.assertEqual(output, "") @setup( { "i18n34_3": ( "{% load i18n %}{% blocktranslate with a=anton %}{{ missing }}" "{% endblocktranslate %}" ) } ) def test_i18n34_3(self): output = self.engine.render_to_string("i18n34_3", {"anton": "\xce\xb1"}) if self.engine.string_if_invalid: self.assertEqual(output, "INVALID") else: self.assertEqual(output, "") @setup( { "i18n37": "{% load i18n %}" '{% translate "Page not found" as page_not_found %}' "{% blocktranslate %}Error: {{ page_not_found }}{% endblocktranslate %}" } ) def test_i18n37(self): with translation.override("de"): output = self.engine.render_to_string("i18n37") self.assertEqual(output, "Error: Seite nicht gefunden") # blocktranslate tag with asvar @setup( { "i18n39": ( "{% load i18n %}" "{% blocktranslate asvar page_not_found %}Page not found" "{% endblocktranslate %}>{{ page_not_found }}<" ) } ) def test_i18n39(self): with translation.override("de"): output = self.engine.render_to_string("i18n39") self.assertEqual(output, ">Seite nicht gefunden<") @setup( { "i18n40": "{% load i18n %}" '{% translate "Page not found" as pg_404 %}' "{% blocktranslate with page_not_found=pg_404 asvar output %}" "Error: {{ page_not_found }}" "{% endblocktranslate %}" } ) def test_i18n40(self): output = self.engine.render_to_string("i18n40") self.assertEqual(output, "") @setup( { "i18n41": "{% load i18n %}" '{% translate "Page not found" as pg_404 %}' "{% blocktranslate with page_not_found=pg_404 asvar output %}" "Error: {{ page_not_found }}" "{% endblocktranslate %}" ">{{ output }}<" } ) def test_i18n41(self): with translation.override("de"): output = self.engine.render_to_string("i18n41") self.assertEqual(output, ">Error: Seite nicht gefunden<") @setup( { "i18n_asvar_safestring": ( "{% load i18n %}" "{% blocktranslate asvar the_title %}" "{{title}}other text" "{% endblocktranslate %}" "{{ the_title }}" ) } ) def test_i18n_asvar_safestring(self): context = {"title": "<Main Title>"} output = self.engine.render_to_string("i18n_asvar_safestring", context=context) self.assertEqual(output, "&lt;Main Title&gt;other text") @setup( { "template": ( "{% load i18n %}{% blocktranslate asvar %}Yes{% endblocktranslate %}" ) } ) def test_blocktrans_syntax_error_missing_assignment(self, tag_name): msg = "No argument provided to the '{}' tag for the asvar 
option.".format( tag_name ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string("template") @setup({"template": "{% load i18n %}{% blocktranslate %}%s{% endblocktranslate %}"}) def test_blocktrans_tag_using_a_string_that_looks_like_str_fmt(self): output = self.engine.render_to_string("template") self.assertEqual(output, "%s") @setup( { "template": ( "{% load i18n %}{% blocktranslate %}{% block b %} {% endblock %}" "{% endblocktranslate %}" ) } ) def test_with_block(self, tag_name): msg = "'{}' doesn't allow other block tags (seen 'block b') inside it".format( tag_name ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string("template") @setup( { "template": ( "{% load i18n %}" "{% blocktranslate %}{% for b in [1, 2, 3] %} {% endfor %}" "{% endblocktranslate %}" ) } ) def test_with_for(self, tag_name): msg = ( f"'{tag_name}' doesn't allow other block tags (seen 'for b in [1, 2, 3]') " f"inside it" ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string("template") @setup( { "template": ( "{% load i18n %}{% blocktranslate with foo=bar with %}{{ foo }}" "{% endblocktranslate %}" ) } ) def test_variable_twice(self): with self.assertRaisesMessage( TemplateSyntaxError, "The 'with' option was specified more than once" ): self.engine.render_to_string("template", {"foo": "bar"}) @setup( {"template": "{% load i18n %}{% blocktranslate with %}{% endblocktranslate %}"} ) def test_no_args_with(self, tag_name): msg = "\"with\" in '{}' tag needs at least one keyword argument.".format( tag_name ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string("template") @setup( { "template": ( "{% load i18n %}{% blocktranslate count a %}{% endblocktranslate %}" ) } ) def test_count(self, tag_name): msg = "\"count\" in '{}' tag expected exactly one keyword argument.".format( tag_name ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string("template", {"a": [1, 2, 3]}) @setup( { "template": ( "{% load i18n %}{% blocktranslate count counter=num %}{{ counter }}" "{% plural %}{{ counter }}{% endblocktranslate %}" ) } ) def test_count_not_number(self, tag_name): msg = "'counter' argument to '{}' tag must be a number.".format(tag_name) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string("template", {"num": "1"}) @setup( { "template": ( "{% load i18n %}{% blocktranslate count count=var|length %}" "There is {{ count }} object. 
{% block a %} {% endblock %}" "{% endblocktranslate %}" ) } ) def test_plural_bad_syntax(self, tag_name): msg = "'{}' doesn't allow other block tags inside it".format(tag_name) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string("template", {"var": [1, 2, 3]}) class TranslationBlockTranslateTagTests(SimpleTestCase): tag_name = "blocktranslate" def get_template(self, template_string): return Template( template_string.replace( "{{% blocktranslate ", "{{% {}".format(self.tag_name) ).replace( "{{% endblocktranslate %}}", "{{% end{} %}}".format(self.tag_name) ) ) @override_settings(LOCALE_PATHS=extended_locale_paths) def test_template_tags_pgettext(self): """{% blocktranslate %} takes message contexts into account (#14806).""" trans_real._active = Local() trans_real._translations = {} with translation.override("de"): # Nonexistent context t = self.get_template( '{% load i18n %}{% blocktranslate context "nonexistent" %}May' "{% endblocktranslate %}" ) rendered = t.render(Context()) self.assertEqual(rendered, "May") # Existing context... using a literal t = self.get_template( "{% load i18n %}" '{% blocktranslate context "month name" %}May{% endblocktranslate %}' ) rendered = t.render(Context()) self.assertEqual(rendered, "Mai") t = self.get_template( "{% load i18n %}" '{% blocktranslate context "verb" %}May{% endblocktranslate %}' ) rendered = t.render(Context()) self.assertEqual(rendered, "Kann") # Using a variable t = self.get_template( "{% load i18n %}{% blocktranslate context message_context %}" "May{% endblocktranslate %}" ) rendered = t.render(Context({"message_context": "month name"})) self.assertEqual(rendered, "Mai") t = self.get_template( "{% load i18n %}{% blocktranslate context message_context %}" "May{% endblocktranslate %}" ) rendered = t.render(Context({"message_context": "verb"})) self.assertEqual(rendered, "Kann") # Using a filter t = self.get_template( "{% load i18n %}" "{% blocktranslate context message_context|lower %}May" "{% endblocktranslate %}" ) rendered = t.render(Context({"message_context": "MONTH NAME"})) self.assertEqual(rendered, "Mai") t = self.get_template( "{% load i18n %}" "{% blocktranslate context message_context|lower %}May" "{% endblocktranslate %}" ) rendered = t.render(Context({"message_context": "VERB"})) self.assertEqual(rendered, "Kann") # Using 'count' t = self.get_template( "{% load i18n %}" '{% blocktranslate count number=1 context "super search" %}{{ number }}' " super result{% plural %}{{ number }} super results" "{% endblocktranslate %}" ) rendered = t.render(Context()) self.assertEqual(rendered, "1 Super-Ergebnis") t = self.get_template( "{% load i18n %}" '{% blocktranslate count number=2 context "super search" %}{{ number }}' " super result{% plural %}{{ number }} super results" "{% endblocktranslate %}" ) rendered = t.render(Context()) self.assertEqual(rendered, "2 Super-Ergebnisse") t = self.get_template( "{% load i18n %}" '{% blocktranslate context "other super search" count number=1 %}' "{{ number }} super result{% plural %}{{ number }} super results" "{% endblocktranslate %}" ) rendered = t.render(Context()) self.assertEqual(rendered, "1 anderen Super-Ergebnis") t = self.get_template( "{% load i18n %}" '{% blocktranslate context "other super search" count number=2 %}' "{{ number }} super result{% plural %}{{ number }} super results" "{% endblocktranslate %}" ) rendered = t.render(Context()) self.assertEqual(rendered, "2 andere Super-Ergebnisse") # Using 'with' t = self.get_template( "{% load i18n %}" '{% 
blocktranslate with num_comments=5 context "comment count" %}' "There are {{ num_comments }} comments{% endblocktranslate %}" ) rendered = t.render(Context()) self.assertEqual(rendered, "Es gibt 5 Kommentare") t = self.get_template( "{% load i18n %}" '{% blocktranslate with num_comments=5 context "other comment count" %}' "There are {{ num_comments }} comments{% endblocktranslate %}" ) rendered = t.render(Context()) self.assertEqual(rendered, "Andere: Es gibt 5 Kommentare") # Using trimmed t = self.get_template( "{% load i18n %}{% blocktranslate trimmed %}\n\nThere\n\t are 5 " "\n\n comments\n{% endblocktranslate %}" ) rendered = t.render(Context()) self.assertEqual(rendered, "There are 5 comments") t = self.get_template( "{% load i18n %}" '{% blocktranslate with num_comments=5 context "comment count" trimmed ' "%}\n\n" "There are \t\n \t {{ num_comments }} comments\n\n" "{% endblocktranslate %}" ) rendered = t.render(Context()) self.assertEqual(rendered, "Es gibt 5 Kommentare") t = self.get_template( "{% load i18n %}" '{% blocktranslate context "other super search" count number=2 trimmed ' "%}\n{{ number }} super \n result{% plural %}{{ number }} super results" "{% endblocktranslate %}" ) rendered = t.render(Context()) self.assertEqual(rendered, "2 andere Super-Ergebnisse") # Misuses msg = "Unknown argument for 'blocktranslate' tag: %r." with self.assertRaisesMessage(TemplateSyntaxError, msg % 'month="May"'): self.get_template( '{% load i18n %}{% blocktranslate context with month="May" %}' "{{ month }}{% endblocktranslate %}" ) msg = ( '"context" in %r tag expected exactly one argument.' % "blocktranslate" ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.get_template( "{% load i18n %}{% blocktranslate context %}{% endblocktranslate %}" ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.get_template( "{% load i18n %}{% blocktranslate count number=2 context %}" "{{ number }} super result{% plural %}{{ number }}" " super results{% endblocktranslate %}" ) @override_settings(LOCALE_PATHS=[os.path.join(here, "other", "locale")]) def test_bad_placeholder_1(self): """ Error in translation file should not crash template rendering (#16516). (%(person)s is translated as %(personne)s in fr.po). """ with translation.override("fr"): t = Template( "{% load i18n %}{% blocktranslate %}My name is {{ person }}." "{% endblocktranslate %}" ) rendered = t.render(Context({"person": "James"})) self.assertEqual(rendered, "My name is James.") @override_settings(LOCALE_PATHS=[os.path.join(here, "other", "locale")]) def test_bad_placeholder_2(self): """ Error in translation file should not crash template rendering (#18393). (%(person) misses a 's' in fr.po, causing the string formatting to fail) . """ with translation.override("fr"): t = Template( "{% load i18n %}{% blocktranslate %}My other name is {{ person }}." "{% endblocktranslate %}" ) rendered = t.render(Context({"person": "James"})) self.assertEqual(rendered, "My other name is James.") class TranslationBlockTransnTagTests(TranslationBlockTranslateTagTests): tag_name = "blocktrans" class MultipleLocaleActivationBlockTranslateTests(MultipleLocaleActivationTestCase): tag_name = "blocktranslate" def get_template(self, template_string): return Template( template_string.replace( "{{% blocktranslate ", "{{% {}".format(self.tag_name) ).replace( "{{% endblocktranslate %}}", "{{% end{} %}}".format(self.tag_name) ) ) def test_single_locale_activation(self): """ Simple baseline behavior with one locale for all the supported i18n constructs. 
""" with translation.override("fr"): self.assertEqual( self.get_template( "{% load i18n %}{% blocktranslate %}Yes{% endblocktranslate %}" ).render(Context({})), "Oui", ) def test_multiple_locale_btrans(self): with translation.override("de"): t = self.get_template( "{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}" ) with translation.override(self._old_language), translation.override("nl"): self.assertEqual(t.render(Context({})), "Nee") def test_multiple_locale_deactivate_btrans(self): with translation.override("de", deactivate=True): t = self.get_template( "{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}" ) with translation.override("nl"): self.assertEqual(t.render(Context({})), "Nee") def test_multiple_locale_direct_switch_btrans(self): with translation.override("de"): t = self.get_template( "{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}" ) with translation.override("nl"): self.assertEqual(t.render(Context({})), "Nee") class MultipleLocaleActivationBlockTransTests( MultipleLocaleActivationBlockTranslateTests ): tag_name = "blocktrans" class MiscTests(SimpleTestCase): tag_name = "blocktranslate" def get_template(self, template_string): return Template( template_string.replace( "{{% blocktranslate ", "{{% {}".format(self.tag_name) ).replace( "{{% endblocktranslate %}}", "{{% end{} %}}".format(self.tag_name) ) ) @override_settings(LOCALE_PATHS=extended_locale_paths) def test_percent_in_translatable_block(self): t_sing = self.get_template( "{% load i18n %}{% blocktranslate %}The result was {{ percent }}%" "{% endblocktranslate %}" ) t_plur = self.get_template( "{% load i18n %}{% blocktranslate count num as number %}" "{{ percent }}% represents {{ num }} object{% plural %}" "{{ percent }}% represents {{ num }} objects{% endblocktranslate %}" ) with translation.override("de"): self.assertEqual( t_sing.render(Context({"percent": 42})), "Das Ergebnis war 42%" ) self.assertEqual( t_plur.render(Context({"percent": 42, "num": 1})), "42% stellt 1 Objekt dar", ) self.assertEqual( t_plur.render(Context({"percent": 42, "num": 4})), "42% stellt 4 Objekte dar", ) @override_settings(LOCALE_PATHS=extended_locale_paths) def test_percent_formatting_in_blocktranslate(self): """ Python's %-formatting is properly escaped in blocktranslate, singular, or plural. """ t_sing = self.get_template( "{% load i18n %}{% blocktranslate %}There are %(num_comments)s comments" "{% endblocktranslate %}" ) t_plur = self.get_template( "{% load i18n %}{% blocktranslate count num as number %}" "%(percent)s% represents {{ num }} object{% plural %}" "%(percent)s% represents {{ num }} objects{% endblocktranslate %}" ) with translation.override("de"): # Strings won't get translated as they don't match after escaping % self.assertEqual( t_sing.render(Context({"num_comments": 42})), "There are %(num_comments)s comments", ) self.assertEqual( t_plur.render(Context({"percent": 42, "num": 1})), "%(percent)s% represents 1 object", ) self.assertEqual( t_plur.render(Context({"percent": 42, "num": 4})), "%(percent)s% represents 4 objects", ) class MiscBlockTranslationTests(MiscTests): tag_name = "blocktrans" class BlockTranslateNodeTests(SimpleTestCase): def test_repr(self): block_translate_node = BlockTranslateNode( extra_context={}, singular=[ Token(TokenType.TEXT, "content"), Token(TokenType.VAR, "variable"), ], ) self.assertEqual( repr(block_translate_node), "<BlockTranslateNode: extra_context={} " 'singular=[<Text token: "content...">, <Var token: "variable...">] ' "plural=None>", )
65580e8fdc041ac1d65f75dfdb5747e073c3d5eaf3af7bef4ddb1bd1b027aba1
import decimal from django.core.management.color import no_style from django.db import NotSupportedError, connection, transaction from django.db.backends.base.operations import BaseDatabaseOperations from django.db.models import DurationField, Value from django.test import ( SimpleTestCase, TestCase, TransactionTestCase, override_settings, skipIfDBFeature, ) from django.utils import timezone from ..models import Author, Book class SimpleDatabaseOperationTests(SimpleTestCase): may_require_msg = "subclasses of BaseDatabaseOperations may require a %s() method" def setUp(self): self.ops = BaseDatabaseOperations(connection=connection) def test_deferrable_sql(self): self.assertEqual(self.ops.deferrable_sql(), "") def test_end_transaction_rollback(self): self.assertEqual(self.ops.end_transaction_sql(success=False), "ROLLBACK;") def test_no_limit_value(self): with self.assertRaisesMessage( NotImplementedError, self.may_require_msg % "no_limit_value" ): self.ops.no_limit_value() def test_quote_name(self): with self.assertRaisesMessage( NotImplementedError, self.may_require_msg % "quote_name" ): self.ops.quote_name("a") def test_regex_lookup(self): with self.assertRaisesMessage( NotImplementedError, self.may_require_msg % "regex_lookup" ): self.ops.regex_lookup(lookup_type="regex") def test_set_time_zone_sql(self): self.assertEqual(self.ops.set_time_zone_sql(), "") def test_sql_flush(self): msg = "subclasses of BaseDatabaseOperations must provide an sql_flush() method" with self.assertRaisesMessage(NotImplementedError, msg): self.ops.sql_flush(None, None) def test_pk_default_value(self): self.assertEqual(self.ops.pk_default_value(), "DEFAULT") def test_tablespace_sql(self): self.assertEqual(self.ops.tablespace_sql(None), "") def test_sequence_reset_by_name_sql(self): self.assertEqual(self.ops.sequence_reset_by_name_sql(None, []), []) def test_adapt_unknown_value_decimal(self): value = decimal.Decimal("3.14") self.assertEqual( self.ops.adapt_unknown_value(value), self.ops.adapt_decimalfield_value(value), ) def test_adapt_unknown_value_date(self): value = timezone.now().date() self.assertEqual( self.ops.adapt_unknown_value(value), self.ops.adapt_datefield_value(value) ) def test_adapt_unknown_value_time(self): value = timezone.now().time() self.assertEqual( self.ops.adapt_unknown_value(value), self.ops.adapt_timefield_value(value) ) def test_adapt_timefield_value_none(self): self.assertIsNone(self.ops.adapt_timefield_value(None)) def test_adapt_timefield_value_expression(self): value = Value(timezone.now().time()) self.assertEqual(self.ops.adapt_timefield_value(value), value) def test_adapt_datetimefield_value_none(self): self.assertIsNone(self.ops.adapt_datetimefield_value(None)) def test_adapt_datetimefield_value_expression(self): value = Value(timezone.now()) self.assertEqual(self.ops.adapt_datetimefield_value(value), value) def test_adapt_timefield_value(self): msg = "Django does not support timezone-aware times." 
with self.assertRaisesMessage(ValueError, msg): self.ops.adapt_timefield_value(timezone.make_aware(timezone.now())) @override_settings(USE_TZ=False) def test_adapt_timefield_value_unaware(self): now = timezone.now() self.assertEqual(self.ops.adapt_timefield_value(now), str(now)) def test_format_for_duration_arithmetic(self): msg = self.may_require_msg % "format_for_duration_arithmetic" with self.assertRaisesMessage(NotImplementedError, msg): self.ops.format_for_duration_arithmetic(None) def test_date_extract_sql(self): with self.assertRaisesMessage( NotImplementedError, self.may_require_msg % "date_extract_sql" ): self.ops.date_extract_sql(None, None, None) def test_time_extract_sql(self): with self.assertRaisesMessage( NotImplementedError, self.may_require_msg % "date_extract_sql" ): self.ops.time_extract_sql(None, None, None) def test_date_trunc_sql(self): with self.assertRaisesMessage( NotImplementedError, self.may_require_msg % "date_trunc_sql" ): self.ops.date_trunc_sql(None, None, None) def test_time_trunc_sql(self): with self.assertRaisesMessage( NotImplementedError, self.may_require_msg % "time_trunc_sql" ): self.ops.time_trunc_sql(None, None, None) def test_datetime_trunc_sql(self): with self.assertRaisesMessage( NotImplementedError, self.may_require_msg % "datetime_trunc_sql" ): self.ops.datetime_trunc_sql(None, None, None, None) def test_datetime_cast_date_sql(self): with self.assertRaisesMessage( NotImplementedError, self.may_require_msg % "datetime_cast_date_sql" ): self.ops.datetime_cast_date_sql(None, None, None) def test_datetime_cast_time_sql(self): with self.assertRaisesMessage( NotImplementedError, self.may_require_msg % "datetime_cast_time_sql" ): self.ops.datetime_cast_time_sql(None, None, None) def test_datetime_extract_sql(self): with self.assertRaisesMessage( NotImplementedError, self.may_require_msg % "datetime_extract_sql" ): self.ops.datetime_extract_sql(None, None, None, None) class DatabaseOperationTests(TestCase): def setUp(self): self.ops = BaseDatabaseOperations(connection=connection) @skipIfDBFeature("supports_over_clause") def test_window_frame_raise_not_supported_error(self): msg = "This backend does not support window expressions." with self.assertRaisesMessage(NotSupportedError, msg): self.ops.window_frame_rows_start_end() @skipIfDBFeature("can_distinct_on_fields") def test_distinct_on_fields(self): msg = "DISTINCT ON fields is not supported by this database backend" with self.assertRaisesMessage(NotSupportedError, msg): self.ops.distinct_sql(["a", "b"], None) @skipIfDBFeature("supports_temporal_subtraction") def test_subtract_temporals(self): duration_field = DurationField() duration_field_internal_type = duration_field.get_internal_type() msg = ( "This backend does not support %s subtraction." 
            % duration_field_internal_type
        )
        with self.assertRaisesMessage(NotSupportedError, msg):
            self.ops.subtract_temporals(duration_field_internal_type, None, None)


class SqlFlushTests(TransactionTestCase):
    available_apps = ["backends"]

    def test_sql_flush_no_tables(self):
        self.assertEqual(connection.ops.sql_flush(no_style(), []), [])

    def test_execute_sql_flush_statements(self):
        with transaction.atomic():
            author = Author.objects.create(name="George Orwell")
            Book.objects.create(author=author)
            author = Author.objects.create(name="Harper Lee")
            Book.objects.create(author=author)
            Book.objects.create(author=author)
            self.assertIs(Author.objects.exists(), True)
            self.assertIs(Book.objects.exists(), True)

        sql_list = connection.ops.sql_flush(
            no_style(),
            [Author._meta.db_table, Book._meta.db_table],
            reset_sequences=True,
            allow_cascade=True,
        )
        connection.ops.execute_sql_flush(sql_list)

        with transaction.atomic():
            self.assertIs(Author.objects.exists(), False)
            self.assertIs(Book.objects.exists(), False)
            if connection.features.supports_sequence_reset:
                author = Author.objects.create(name="F. Scott Fitzgerald")
                self.assertEqual(author.pk, 1)
                book = Book.objects.create(author=author)
                self.assertEqual(book.pk, 1)
9a09856abe9940669ef4c42535cc3ed355be004307dfb2b6f3b9ebf0a3240958
import unittest from contextlib import contextmanager from unittest import mock from django.core.exceptions import ImproperlyConfigured from django.db import NotSupportedError, connection from django.test import TestCase, override_settings @contextmanager def get_connection(): new_connection = connection.copy() yield new_connection new_connection.close() @override_settings(DEBUG=True) @unittest.skipUnless(connection.vendor == "mysql", "MySQL tests") class IsolationLevelTests(TestCase): read_committed = "read committed" repeatable_read = "repeatable read" isolation_values = { level: level.upper() for level in (read_committed, repeatable_read) } @classmethod def setUpClass(cls): super().setUpClass() configured_isolation_level = ( connection.isolation_level or cls.isolation_values[cls.repeatable_read] ) cls.configured_isolation_level = configured_isolation_level.upper() cls.other_isolation_level = ( cls.read_committed if configured_isolation_level != cls.isolation_values[cls.read_committed] else cls.repeatable_read ) @staticmethod def get_isolation_level(connection): with connection.cursor() as cursor: cursor.execute( "SHOW VARIABLES " "WHERE variable_name IN ('transaction_isolation', 'tx_isolation')" ) return cursor.fetchone()[1].replace("-", " ") def test_auto_is_null_auto_config(self): query = "set sql_auto_is_null = 0" connection.init_connection_state() last_query = connection.queries[-1]["sql"].lower() if connection.features.is_sql_auto_is_null_enabled: self.assertIn(query, last_query) else: self.assertNotIn(query, last_query) def test_connect_isolation_level(self): self.assertEqual( self.get_isolation_level(connection), self.configured_isolation_level ) def test_setting_isolation_level(self): with get_connection() as new_connection: new_connection.settings_dict["OPTIONS"][ "isolation_level" ] = self.other_isolation_level self.assertEqual( self.get_isolation_level(new_connection), self.isolation_values[self.other_isolation_level], ) def test_uppercase_isolation_level(self): # Upper case values are also accepted in 'isolation_level'. with get_connection() as new_connection: new_connection.settings_dict["OPTIONS"][ "isolation_level" ] = self.other_isolation_level.upper() self.assertEqual( self.get_isolation_level(new_connection), self.isolation_values[self.other_isolation_level], ) def test_default_isolation_level(self): # If not specified in settings, the default is read committed. with get_connection() as new_connection: new_connection.settings_dict["OPTIONS"].pop("isolation_level", None) self.assertEqual( self.get_isolation_level(new_connection), self.isolation_values[self.read_committed], ) def test_isolation_level_validation(self): new_connection = connection.copy() new_connection.settings_dict["OPTIONS"]["isolation_level"] = "xxx" msg = ( "Invalid transaction isolation level 'xxx' specified.\n" "Use one of 'read committed', 'read uncommitted', " "'repeatable read', 'serializable', or None." ) with self.assertRaisesMessage(ImproperlyConfigured, msg): new_connection.cursor() @unittest.skipUnless(connection.vendor == "mysql", "MySQL tests") class Tests(TestCase): @mock.patch.object(connection, "get_database_version") def test_check_database_version_supported(self, mocked_get_database_version): if connection.mysql_is_mariadb: mocked_get_database_version.return_value = (10, 3) msg = "MariaDB 10.4 or later is required (found 10.3)." else: mocked_get_database_version.return_value = (5, 7) msg = "MySQL 8 or later is required (found 5.7)." 
        with self.assertRaisesMessage(NotSupportedError, msg):
            connection.check_database_version_supported()
        self.assertTrue(mocked_get_database_version.called)
cbe26d7a2e1e495d3bd79c6855f1183a95aa73595d83cb91e882af46d9ea021c
from django.apps import AppConfig


class IndexTogetherAppConfig(AppConfig):
    name = "check_framework.migrations_test_apps.index_together_app"
28a89f3a6fe255e77083ee8fa7b39b9f5e1852e99923d2ee8ed4eb54f1bd3de1
from django.db import migrations, models


class Migration(migrations.Migration):
    initial = True

    operations = [
        migrations.CreateModel(
            "SimpleModel",
            [
                ("field", models.IntegerField()),
            ],
        ),
        migrations.AlterIndexTogether(
            "SimpleModel",
            index_together=(("id", "field"),),
        ),
    ]
d5e612f8d543b8ca4af65ef241e1433fede3a03649385ac858a9e9360554aa81
import tempfile from io import StringIO from django.contrib.gis import gdal from django.contrib.gis.db.models import Extent, MakeLine, Union, functions from django.contrib.gis.geos import ( GeometryCollection, GEOSGeometry, LinearRing, LineString, MultiLineString, MultiPoint, MultiPolygon, Point, Polygon, fromstr, ) from django.core.management import call_command from django.db import DatabaseError, NotSupportedError, connection from django.db.models import F, OuterRef, Subquery from django.test import TestCase, skipUnlessDBFeature from django.test.utils import CaptureQueriesContext from ..utils import skipUnlessGISLookup from .models import ( City, Country, Feature, MinusOneSRID, MultiFields, NonConcreteModel, PennsylvaniaCity, State, Track, ) class GeoModelTest(TestCase): fixtures = ["initial"] def test_fixtures(self): "Testing geographic model initialization from fixtures." # Ensuring that data was loaded from initial data fixtures. self.assertEqual(2, Country.objects.count()) self.assertEqual(8, City.objects.count()) self.assertEqual(2, State.objects.count()) def test_proxy(self): "Testing Lazy-Geometry support (using the GeometryProxy)." # Testing on a Point pnt = Point(0, 0) nullcity = City(name="NullCity", point=pnt) nullcity.save() # Making sure TypeError is thrown when trying to set with an # incompatible type. for bad in [5, 2.0, LineString((0, 0), (1, 1))]: with self.assertRaisesMessage(TypeError, "Cannot set"): nullcity.point = bad # Now setting with a compatible GEOS Geometry, saving, and ensuring # the save took, notice no SRID is explicitly set. new = Point(5, 23) nullcity.point = new # Ensuring that the SRID is automatically set to that of the # field after assignment, but before saving. self.assertEqual(4326, nullcity.point.srid) nullcity.save() # Ensuring the point was saved correctly after saving self.assertEqual(new, City.objects.get(name="NullCity").point) # Setting the X and Y of the Point nullcity.point.x = 23 nullcity.point.y = 5 # Checking assignments pre & post-save. self.assertNotEqual( Point(23, 5, srid=4326), City.objects.get(name="NullCity").point ) nullcity.save() self.assertEqual( Point(23, 5, srid=4326), City.objects.get(name="NullCity").point ) nullcity.delete() # Testing on a Polygon shell = LinearRing((0, 0), (0, 90), (100, 90), (100, 0), (0, 0)) inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40)) # Creating a State object using a built Polygon ply = Polygon(shell, inner) nullstate = State(name="NullState", poly=ply) self.assertEqual(4326, nullstate.poly.srid) # SRID auto-set from None nullstate.save() ns = State.objects.get(name="NullState") self.assertEqual(connection.ops.Adapter._fix_polygon(ply), ns.poly) # Testing the `ogr` and `srs` lazy-geometry properties. self.assertIsInstance(ns.poly.ogr, gdal.OGRGeometry) self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb) self.assertIsInstance(ns.poly.srs, gdal.SpatialReference) self.assertEqual("WGS 84", ns.poly.srs.name) # Changing the interior ring on the poly attribute. new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30)) ns.poly[1] = new_inner ply[1] = new_inner self.assertEqual(4326, ns.poly.srid) ns.save() self.assertEqual( connection.ops.Adapter._fix_polygon(ply), State.objects.get(name="NullState").poly, ) ns.delete() @skipUnlessDBFeature("supports_transform") def test_lookup_insert_transform(self): "Testing automatic transform for lookups and inserts." 
# San Antonio in 'WGS84' (SRID 4326) sa_4326 = "POINT (-98.493183 29.424170)" wgs_pnt = fromstr(sa_4326, srid=4326) # Our reference point in WGS84 # San Antonio in 'WGS 84 / Pseudo-Mercator' (SRID 3857) other_srid_pnt = wgs_pnt.transform(3857, clone=True) # Constructing & querying with a point from a different SRID. Oracle # `SDO_OVERLAPBDYINTERSECT` operates differently from # `ST_Intersects`, so contains is used instead. if connection.ops.oracle: tx = Country.objects.get(mpoly__contains=other_srid_pnt) else: tx = Country.objects.get(mpoly__intersects=other_srid_pnt) self.assertEqual("Texas", tx.name) # Creating San Antonio. Remember the Alamo. sa = City.objects.create(name="San Antonio", point=other_srid_pnt) # Now verifying that San Antonio was transformed correctly sa = City.objects.get(name="San Antonio") self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6) self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6) # If the GeometryField SRID is -1, then we shouldn't perform any # transformation if the SRID of the input geometry is different. m1 = MinusOneSRID(geom=Point(17, 23, srid=4326)) m1.save() self.assertEqual(-1, m1.geom.srid) def test_createnull(self): "Testing creating a model instance and the geometry being None" c = City() self.assertIsNone(c.point) def test_geometryfield(self): "Testing the general GeometryField." Feature(name="Point", geom=Point(1, 1)).save() Feature(name="LineString", geom=LineString((0, 0), (1, 1), (5, 5))).save() Feature( name="Polygon", geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))), ).save() Feature( name="GeometryCollection", geom=GeometryCollection( Point(2, 2), LineString((0, 0), (2, 2)), Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))), ), ).save() f_1 = Feature.objects.get(name="Point") self.assertIsInstance(f_1.geom, Point) self.assertEqual((1.0, 1.0), f_1.geom.tuple) f_2 = Feature.objects.get(name="LineString") self.assertIsInstance(f_2.geom, LineString) self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple) f_3 = Feature.objects.get(name="Polygon") self.assertIsInstance(f_3.geom, Polygon) f_4 = Feature.objects.get(name="GeometryCollection") self.assertIsInstance(f_4.geom, GeometryCollection) self.assertEqual(f_3.geom, f_4.geom[2]) @skipUnlessDBFeature("supports_transform") def test_inherited_geofields(self): "Database functions on inherited Geometry fields." # Creating a Pennsylvanian city. PennsylvaniaCity.objects.create( name="Mansfield", county="Tioga", point="POINT(-77.071445 41.823881)" ) # All transformation SQL will need to be performed on the # _parent_ table. qs = PennsylvaniaCity.objects.annotate( new_point=functions.Transform("point", srid=32128) ) self.assertEqual(1, qs.count()) for pc in qs: self.assertEqual(32128, pc.new_point.srid) def test_raw_sql_query(self): "Testing raw SQL query." cities1 = City.objects.all() point_select = connection.ops.select % "point" cities2 = list( City.objects.raw( "select id, name, %s as point from geoapp_city" % point_select ) ) self.assertEqual(len(cities1), len(cities2)) with self.assertNumQueries(0): # Ensure point isn't deferred. self.assertIsInstance(cities2[0].point, Point) def test_gis_query_as_string(self): """GIS queries can be represented as strings.""" query = City.objects.filter(point__within=Polygon.from_bbox((0, 0, 2, 2))) self.assertIn( connection.ops.quote_name(City._meta.db_table), str(query.query), ) def test_dumpdata_loaddata_cycle(self): """ Test a dumpdata/loaddata cycle with geographic data. 
""" out = StringIO() original_data = list(City.objects.order_by("name")) call_command("dumpdata", "geoapp.City", stdout=out) result = out.getvalue() houston = City.objects.get(name="Houston") self.assertIn('"point": "%s"' % houston.point.ewkt, result) # Reload now dumped data with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as tmp: tmp.write(result) tmp.seek(0) call_command("loaddata", tmp.name, verbosity=0) self.assertEqual(original_data, list(City.objects.order_by("name"))) @skipUnlessDBFeature("supports_empty_geometries") def test_empty_geometries(self): geometry_classes = [ Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon, GeometryCollection, ] for klass in geometry_classes: g = klass(srid=4326) feature = Feature.objects.create(name="Empty %s" % klass.__name__, geom=g) feature.refresh_from_db() if klass is LinearRing: # LinearRing isn't representable in WKB, so GEOSGeomtry.wkb # uses LineString instead. g = LineString(srid=4326) self.assertEqual(feature.geom, g) self.assertEqual(feature.geom.srid, g.srid) class GeoLookupTest(TestCase): fixtures = ["initial"] def test_disjoint_lookup(self): "Testing the `disjoint` lookup type." ptown = City.objects.get(name="Pueblo") qs1 = City.objects.filter(point__disjoint=ptown.point) self.assertEqual(7, qs1.count()) qs2 = State.objects.filter(poly__disjoint=ptown.point) self.assertEqual(1, qs2.count()) self.assertEqual("Kansas", qs2[0].name) def test_contains_contained_lookups(self): "Testing the 'contained', 'contains', and 'bbcontains' lookup types." # Getting Texas, yes we were a country -- once ;) texas = Country.objects.get(name="Texas") # Seeing what cities are in Texas, should get Houston and Dallas, # and Oklahoma City because 'contained' only checks on the # _bounding box_ of the Geometries. if connection.features.supports_contained_lookup: qs = City.objects.filter(point__contained=texas.mpoly) self.assertEqual(3, qs.count()) cities = ["Houston", "Dallas", "Oklahoma City"] for c in qs: self.assertIn(c.name, cities) # Pulling out some cities. houston = City.objects.get(name="Houston") wellington = City.objects.get(name="Wellington") pueblo = City.objects.get(name="Pueblo") okcity = City.objects.get(name="Oklahoma City") lawrence = City.objects.get(name="Lawrence") # Now testing contains on the countries using the points for # Houston and Wellington. tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry nz = Country.objects.get( mpoly__contains=wellington.point.hex ) # Query w/EWKBHEX self.assertEqual("Texas", tx.name) self.assertEqual("New Zealand", nz.name) # Testing `contains` on the states using the point for Lawrence. ks = State.objects.get(poly__contains=lawrence.point) self.assertEqual("Kansas", ks.name) # Pueblo and Oklahoma City (even though OK City is within the bounding # box of Texas) are not contained in Texas or New Zealand. self.assertEqual( len(Country.objects.filter(mpoly__contains=pueblo.point)), 0 ) # Query w/GEOSGeometry object self.assertEqual( len(Country.objects.filter(mpoly__contains=okcity.point.wkt)), 0 ) # Query w/WKT # OK City is contained w/in bounding box of Texas. 
if connection.features.supports_bbcontains_lookup: qs = Country.objects.filter(mpoly__bbcontains=okcity.point) self.assertEqual(1, len(qs)) self.assertEqual("Texas", qs[0].name) @skipUnlessDBFeature("supports_crosses_lookup") def test_crosses_lookup(self): Track.objects.create(name="Line1", line=LineString([(-95, 29), (-60, 0)])) self.assertEqual( Track.objects.filter( line__crosses=LineString([(-95, 0), (-60, 29)]) ).count(), 1, ) self.assertEqual( Track.objects.filter( line__crosses=LineString([(-95, 30), (0, 30)]) ).count(), 0, ) @skipUnlessDBFeature("supports_isvalid_lookup") def test_isvalid_lookup(self): invalid_geom = fromstr("POLYGON((0 0, 0 1, 1 1, 1 0, 1 1, 1 0, 0 0))") State.objects.create(name="invalid", poly=invalid_geom) qs = State.objects.all() if connection.ops.oracle: # Kansas has adjacent vertices with distance 6.99244813842e-12 # which is smaller than the default Oracle tolerance. qs = qs.exclude(name="Kansas") self.assertEqual( State.objects.filter(name="Kansas", poly__isvalid=False).count(), 1 ) self.assertEqual(qs.filter(poly__isvalid=False).count(), 1) self.assertEqual(qs.filter(poly__isvalid=True).count(), qs.count() - 1) @skipUnlessGISLookup("left", "right") def test_left_right_lookups(self): "Testing the 'left' and 'right' lookup types." # Left: A << B => true if xmax(A) < xmin(B) # Right: A >> B => true if xmin(A) > xmax(B) # See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source. # Getting the borders for Colorado & Kansas co_border = State.objects.get(name="Colorado").poly ks_border = State.objects.get(name="Kansas").poly # Note: Wellington has an 'X' value of 174, so it will not be considered # to the left of CO. # These cities should be strictly to the right of the CO border. cities = [ "Houston", "Dallas", "Oklahoma City", "Lawrence", "Chicago", "Wellington", ] qs = City.objects.filter(point__right=co_border) self.assertEqual(6, len(qs)) for c in qs: self.assertIn(c.name, cities) # These cities should be strictly to the right of the KS border. cities = ["Chicago", "Wellington"] qs = City.objects.filter(point__right=ks_border) self.assertEqual(2, len(qs)) for c in qs: self.assertIn(c.name, cities) # Note: Wellington has an 'X' value of 174, so it will not be considered # to the left of CO. vic = City.objects.get(point__left=co_border) self.assertEqual("Victoria", vic.name) cities = ["Pueblo", "Victoria"] qs = City.objects.filter(point__left=ks_border) self.assertEqual(2, len(qs)) for c in qs: self.assertIn(c.name, cities) @skipUnlessGISLookup("strictly_above", "strictly_below") def test_strictly_above_below_lookups(self): dallas = City.objects.get(name="Dallas") self.assertQuerysetEqual( City.objects.filter(point__strictly_above=dallas.point).order_by("name"), ["Chicago", "Lawrence", "Oklahoma City", "Pueblo", "Victoria"], lambda b: b.name, ) self.assertQuerysetEqual( City.objects.filter(point__strictly_below=dallas.point).order_by("name"), ["Houston", "Wellington"], lambda b: b.name, ) def test_equals_lookups(self): "Testing the 'same_as' and 'equals' lookup types." pnt = fromstr("POINT (-95.363151 29.763374)", srid=4326) c1 = City.objects.get(point=pnt) c2 = City.objects.get(point__same_as=pnt) c3 = City.objects.get(point__equals=pnt) for c in [c1, c2, c3]: self.assertEqual("Houston", c.name) @skipUnlessDBFeature("supports_null_geometries") def test_null_geometries(self): "Testing NULL geometry support, and the `isnull` lookup type." # Creating a state with a NULL boundary. 
State.objects.create(name="Puerto Rico") # Querying for both NULL and Non-NULL values. nullqs = State.objects.filter(poly__isnull=True) validqs = State.objects.filter(poly__isnull=False) # Puerto Rico should be NULL (it's a commonwealth unincorporated territory) self.assertEqual(1, len(nullqs)) self.assertEqual("Puerto Rico", nullqs[0].name) # GeometryField=None is an alias for __isnull=True. self.assertCountEqual(State.objects.filter(poly=None), nullqs) self.assertCountEqual(State.objects.exclude(poly=None), validqs) # The valid states should be Colorado & Kansas self.assertEqual(2, len(validqs)) state_names = [s.name for s in validqs] self.assertIn("Colorado", state_names) self.assertIn("Kansas", state_names) # Saving another commonwealth w/a NULL geometry. nmi = State.objects.create(name="Northern Mariana Islands", poly=None) self.assertIsNone(nmi.poly) # Assigning a geometry and saving -- then UPDATE back to NULL. nmi.poly = "POLYGON((0 0,1 0,1 1,1 0,0 0))" nmi.save() State.objects.filter(name="Northern Mariana Islands").update(poly=None) self.assertIsNone(State.objects.get(name="Northern Mariana Islands").poly) @skipUnlessDBFeature( "supports_null_geometries", "supports_crosses_lookup", "supports_relate_lookup" ) def test_null_geometries_excluded_in_lookups(self): """NULL features are excluded in spatial lookup functions.""" null = State.objects.create(name="NULL", poly=None) queries = [ ("equals", Point(1, 1)), ("disjoint", Point(1, 1)), ("touches", Point(1, 1)), ("crosses", LineString((0, 0), (1, 1), (5, 5))), ("within", Point(1, 1)), ("overlaps", LineString((0, 0), (1, 1), (5, 5))), ("contains", LineString((0, 0), (1, 1), (5, 5))), ("intersects", LineString((0, 0), (1, 1), (5, 5))), ("relate", (Point(1, 1), "T*T***FF*")), ("same_as", Point(1, 1)), ("exact", Point(1, 1)), ("coveredby", Point(1, 1)), ("covers", Point(1, 1)), ] for lookup, geom in queries: with self.subTest(lookup=lookup): self.assertNotIn( null, State.objects.filter(**{"poly__%s" % lookup: geom}) ) def test_wkt_string_in_lookup(self): # Valid WKT strings don't emit error logs. with self.assertNoLogs("django.contrib.gis", "ERROR"): State.objects.filter(poly__intersects="LINESTRING(0 0, 1 1, 5 5)") @skipUnlessDBFeature("supports_relate_lookup") def test_relate_lookup(self): "Testing the 'relate' lookup type." # To make things more interesting, we will have our Texas reference point in # different SRIDs. pnt1 = fromstr("POINT (649287.0363174 4177429.4494686)", srid=2847) pnt2 = fromstr("POINT(-98.4919715741052 29.4333344025053)", srid=4326) # Not passing in a geometry as first param raises a TypeError when # initializing the QuerySet. with self.assertRaises(ValueError): Country.objects.filter(mpoly__relate=(23, "foo")) # Making sure the right exception is raised for the given # bad arguments. for bad_args, e in [ ((pnt1, 0), ValueError), ((pnt2, "T*T***FF*", 0), ValueError), ]: qs = Country.objects.filter(mpoly__relate=bad_args) with self.assertRaises(e): qs.count() contains_mask = "T*T***FF*" within_mask = "T*F**F***" intersects_mask = "T********" # Relate works differently on Oracle. if connection.ops.oracle: contains_mask = "contains" within_mask = "inside" # TODO: This is not quite the same as the PostGIS mask above intersects_mask = "overlapbdyintersect" # Testing contains relation mask. 
if connection.features.supports_transform: self.assertEqual( Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name, "Texas", ) self.assertEqual( "Texas", Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name ) # Testing within relation mask. ks = State.objects.get(name="Kansas") self.assertEqual( "Lawrence", City.objects.get(point__relate=(ks.poly, within_mask)).name ) # Testing intersection relation mask. if not connection.ops.oracle: if connection.features.supports_transform: self.assertEqual( Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name, "Texas", ) self.assertEqual( "Texas", Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name ) self.assertEqual( "Lawrence", City.objects.get(point__relate=(ks.poly, intersects_mask)).name, ) # With a complex geometry expression mask = "anyinteract" if connection.ops.oracle else within_mask self.assertFalse( City.objects.exclude( point__relate=(functions.Union("point", "point"), mask) ) ) def test_gis_lookups_with_complex_expressions(self): multiple_arg_lookups = { "dwithin", "relate", } # These lookups are tested elsewhere. lookups = connection.ops.gis_operators.keys() - multiple_arg_lookups self.assertTrue(lookups, "No lookups found") for lookup in lookups: with self.subTest(lookup): City.objects.filter( **{"point__" + lookup: functions.Union("point", "point")} ).exists() def test_subquery_annotation(self): multifields = MultiFields.objects.create( city=City.objects.create(point=Point(1, 1)), point=Point(2, 2), poly=Polygon.from_bbox((0, 0, 2, 2)), ) qs = MultiFields.objects.annotate( city_point=Subquery( City.objects.filter( id=OuterRef("city"), ).values("point") ), ).filter( city_point__within=F("poly"), ) self.assertEqual(qs.get(), multifields) class GeoQuerySetTest(TestCase): # TODO: GeoQuerySet is removed, organize these test better. fixtures = ["initial"] @skipUnlessDBFeature("supports_extent_aggr") def test_extent(self): """ Testing the `Extent` aggregate. """ # Reference query: # SELECT ST_extent(point) # FROM geoapp_city # WHERE (name='Houston' or name='Dallas');` # => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203) expected = ( -96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820, ) qs = City.objects.filter(name__in=("Houston", "Dallas")) extent = qs.aggregate(Extent("point"))["point__extent"] for val, exp in zip(extent, expected): self.assertAlmostEqual(exp, val, 4) self.assertIsNone( City.objects.filter(name=("Smalltown")).aggregate(Extent("point"))[ "point__extent" ] ) @skipUnlessDBFeature("supports_extent_aggr") def test_extent_with_limit(self): """ Testing if extent supports limit. """ extent1 = City.objects.aggregate(Extent("point"))["point__extent"] extent2 = City.objects.all()[:3].aggregate(Extent("point"))["point__extent"] self.assertNotEqual(extent1, extent2) def test_make_line(self): """ Testing the `MakeLine` aggregate. 
""" if not connection.features.supports_make_line_aggr: with self.assertRaises(NotSupportedError): City.objects.aggregate(MakeLine("point")) return # MakeLine on an inappropriate field returns simply None self.assertIsNone(State.objects.aggregate(MakeLine("poly"))["poly__makeline"]) # Reference query: # SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city; ref_line = GEOSGeometry( "LINESTRING(-95.363151 29.763374,-96.801611 32.782057," "-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001," "-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)", srid=4326, ) # We check for equality with a tolerance of 10e-5 which is a lower bound # of the precisions of ref_line coordinates line = City.objects.aggregate(MakeLine("point"))["point__makeline"] self.assertTrue( ref_line.equals_exact(line, tolerance=10e-5), "%s != %s" % (ref_line, line) ) @skipUnlessDBFeature("supports_union_aggr") def test_unionagg(self): """ Testing the `Union` aggregate. """ tx = Country.objects.get(name="Texas").mpoly # Houston, Dallas -- Ordering may differ depending on backend or GEOS version. union = GEOSGeometry("MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)") qs = City.objects.filter(point__within=tx) with self.assertRaises(ValueError): qs.aggregate(Union("name")) # Using `field_name` keyword argument in one query and specifying an # order in the other (which should not be used because this is # an aggregate method on a spatial column) u1 = qs.aggregate(Union("point"))["point__union"] u2 = qs.order_by("name").aggregate(Union("point"))["point__union"] self.assertTrue(union.equals(u1)) self.assertTrue(union.equals(u2)) qs = City.objects.filter(name="NotACity") self.assertIsNone(qs.aggregate(Union("point"))["point__union"]) @skipUnlessDBFeature("supports_union_aggr") def test_geoagg_subquery(self): tx = Country.objects.get(name="Texas") union = GEOSGeometry("MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)") # Use distinct() to force the usage of a subquery for aggregation. with CaptureQueriesContext(connection) as ctx: self.assertIs( union.equals( City.objects.filter(point__within=tx.mpoly) .distinct() .aggregate( Union("point"), )["point__union"], ), True, ) self.assertIn("subquery", ctx.captured_queries[0]["sql"]) @skipUnlessDBFeature("supports_tolerance_parameter") def test_unionagg_tolerance(self): City.objects.create( point=fromstr("POINT(-96.467222 32.751389)", srid=4326), name="Forney", ) tx = Country.objects.get(name="Texas").mpoly # Tolerance is greater than distance between Forney and Dallas, that's # why Dallas is ignored. forney_houston = GEOSGeometry( "MULTIPOINT(-95.363151 29.763374, -96.467222 32.751389)", srid=4326, ) self.assertIs( forney_houston.equals_exact( City.objects.filter(point__within=tx).aggregate( Union("point", tolerance=32000), )["point__union"], tolerance=10e-6, ), True, ) @skipUnlessDBFeature("supports_tolerance_parameter") def test_unionagg_tolerance_escaping(self): tx = Country.objects.get(name="Texas").mpoly with self.assertRaises(DatabaseError): City.objects.filter(point__within=tx).aggregate( Union("point", tolerance="0.05))), (((1"), ) def test_within_subquery(self): """ Using a queryset inside a geo lookup is working (using a subquery) (#14483). 
""" tex_cities = City.objects.filter( point__within=Country.objects.filter(name="Texas").values("mpoly") ).order_by("name") self.assertEqual( list(tex_cities.values_list("name", flat=True)), ["Dallas", "Houston"] ) def test_non_concrete_field(self): NonConcreteModel.objects.create(point=Point(0, 0), name="name") list(NonConcreteModel.objects.all()) def test_values_srid(self): for c, v in zip(City.objects.all(), City.objects.values()): self.assertEqual(c.point.srid, v["point"].srid)
10c7d634b59225f359cf708a280e8284ca4278a080d3f9fada906260bb83231e
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union from django.contrib.gis.geos import GEOSGeometry, MultiPoint, Point from django.db import NotSupportedError, connection from django.test import TestCase, skipUnlessDBFeature from django.test.utils import override_settings from django.utils import timezone from .models import Article, Author, Book, City, DirectoryEntry, Event, Location, Parcel class RelatedGeoModelTest(TestCase): fixtures = ["initial"] def test02_select_related(self): "Testing `select_related` on geographic models (see #7126)." qs1 = City.objects.order_by("id") qs2 = City.objects.order_by("id").select_related() qs3 = City.objects.order_by("id").select_related("location") # Reference data for what's in the fixtures. cities = ( ("Aurora", "TX", -97.516111, 33.058333), ("Roswell", "NM", -104.528056, 33.387222), ("Kecksburg", "PA", -79.460734, 40.18476), ) for qs in (qs1, qs2, qs3): for ref, c in zip(cities, qs): nm, st, lon, lat = ref self.assertEqual(nm, c.name) self.assertEqual(st, c.state) self.assertAlmostEqual(lon, c.location.point.x, 6) self.assertAlmostEqual(lat, c.location.point.y, 6) @skipUnlessDBFeature("supports_extent_aggr") def test_related_extent_aggregate(self): "Testing the `Extent` aggregate on related geographic models." # This combines the Extent and Union aggregates into one query aggs = City.objects.aggregate(Extent("location__point")) # One for all locations, one that excludes New Mexico (Roswell). all_extent = (-104.528056, 29.763374, -79.460734, 40.18476) txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476) e1 = City.objects.aggregate(Extent("location__point"))[ "location__point__extent" ] e2 = City.objects.exclude(state="NM").aggregate(Extent("location__point"))[ "location__point__extent" ] e3 = aggs["location__point__extent"] # The tolerance value is to four decimal places because of differences # between the Oracle and PostGIS spatial backends on the extent calculation. tol = 4 for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]: for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol) @skipUnlessDBFeature("supports_extent_aggr") def test_related_extent_annotate(self): """ Test annotation with Extent GeoAggregate. """ cities = City.objects.annotate( points_extent=Extent("location__point") ).order_by("name") tol = 4 self.assertAlmostEqual( cities[0].points_extent, (-97.516111, 33.058333, -97.516111, 33.058333), tol ) @skipUnlessDBFeature("supports_union_aggr") def test_related_union_aggregate(self): "Testing the `Union` aggregate on related geographic models." # This combines the Extent and Union aggregates into one query aggs = City.objects.aggregate(Union("location__point")) # These are the points that are components of the aggregate geographic # union that is returned. Each point # corresponds to City PK. p1 = Point(-104.528056, 33.387222) p2 = Point(-97.516111, 33.058333) p3 = Point(-79.460734, 40.18476) p4 = Point(-96.801611, 32.782057) p5 = Point(-95.363151, 29.763374) # The second union aggregate is for a union # query that includes limiting information in the WHERE clause (in other # words a `.filter()` precedes the call to `.aggregate(Union()`). 
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326) ref_u2 = MultiPoint(p2, p3, srid=4326) u1 = City.objects.aggregate(Union("location__point"))["location__point__union"] u2 = City.objects.exclude( name__in=("Roswell", "Houston", "Dallas", "Fort Worth"), ).aggregate(Union("location__point"))["location__point__union"] u3 = aggs["location__point__union"] self.assertEqual(type(u1), MultiPoint) self.assertEqual(type(u3), MultiPoint) # Ordering of points in the result of the union is not defined and # implementation-dependent (DB backend, GEOS version) self.assertEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u1}) self.assertEqual({p.ewkt for p in ref_u2}, {p.ewkt for p in u2}) self.assertEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u3}) def test05_select_related_fk_to_subclass(self): """ select_related on a query over a model with an FK to a model subclass. """ # Regression test for #9752. list(DirectoryEntry.objects.select_related()) def test06_f_expressions(self): "Testing F() expressions on GeometryFields." # Constructing a dummy parcel border and getting the City instance for # assigning the FK. b1 = GEOSGeometry( "POLYGON((-97.501205 33.052520,-97.501205 33.052576," "-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))", srid=4326, ) pcity = City.objects.get(name="Aurora") # First parcel has incorrect center point that is equal to the City; # it also has a second border that is different from the first as a # 100ft buffer around the City. c1 = pcity.location.point c2 = c1.transform(2276, clone=True) b2 = c2.buffer(100) Parcel.objects.create( name="P1", city=pcity, center1=c1, center2=c2, border1=b1, border2=b2 ) # Now creating a second Parcel where the borders are the same, just # in different coordinate systems. The center points are also the # same (but in different coordinate systems), and this time they # actually correspond to the centroid of the border. c1 = b1.centroid c2 = c1.transform(2276, clone=True) b2 = ( b1 if connection.features.supports_transform else b1.transform(2276, clone=True) ) Parcel.objects.create( name="P2", city=pcity, center1=c1, center2=c2, border1=b1, border2=b2 ) # Should return the second Parcel, which has the center within the # border. qs = Parcel.objects.filter(center1__within=F("border1")) self.assertEqual(1, len(qs)) self.assertEqual("P2", qs[0].name) # This time center2 is in a different coordinate system and needs to be # wrapped in transformation SQL. qs = Parcel.objects.filter(center2__within=F("border1")) if connection.features.supports_transform: self.assertEqual("P2", qs.get().name) else: msg = "This backend doesn't support the Transform function." with self.assertRaisesMessage(NotSupportedError, msg): list(qs) # Should return the first Parcel, which has the center point equal # to the point in the City ForeignKey. qs = Parcel.objects.filter(center1=F("city__location__point")) self.assertEqual(1, len(qs)) self.assertEqual("P1", qs[0].name) # This time the city column should be wrapped in transformation SQL. qs = Parcel.objects.filter(border2__contains=F("city__location__point")) if connection.features.supports_transform: self.assertEqual("P1", qs.get().name) else: msg = "This backend doesn't support the Transform function." with self.assertRaisesMessage(NotSupportedError, msg): list(qs) def test07_values(self): "Testing values() and values_list()." 
gqs = Location.objects.all() gvqs = Location.objects.values() gvlqs = Location.objects.values_list() # Incrementing through each of the models, dictionaries, and tuples # returned by each QuerySet. for m, d, t in zip(gqs, gvqs, gvlqs): # The values should be Geometry objects and not raw strings returned # by the spatial database. self.assertIsInstance(d["point"], GEOSGeometry) self.assertIsInstance(t[1], GEOSGeometry) self.assertEqual(m.point, d["point"]) self.assertEqual(m.point, t[1]) @override_settings(USE_TZ=True) def test_07b_values(self): "Testing values() and values_list() with aware datetime. See #21565." Event.objects.create(name="foo", when=timezone.now()) list(Event.objects.values_list("when")) def test08_defer_only(self): "Testing defer() and only() on Geographic models." qs = Location.objects.all().order_by("pk") def_qs = Location.objects.defer("point").order_by("pk") for loc, def_loc in zip(qs, def_qs): self.assertEqual(loc.point, def_loc.point) def test09_pk_relations(self): "Ensuring correct primary key column is selected across relations. See #10757." # The expected ID values -- notice the last two location IDs # are out of order. Dallas and Houston have location IDs that differ # from their PKs -- this is done to ensure that the related location # ID column is selected instead of ID column for the city. city_ids = (1, 2, 3, 4, 5) loc_ids = (1, 2, 3, 5, 4) ids_qs = City.objects.order_by("id").values("id", "location__id") for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids): self.assertEqual(val_dict["id"], c_id) self.assertEqual(val_dict["location__id"], l_id) def test10_combine(self): "Testing the combination of two QuerySets (#10807)." buf1 = City.objects.get(name="Aurora").location.point.buffer(0.1) buf2 = City.objects.get(name="Kecksburg").location.point.buffer(0.1) qs1 = City.objects.filter(location__point__within=buf1) qs2 = City.objects.filter(location__point__within=buf2) combined = qs1 | qs2 names = [c.name for c in combined] self.assertEqual(2, len(names)) self.assertIn("Aurora", names) self.assertIn("Kecksburg", names) @skipUnlessDBFeature("allows_group_by_lob") def test12a_count(self): "Testing `Count` aggregate on geo-fields." # The City, 'Fort Worth' uses the same location as Dallas. dallas = City.objects.get(name="Dallas") # Count annotation should be 2 for the Dallas location now. loc = Location.objects.annotate(num_cities=Count("city")).get( id=dallas.location.id ) self.assertEqual(2, loc.num_cities) def test12b_count(self): "Testing `Count` aggregate on non geo-fields." # Should only be one author (Trevor Paglen) returned by this query, and # the annotation should have 3 for the number of books, see #11087. # Also testing with a values(), see #11489. qs = Author.objects.annotate(num_books=Count("books")).filter(num_books__gt=1) vqs = ( Author.objects.values("name") .annotate(num_books=Count("books")) .filter(num_books__gt=1) ) self.assertEqual(1, len(qs)) self.assertEqual(3, qs[0].num_books) self.assertEqual(1, len(vqs)) self.assertEqual(3, vqs[0]["num_books"]) @skipUnlessDBFeature("allows_group_by_lob") def test13c_count(self): "Testing `Count` aggregate with `.values()`. See #15305." qs = ( Location.objects.filter(id=5) .annotate(num_cities=Count("city")) .values("id", "point", "num_cities") ) self.assertEqual(1, len(qs)) self.assertEqual(2, qs[0]["num_cities"]) self.assertIsInstance(qs[0]["point"], GEOSGeometry) def test13_select_related_null_fk(self): "Testing `select_related` on a nullable ForeignKey." 
        Book.objects.create(title="Without Author")
        b = Book.objects.select_related("author").get(title="Without Author")
        # Should be `None`, and not a 'dummy' model.
        self.assertIsNone(b.author)

    @skipUnlessDBFeature("supports_collect_aggr")
    def test_collect(self):
        """
        Testing the `Collect` aggregate.
        """
        # Reference query:
        # SELECT AsText(ST_Collect("relatedapp_location"."point"))
        # FROM "relatedapp_city"
        # LEFT OUTER JOIN
        #     "relatedapp_location" ON (
        #         "relatedapp_city"."location_id" = "relatedapp_location"."id"
        #     )
        # WHERE "relatedapp_city"."state" = 'TX';
        ref_geom = GEOSGeometry(
            "MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,"
            "-95.363151 29.763374,-96.801611 32.782057)"
        )
        coll = City.objects.filter(state="TX").aggregate(Collect("location__point"))[
            "location__point__collect"
        ]
        # Even though Dallas and Ft. Worth share same point, Collect doesn't
        # consolidate -- that's why 4 points in MultiPoint.
        self.assertEqual(4, len(coll))
        self.assertTrue(ref_geom.equals(coll))

    def test15_invalid_select_related(self):
        """
        select_related on the related name manager of a unique FK.
        """
        qs = Article.objects.select_related("author__article")
        # This triggers TypeError when `get_default_columns` has no `local_only`
        # keyword. The TypeError is swallowed if QuerySet is actually
        # evaluated as list generation swallows TypeError in CPython.
        str(qs.query)

    def test16_annotated_date_queryset(self):
        "Ensure annotated date querysets work if spatial backend is used. See #14648."
        birth_years = [
            dt.year
            for dt in list(
                Author.objects.annotate(num_books=Count("books")).dates("dob", "year")
            )
        ]
        birth_years.sort()
        self.assertEqual([1950, 1974], birth_years)


# TODO: Related tests for KML, GML, and distance lookups.
d81ffe730f6e704eaffe92147adbd72b795aad9d6a1638e8123a80704c1083a9
from django.db import migrations
from django.db.migrations.operations.base import Operation


class DummyOperation(Operation):
    def state_forwards(self, app_label, state):
        pass

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        pass

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass


try:
    from django.contrib.postgres.operations import CryptoExtension
except ImportError:
    CryptoExtension = DummyOperation


class Migration(migrations.Migration):
    # Required for the SHA database functions.
    operations = [CryptoExtension()]
27e459d36d6c6faca831134c88c4dd0918f1ecbdcaa717181a4668a15d06d7f0
import unittest from datetime import datetime, timedelta from datetime import timezone as datetime_timezone try: import zoneinfo except ImportError: from backports import zoneinfo try: import pytz except ImportError: pytz = None from django.conf import settings from django.db import DataError, OperationalError from django.db.models import ( DateField, DateTimeField, F, IntegerField, Max, OuterRef, Subquery, TimeField, ) from django.db.models.functions import ( Extract, ExtractDay, ExtractHour, ExtractIsoWeekDay, ExtractIsoYear, ExtractMinute, ExtractMonth, ExtractQuarter, ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate, TruncDay, TruncHour, TruncMinute, TruncMonth, TruncQuarter, TruncSecond, TruncTime, TruncWeek, TruncYear, ) from django.test import ( TestCase, ignore_warnings, override_settings, skipIfDBFeature, skipUnlessDBFeature, ) from django.utils import timezone from django.utils.deprecation import RemovedInDjango50Warning from ..models import Author, DTModel, Fan HAS_PYTZ = pytz is not None if not HAS_PYTZ: needs_pytz = unittest.skip("Test requires pytz") else: def needs_pytz(f): return f ZONE_CONSTRUCTORS = (zoneinfo.ZoneInfo,) if HAS_PYTZ: ZONE_CONSTRUCTORS += (pytz.timezone,) def truncate_to(value, kind, tzinfo=None): # Convert to target timezone before truncation if tzinfo is not None: value = value.astimezone(tzinfo) def truncate(value, kind): if kind == "second": return value.replace(microsecond=0) if kind == "minute": return value.replace(second=0, microsecond=0) if kind == "hour": return value.replace(minute=0, second=0, microsecond=0) if kind == "day": if isinstance(value, datetime): return value.replace(hour=0, minute=0, second=0, microsecond=0) return value if kind == "week": if isinstance(value, datetime): return (value - timedelta(days=value.weekday())).replace( hour=0, minute=0, second=0, microsecond=0 ) return value - timedelta(days=value.weekday()) if kind == "month": if isinstance(value, datetime): return value.replace(day=1, hour=0, minute=0, second=0, microsecond=0) return value.replace(day=1) if kind == "quarter": month_in_quarter = value.month - (value.month - 1) % 3 if isinstance(value, datetime): return value.replace( month=month_in_quarter, day=1, hour=0, minute=0, second=0, microsecond=0, ) return value.replace(month=month_in_quarter, day=1) # otherwise, truncate to year if isinstance(value, datetime): return value.replace( month=1, day=1, hour=0, minute=0, second=0, microsecond=0 ) return value.replace(month=1, day=1) value = truncate(value, kind) if tzinfo is not None: # If there was a daylight saving transition, then reset the timezone. value = timezone.make_aware(value.replace(tzinfo=None), tzinfo) return value @override_settings(USE_TZ=False) class DateFunctionTests(TestCase): def create_model(self, start_datetime, end_datetime): return DTModel.objects.create( name=start_datetime.isoformat() if start_datetime else "None", start_datetime=start_datetime, end_datetime=end_datetime, start_date=start_datetime.date() if start_datetime else None, end_date=end_datetime.date() if end_datetime else None, start_time=start_datetime.time() if start_datetime else None, end_time=end_datetime.time() if end_datetime else None, duration=(end_datetime - start_datetime) if start_datetime and end_datetime else None, ) def test_extract_year_exact_lookup(self): """ Extract year uses a BETWEEN filter to compare the year to allow indexes to be used. 
""" start_datetime = datetime(2015, 6, 15, 14, 10) end_datetime = datetime(2016, 6, 15, 14, 10) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) for lookup in ("year", "iso_year"): with self.subTest(lookup): qs = DTModel.objects.filter( **{"start_datetime__%s__exact" % lookup: 2015} ) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(" between "), 1) self.assertEqual(query_string.count("extract"), 0) # exact is implied and should be the same qs = DTModel.objects.filter(**{"start_datetime__%s" % lookup: 2015}) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(" between "), 1) self.assertEqual(query_string.count("extract"), 0) # date and datetime fields should behave the same qs = DTModel.objects.filter(**{"start_date__%s" % lookup: 2015}) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(" between "), 1) self.assertEqual(query_string.count("extract"), 0) # an expression rhs cannot use the between optimization. qs = DTModel.objects.annotate( start_year=ExtractYear("start_datetime"), ).filter(end_datetime__year=F("start_year") + 1) self.assertEqual(qs.count(), 1) query_string = str(qs.query).lower() self.assertEqual(query_string.count(" between "), 0) self.assertEqual(query_string.count("extract"), 3) def test_extract_year_greaterthan_lookup(self): start_datetime = datetime(2015, 6, 15, 14, 10) end_datetime = datetime(2016, 6, 15, 14, 10) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) for lookup in ("year", "iso_year"): with self.subTest(lookup): qs = DTModel.objects.filter(**{"start_datetime__%s__gt" % lookup: 2015}) self.assertEqual(qs.count(), 1) self.assertEqual(str(qs.query).lower().count("extract"), 0) qs = DTModel.objects.filter( **{"start_datetime__%s__gte" % lookup: 2015} ) self.assertEqual(qs.count(), 2) self.assertEqual(str(qs.query).lower().count("extract"), 0) qs = DTModel.objects.annotate( start_year=ExtractYear("start_datetime"), ).filter(**{"end_datetime__%s__gte" % lookup: F("start_year")}) self.assertEqual(qs.count(), 1) self.assertGreaterEqual(str(qs.query).lower().count("extract"), 2) def test_extract_year_lessthan_lookup(self): start_datetime = datetime(2015, 6, 15, 14, 10) end_datetime = datetime(2016, 6, 15, 14, 10) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) for lookup in ("year", "iso_year"): with self.subTest(lookup): qs = DTModel.objects.filter(**{"start_datetime__%s__lt" % lookup: 2016}) self.assertEqual(qs.count(), 1) self.assertEqual(str(qs.query).count("extract"), 0) qs = DTModel.objects.filter( **{"start_datetime__%s__lte" % lookup: 2016} ) self.assertEqual(qs.count(), 2) self.assertEqual(str(qs.query).count("extract"), 0) qs = DTModel.objects.annotate( end_year=ExtractYear("end_datetime"), ).filter(**{"start_datetime__%s__lte" % lookup: F("end_year")}) self.assertEqual(qs.count(), 1) self.assertGreaterEqual(str(qs.query).lower().count("extract"), 2) def 
test_extract_lookup_name_sql_injection(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) with self.assertRaises((DataError, OperationalError, ValueError)): DTModel.objects.filter( start_datetime__year=Extract( "start_datetime", "day' FROM start_datetime)) OR 1=1;--" ) ).exists() def test_extract_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) with self.assertRaisesMessage(ValueError, "lookup_name must be provided"): Extract("start_datetime") msg = ( "Extract input expression must be DateField, DateTimeField, TimeField, or " "DurationField." ) with self.assertRaisesMessage(ValueError, msg): list(DTModel.objects.annotate(extracted=Extract("name", "hour"))) with self.assertRaisesMessage( ValueError, "Cannot extract time component 'second' from DateField 'start_date'.", ): list(DTModel.objects.annotate(extracted=Extract("start_date", "second"))) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=Extract("start_datetime", "year") ).order_by("start_datetime"), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=Extract("start_datetime", "quarter") ).order_by("start_datetime"), [(start_datetime, 2), (end_datetime, 2)], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=Extract("start_datetime", "month") ).order_by("start_datetime"), [ (start_datetime, start_datetime.month), (end_datetime, end_datetime.month), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=Extract("start_datetime", "day") ).order_by("start_datetime"), [(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=Extract("start_datetime", "week") ).order_by("start_datetime"), [(start_datetime, 25), (end_datetime, 24)], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=Extract("start_datetime", "week_day") ).order_by("start_datetime"), [ (start_datetime, (start_datetime.isoweekday() % 7) + 1), (end_datetime, (end_datetime.isoweekday() % 7) + 1), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=Extract("start_datetime", "iso_week_day"), ).order_by("start_datetime"), [ (start_datetime, start_datetime.isoweekday()), (end_datetime, end_datetime.isoweekday()), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=Extract("start_datetime", "hour") ).order_by("start_datetime"), [(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=Extract("start_datetime", "minute") ).order_by("start_datetime"), [ 
(start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=Extract("start_datetime", "second") ).order_by("start_datetime"), [ (start_datetime, start_datetime.second), (end_datetime, end_datetime.second), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime__year=Extract("start_datetime", "year") ).count(), 2, ) self.assertEqual( DTModel.objects.filter( start_datetime__hour=Extract("start_datetime", "hour") ).count(), 2, ) self.assertEqual( DTModel.objects.filter( start_date__month=Extract("start_date", "month") ).count(), 2, ) self.assertEqual( DTModel.objects.filter( start_time__hour=Extract("start_time", "hour") ).count(), 2, ) def test_extract_none(self): self.create_model(None, None) for t in ( Extract("start_datetime", "year"), Extract("start_date", "year"), Extract("start_time", "hour"), ): with self.subTest(t): self.assertIsNone( DTModel.objects.annotate(extracted=t).first().extracted ) def test_extract_outerref_validation(self): inner_qs = DTModel.objects.filter(name=ExtractMonth(OuterRef("name"))) msg = ( "Extract input expression must be DateField, DateTimeField, " "TimeField, or DurationField." ) with self.assertRaisesMessage(ValueError, msg): DTModel.objects.annotate(related_name=Subquery(inner_qs.values("name")[:1])) @skipUnlessDBFeature("has_native_duration_field") def test_extract_duration(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=Extract("duration", "second")).order_by( "start_datetime" ), [ (start_datetime, (end_datetime - start_datetime).seconds % 60), (end_datetime, (start_datetime - end_datetime).seconds % 60), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.annotate( duration_days=Extract("duration", "day"), ) .filter(duration_days__gt=200) .count(), 1, ) @skipIfDBFeature("has_native_duration_field") def test_extract_duration_without_native_duration_field(self): msg = "Extract requires native DurationField database support." with self.assertRaisesMessage(ValueError, msg): list(DTModel.objects.annotate(extracted=Extract("duration", "second"))) def test_extract_duration_unsupported_lookups(self): msg = "Cannot extract component '%s' from DurationField 'duration'." 
for lookup in ( "year", "iso_year", "month", "week", "week_day", "iso_week_day", "quarter", ): with self.subTest(lookup): with self.assertRaisesMessage(ValueError, msg % lookup): DTModel.objects.annotate(extracted=Extract("duration", lookup)) def test_extract_year_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractYear("start_datetime")).order_by( "start_datetime" ), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractYear("start_date")).order_by( "start_datetime" ), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime__year=ExtractYear("start_datetime") ).count(), 2, ) def test_extract_iso_year_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=ExtractIsoYear("start_datetime") ).order_by("start_datetime"), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractIsoYear("start_date")).order_by( "start_datetime" ), [(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)], lambda m: (m.start_datetime, m.extracted), ) # Both dates are from the same week year. 
self.assertEqual( DTModel.objects.filter( start_datetime__iso_year=ExtractIsoYear("start_datetime") ).count(), 2, ) def test_extract_iso_year_func_boundaries(self): end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: end_datetime = timezone.make_aware(end_datetime) week_52_day_2014 = datetime(2014, 12, 27, 13, 0) # Sunday week_1_day_2014_2015 = datetime(2014, 12, 31, 13, 0) # Wednesday week_53_day_2015 = datetime(2015, 12, 31, 13, 0) # Thursday if settings.USE_TZ: week_1_day_2014_2015 = timezone.make_aware(week_1_day_2014_2015) week_52_day_2014 = timezone.make_aware(week_52_day_2014) week_53_day_2015 = timezone.make_aware(week_53_day_2015) days = [week_52_day_2014, week_1_day_2014_2015, week_53_day_2015] obj_1_iso_2014 = self.create_model(week_52_day_2014, end_datetime) obj_1_iso_2015 = self.create_model(week_1_day_2014_2015, end_datetime) obj_2_iso_2015 = self.create_model(week_53_day_2015, end_datetime) qs = ( DTModel.objects.filter(start_datetime__in=days) .annotate( extracted=ExtractIsoYear("start_datetime"), ) .order_by("start_datetime") ) self.assertQuerysetEqual( qs, [ (week_52_day_2014, 2014), (week_1_day_2014_2015, 2015), (week_53_day_2015, 2015), ], lambda m: (m.start_datetime, m.extracted), ) qs = DTModel.objects.filter( start_datetime__iso_year=2015, ).order_by("start_datetime") self.assertSequenceEqual(qs, [obj_1_iso_2015, obj_2_iso_2015]) qs = DTModel.objects.filter( start_datetime__iso_year__gt=2014, ).order_by("start_datetime") self.assertSequenceEqual(qs, [obj_1_iso_2015, obj_2_iso_2015]) qs = DTModel.objects.filter( start_datetime__iso_year__lte=2014, ).order_by("start_datetime") self.assertSequenceEqual(qs, [obj_1_iso_2014]) def test_extract_month_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractMonth("start_datetime")).order_by( "start_datetime" ), [ (start_datetime, start_datetime.month), (end_datetime, end_datetime.month), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractMonth("start_date")).order_by( "start_datetime" ), [ (start_datetime, start_datetime.month), (end_datetime, end_datetime.month), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime__month=ExtractMonth("start_datetime") ).count(), 2, ) def test_extract_day_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractDay("start_datetime")).order_by( "start_datetime" ), [(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractDay("start_date")).order_by( "start_datetime" ), [(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( 
start_datetime__day=ExtractDay("start_datetime") ).count(), 2, ) def test_extract_week_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractWeek("start_datetime")).order_by( "start_datetime" ), [(start_datetime, 25), (end_datetime, 24)], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractWeek("start_date")).order_by( "start_datetime" ), [(start_datetime, 25), (end_datetime, 24)], lambda m: (m.start_datetime, m.extracted), ) # both dates are from the same week. self.assertEqual( DTModel.objects.filter( start_datetime__week=ExtractWeek("start_datetime") ).count(), 2, ) def test_extract_quarter_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 8, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=ExtractQuarter("start_datetime") ).order_by("start_datetime"), [(start_datetime, 2), (end_datetime, 3)], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractQuarter("start_date")).order_by( "start_datetime" ), [(start_datetime, 2), (end_datetime, 3)], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime__quarter=ExtractQuarter("start_datetime") ).count(), 2, ) def test_extract_quarter_func_boundaries(self): end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: end_datetime = timezone.make_aware(end_datetime) last_quarter_2014 = datetime(2014, 12, 31, 13, 0) first_quarter_2015 = datetime(2015, 1, 1, 13, 0) if settings.USE_TZ: last_quarter_2014 = timezone.make_aware(last_quarter_2014) first_quarter_2015 = timezone.make_aware(first_quarter_2015) dates = [last_quarter_2014, first_quarter_2015] self.create_model(last_quarter_2014, end_datetime) self.create_model(first_quarter_2015, end_datetime) qs = ( DTModel.objects.filter(start_datetime__in=dates) .annotate( extracted=ExtractQuarter("start_datetime"), ) .order_by("start_datetime") ) self.assertQuerysetEqual( qs, [ (last_quarter_2014, 4), (first_quarter_2015, 1), ], lambda m: (m.start_datetime, m.extracted), ) def test_extract_week_func_boundaries(self): end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: end_datetime = timezone.make_aware(end_datetime) week_52_day_2014 = datetime(2014, 12, 27, 13, 0) # Sunday week_1_day_2014_2015 = datetime(2014, 12, 31, 13, 0) # Wednesday week_53_day_2015 = datetime(2015, 12, 31, 13, 0) # Thursday if settings.USE_TZ: week_1_day_2014_2015 = timezone.make_aware(week_1_day_2014_2015) week_52_day_2014 = timezone.make_aware(week_52_day_2014) week_53_day_2015 = timezone.make_aware(week_53_day_2015) days = [week_52_day_2014, week_1_day_2014_2015, week_53_day_2015] self.create_model(week_53_day_2015, end_datetime) self.create_model(week_52_day_2014, end_datetime) self.create_model(week_1_day_2014_2015, end_datetime) qs = ( DTModel.objects.filter(start_datetime__in=days) 
.annotate( extracted=ExtractWeek("start_datetime"), ) .order_by("start_datetime") ) self.assertQuerysetEqual( qs, [ (week_52_day_2014, 52), (week_1_day_2014_2015, 1), (week_53_day_2015, 53), ], lambda m: (m.start_datetime, m.extracted), ) def test_extract_weekday_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=ExtractWeekDay("start_datetime") ).order_by("start_datetime"), [ (start_datetime, (start_datetime.isoweekday() % 7) + 1), (end_datetime, (end_datetime.isoweekday() % 7) + 1), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractWeekDay("start_date")).order_by( "start_datetime" ), [ (start_datetime, (start_datetime.isoweekday() % 7) + 1), (end_datetime, (end_datetime.isoweekday() % 7) + 1), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime__week_day=ExtractWeekDay("start_datetime") ).count(), 2, ) def test_extract_iso_weekday_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=ExtractIsoWeekDay("start_datetime"), ).order_by("start_datetime"), [ (start_datetime, start_datetime.isoweekday()), (end_datetime, end_datetime.isoweekday()), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=ExtractIsoWeekDay("start_date"), ).order_by("start_datetime"), [ (start_datetime, start_datetime.isoweekday()), (end_datetime, end_datetime.isoweekday()), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime__week_day=ExtractWeekDay("start_datetime"), ).count(), 2, ) def test_extract_hour_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractHour("start_datetime")).order_by( "start_datetime" ), [(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractHour("start_time")).order_by( "start_datetime" ), [(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime__hour=ExtractHour("start_datetime") ).count(), 2, ) def test_extract_minute_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) 
self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=ExtractMinute("start_datetime") ).order_by("start_datetime"), [ (start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractMinute("start_time")).order_by( "start_datetime" ), [ (start_datetime, start_datetime.minute), (end_datetime, end_datetime.minute), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime__minute=ExtractMinute("start_datetime") ).count(), 2, ) def test_extract_second_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate( extracted=ExtractSecond("start_datetime") ).order_by("start_datetime"), [ (start_datetime, start_datetime.second), (end_datetime, end_datetime.second), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=ExtractSecond("start_time")).order_by( "start_datetime" ), [ (start_datetime, start_datetime.second), (end_datetime, end_datetime.second), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime__second=ExtractSecond("start_datetime") ).count(), 2, ) def test_extract_second_func_no_fractional(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 30, 50, 783) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) obj = self.create_model(start_datetime, end_datetime) self.assertSequenceEqual( DTModel.objects.filter(start_datetime__second=F("end_datetime__second")), [obj], ) self.assertSequenceEqual( DTModel.objects.filter(start_time__second=F("end_time__second")), [obj], ) def test_trunc_lookup_name_sql_injection(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) # Database backends raise an exception or don't return any results. 
try: exists = DTModel.objects.filter( start_datetime__date=Trunc( "start_datetime", "year', start_datetime)) OR 1=1;--", ) ).exists() except (DataError, OperationalError): pass else: self.assertIs(exists, False) def test_trunc_func(self): start_datetime = datetime(999, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) def test_datetime_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc( "start_datetime", kind, output_field=DateTimeField() ) ).order_by("start_datetime"), [ (start_datetime, truncate_to(start_datetime, kind)), (end_datetime, truncate_to(end_datetime, kind)), ], lambda m: (m.start_datetime, m.truncated), ) def test_date_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc("start_date", kind, output_field=DateField()) ).order_by("start_datetime"), [ (start_datetime, truncate_to(start_datetime.date(), kind)), (end_datetime, truncate_to(end_datetime.date(), kind)), ], lambda m: (m.start_datetime, m.truncated), ) def test_time_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc("start_time", kind, output_field=TimeField()) ).order_by("start_datetime"), [ (start_datetime, truncate_to(start_datetime.time(), kind)), (end_datetime, truncate_to(end_datetime.time(), kind)), ], lambda m: (m.start_datetime, m.truncated), ) def test_datetime_to_time_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc("start_datetime", kind, output_field=TimeField()), ).order_by("start_datetime"), [ (start_datetime, truncate_to(start_datetime.time(), kind)), (end_datetime, truncate_to(end_datetime.time(), kind)), ], lambda m: (m.start_datetime, m.truncated), ) test_date_kind("year") test_date_kind("quarter") test_date_kind("month") test_date_kind("day") test_time_kind("hour") test_time_kind("minute") test_time_kind("second") test_datetime_kind("year") test_datetime_kind("quarter") test_datetime_kind("month") test_datetime_kind("day") test_datetime_kind("hour") test_datetime_kind("minute") test_datetime_kind("second") test_datetime_to_time_kind("hour") test_datetime_to_time_kind("minute") test_datetime_to_time_kind("second") qs = DTModel.objects.filter( start_datetime__date=Trunc( "start_datetime", "day", output_field=DateField() ) ) self.assertEqual(qs.count(), 2) def _test_trunc_week(self, start_datetime, end_datetime): if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc("start_datetime", "week", output_field=DateTimeField()) ).order_by("start_datetime"), [ (start_datetime, truncate_to(start_datetime, "week")), (end_datetime, truncate_to(end_datetime, "week")), ], lambda m: (m.start_datetime, m.truncated), ) self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc("start_date", "week", output_field=DateField()) ).order_by("start_datetime"), [ (start_datetime, truncate_to(start_datetime.date(), "week")), (end_datetime, truncate_to(end_datetime.date(), "week")), ], lambda m: (m.start_datetime, m.truncated), ) def test_trunc_week(self): self._test_trunc_week( start_datetime=datetime(2015, 6, 15, 14, 30, 50, 321), 
end_datetime=datetime(2016, 6, 15, 14, 10, 50, 123), ) def test_trunc_week_before_1000(self): self._test_trunc_week( start_datetime=datetime(999, 6, 15, 14, 30, 50, 321), end_datetime=datetime(2016, 6, 15, 14, 10, 50, 123), ) def test_trunc_invalid_arguments(self): msg = "output_field must be either DateField, TimeField, or DateTimeField" with self.assertRaisesMessage(ValueError, msg): list( DTModel.objects.annotate( truncated=Trunc( "start_datetime", "year", output_field=IntegerField() ), ) ) msg = "'name' isn't a DateField, TimeField, or DateTimeField." with self.assertRaisesMessage(TypeError, msg): list( DTModel.objects.annotate( truncated=Trunc("name", "year", output_field=DateTimeField()), ) ) msg = "Cannot truncate DateField 'start_date' to DateTimeField" with self.assertRaisesMessage(ValueError, msg): list(DTModel.objects.annotate(truncated=Trunc("start_date", "second"))) with self.assertRaisesMessage(ValueError, msg): list( DTModel.objects.annotate( truncated=Trunc( "start_date", "month", output_field=DateTimeField() ), ) ) msg = "Cannot truncate TimeField 'start_time' to DateTimeField" with self.assertRaisesMessage(ValueError, msg): list(DTModel.objects.annotate(truncated=Trunc("start_time", "month"))) with self.assertRaisesMessage(ValueError, msg): list( DTModel.objects.annotate( truncated=Trunc( "start_time", "second", output_field=DateTimeField() ), ) ) def test_trunc_none(self): self.create_model(None, None) for t in ( Trunc("start_datetime", "year"), Trunc("start_date", "year"), Trunc("start_time", "hour"), ): with self.subTest(t): self.assertIsNone( DTModel.objects.annotate(truncated=t).first().truncated ) def test_trunc_year_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), "year") if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncYear("start_datetime")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime, "year")), (end_datetime, truncate_to(end_datetime, "year")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncYear("start_date")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime.date(), "year")), (end_datetime, truncate_to(end_datetime.date(), "year")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter(start_datetime=TruncYear("start_datetime")).count(), 1, ) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField" ): list(DTModel.objects.annotate(truncated=TruncYear("start_time"))) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField" ): list( DTModel.objects.annotate( truncated=TruncYear("start_time", output_field=TimeField()) ) ) def test_trunc_quarter_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 10, 15, 14, 10, 50, 123), "quarter") last_quarter_2015 = truncate_to( datetime(2015, 12, 31, 14, 10, 50, 123), "quarter" ) first_quarter_2016 = truncate_to( datetime(2016, 1, 1, 14, 10, 50, 123), "quarter" ) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) last_quarter_2015 = 
timezone.make_aware(last_quarter_2015) first_quarter_2016 = timezone.make_aware(first_quarter_2016) self.create_model(start_datetime=start_datetime, end_datetime=end_datetime) self.create_model(start_datetime=end_datetime, end_datetime=start_datetime) self.create_model(start_datetime=last_quarter_2015, end_datetime=end_datetime) self.create_model(start_datetime=first_quarter_2016, end_datetime=end_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncQuarter("start_date")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime.date(), "quarter")), (last_quarter_2015, truncate_to(last_quarter_2015.date(), "quarter")), (first_quarter_2016, truncate_to(first_quarter_2016.date(), "quarter")), (end_datetime, truncate_to(end_datetime.date(), "quarter")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncQuarter("start_datetime")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime, "quarter")), (last_quarter_2015, truncate_to(last_quarter_2015, "quarter")), (first_quarter_2016, truncate_to(first_quarter_2016, "quarter")), (end_datetime, truncate_to(end_datetime, "quarter")), ], lambda m: (m.start_datetime, m.extracted), ) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField" ): list(DTModel.objects.annotate(truncated=TruncQuarter("start_time"))) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField" ): list( DTModel.objects.annotate( truncated=TruncQuarter("start_time", output_field=TimeField()) ) ) def test_trunc_month_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), "month") if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncMonth("start_datetime")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime, "month")), (end_datetime, truncate_to(end_datetime, "month")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncMonth("start_date")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime.date(), "month")), (end_datetime, truncate_to(end_datetime.date(), "month")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter(start_datetime=TruncMonth("start_datetime")).count(), 1, ) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField" ): list(DTModel.objects.annotate(truncated=TruncMonth("start_time"))) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField" ): list( DTModel.objects.annotate( truncated=TruncMonth("start_time", output_field=TimeField()) ) ) def test_trunc_week_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), "week") if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncWeek("start_datetime")).order_by( 
"start_datetime" ), [ (start_datetime, truncate_to(start_datetime, "week")), (end_datetime, truncate_to(end_datetime, "week")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter(start_datetime=TruncWeek("start_datetime")).count(), 1, ) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField" ): list(DTModel.objects.annotate(truncated=TruncWeek("start_time"))) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField" ): list( DTModel.objects.annotate( truncated=TruncWeek("start_time", output_field=TimeField()) ) ) def test_trunc_date_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncDate("start_datetime")).order_by( "start_datetime" ), [ (start_datetime, start_datetime.date()), (end_datetime, end_datetime.date()), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime__date=TruncDate("start_datetime") ).count(), 2, ) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateField" ): list(DTModel.objects.annotate(truncated=TruncDate("start_time"))) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateField" ): list( DTModel.objects.annotate( truncated=TruncDate("start_time", output_field=TimeField()) ) ) def test_trunc_date_none(self): self.create_model(None, None) self.assertIsNone( DTModel.objects.annotate(truncated=TruncDate("start_datetime")) .first() .truncated ) def test_trunc_time_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncTime("start_datetime")).order_by( "start_datetime" ), [ (start_datetime, start_datetime.time()), (end_datetime, end_datetime.time()), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime__time=TruncTime("start_datetime") ).count(), 2, ) with self.assertRaisesMessage( ValueError, "Cannot truncate DateField 'start_date' to TimeField" ): list(DTModel.objects.annotate(truncated=TruncTime("start_date"))) with self.assertRaisesMessage( ValueError, "Cannot truncate DateField 'start_date' to TimeField" ): list( DTModel.objects.annotate( truncated=TruncTime("start_date", output_field=DateField()) ) ) def test_trunc_time_none(self): self.create_model(None, None) self.assertIsNone( DTModel.objects.annotate(truncated=TruncTime("start_datetime")) .first() .truncated ) def test_trunc_time_comparison(self): start_datetime = datetime(2015, 6, 15, 14, 30, 26) # 0 microseconds. 
end_datetime = datetime(2015, 6, 15, 14, 30, 26, 321) if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.assertIs( DTModel.objects.filter( start_datetime__time=start_datetime.time(), end_datetime__time=end_datetime.time(), ).exists(), True, ) self.assertIs( DTModel.objects.annotate( extracted_start=TruncTime("start_datetime"), extracted_end=TruncTime("end_datetime"), ) .filter( extracted_start=start_datetime.time(), extracted_end=end_datetime.time(), ) .exists(), True, ) def test_trunc_day_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), "day") if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncDay("start_datetime")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime, "day")), (end_datetime, truncate_to(end_datetime, "day")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter(start_datetime=TruncDay("start_datetime")).count(), 1 ) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField" ): list(DTModel.objects.annotate(truncated=TruncDay("start_time"))) with self.assertRaisesMessage( ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField" ): list( DTModel.objects.annotate( truncated=TruncDay("start_time", output_field=TimeField()) ) ) def test_trunc_hour_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), "hour") if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncHour("start_datetime")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime, "hour")), (end_datetime, truncate_to(end_datetime, "hour")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncHour("start_time")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime.time(), "hour")), (end_datetime, truncate_to(end_datetime.time(), "hour")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter(start_datetime=TruncHour("start_datetime")).count(), 1, ) with self.assertRaisesMessage( ValueError, "Cannot truncate DateField 'start_date' to DateTimeField" ): list(DTModel.objects.annotate(truncated=TruncHour("start_date"))) with self.assertRaisesMessage( ValueError, "Cannot truncate DateField 'start_date' to DateTimeField" ): list( DTModel.objects.annotate( truncated=TruncHour("start_date", output_field=DateField()) ) ) def test_trunc_minute_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), "minute") if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( 
DTModel.objects.annotate(extracted=TruncMinute("start_datetime")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime, "minute")), (end_datetime, truncate_to(end_datetime, "minute")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncMinute("start_time")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime.time(), "minute")), (end_datetime, truncate_to(end_datetime.time(), "minute")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime=TruncMinute("start_datetime") ).count(), 1, ) with self.assertRaisesMessage( ValueError, "Cannot truncate DateField 'start_date' to DateTimeField" ): list(DTModel.objects.annotate(truncated=TruncMinute("start_date"))) with self.assertRaisesMessage( ValueError, "Cannot truncate DateField 'start_date' to DateTimeField" ): list( DTModel.objects.annotate( truncated=TruncMinute("start_date", output_field=DateField()) ) ) def test_trunc_second_func(self): start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = truncate_to(datetime(2016, 6, 15, 14, 10, 50, 123), "second") if settings.USE_TZ: start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncSecond("start_datetime")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime, "second")), (end_datetime, truncate_to(end_datetime, "second")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertQuerysetEqual( DTModel.objects.annotate(extracted=TruncSecond("start_time")).order_by( "start_datetime" ), [ (start_datetime, truncate_to(start_datetime.time(), "second")), (end_datetime, truncate_to(end_datetime.time(), "second")), ], lambda m: (m.start_datetime, m.extracted), ) self.assertEqual( DTModel.objects.filter( start_datetime=TruncSecond("start_datetime") ).count(), 1, ) with self.assertRaisesMessage( ValueError, "Cannot truncate DateField 'start_date' to DateTimeField" ): list(DTModel.objects.annotate(truncated=TruncSecond("start_date"))) with self.assertRaisesMessage( ValueError, "Cannot truncate DateField 'start_date' to DateTimeField" ): list( DTModel.objects.annotate( truncated=TruncSecond("start_date", output_field=DateField()) ) ) def test_trunc_subquery_with_parameters(self): author_1 = Author.objects.create(name="J. R. R. Tolkien") author_2 = Author.objects.create(name="G. R. R. 
Martin") fan_since_1 = datetime(2016, 2, 3, 15, 0, 0) fan_since_2 = datetime(2015, 2, 3, 15, 0, 0) fan_since_3 = datetime(2017, 2, 3, 15, 0, 0) if settings.USE_TZ: fan_since_1 = timezone.make_aware(fan_since_1) fan_since_2 = timezone.make_aware(fan_since_2) fan_since_3 = timezone.make_aware(fan_since_3) Fan.objects.create(author=author_1, name="Tom", fan_since=fan_since_1) Fan.objects.create(author=author_1, name="Emma", fan_since=fan_since_2) Fan.objects.create(author=author_2, name="Isabella", fan_since=fan_since_3) inner = ( Fan.objects.filter( author=OuterRef("pk"), name__in=("Emma", "Isabella", "Tom") ) .values("author") .annotate(newest_fan=Max("fan_since")) .values("newest_fan") ) outer = Author.objects.annotate( newest_fan_year=TruncYear(Subquery(inner, output_field=DateTimeField())) ) tz = datetime_timezone.utc if settings.USE_TZ else None self.assertSequenceEqual( outer.order_by("name").values("name", "newest_fan_year"), [ { "name": "G. R. R. Martin", "newest_fan_year": datetime(2017, 1, 1, 0, 0, tzinfo=tz), }, { "name": "J. R. R. Tolkien", "newest_fan_year": datetime(2016, 1, 1, 0, 0, tzinfo=tz), }, ], ) def test_extract_outerref(self): datetime_1 = datetime(2000, 1, 1) datetime_2 = datetime(2001, 3, 5) datetime_3 = datetime(2002, 1, 3) if settings.USE_TZ: datetime_1 = timezone.make_aware(datetime_1) datetime_2 = timezone.make_aware(datetime_2) datetime_3 = timezone.make_aware(datetime_3) obj_1 = self.create_model(datetime_1, datetime_3) obj_2 = self.create_model(datetime_2, datetime_1) obj_3 = self.create_model(datetime_3, datetime_2) inner_qs = DTModel.objects.filter( start_datetime__year=2000, start_datetime__month=ExtractMonth(OuterRef("end_datetime")), ) qs = DTModel.objects.annotate( related_pk=Subquery(inner_qs.values("pk")[:1]), ) self.assertSequenceEqual( qs.order_by("name").values("pk", "related_pk"), [ {"pk": obj_1.pk, "related_pk": obj_1.pk}, {"pk": obj_2.pk, "related_pk": obj_1.pk}, {"pk": obj_3.pk, "related_pk": None}, ], ) @override_settings(USE_TZ=True, TIME_ZONE="UTC") class DateFunctionWithTimeZoneTests(DateFunctionTests): def get_timezones(self, key): for constructor in ZONE_CONSTRUCTORS: yield constructor(key) def test_extract_func_with_timezone(self): start_datetime = datetime(2015, 6, 15, 23, 30, 1, 321) end_datetime = datetime(2015, 6, 16, 13, 11, 27, 123) start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) delta_tzinfo_pos = datetime_timezone(timedelta(hours=5)) delta_tzinfo_neg = datetime_timezone(timedelta(hours=-5, minutes=17)) for melb in self.get_timezones("Australia/Melbourne"): with self.subTest(repr(melb)): qs = DTModel.objects.annotate( day=Extract("start_datetime", "day"), day_melb=Extract("start_datetime", "day", tzinfo=melb), week=Extract("start_datetime", "week", tzinfo=melb), isoyear=ExtractIsoYear("start_datetime", tzinfo=melb), weekday=ExtractWeekDay("start_datetime"), weekday_melb=ExtractWeekDay("start_datetime", tzinfo=melb), isoweekday=ExtractIsoWeekDay("start_datetime"), isoweekday_melb=ExtractIsoWeekDay("start_datetime", tzinfo=melb), quarter=ExtractQuarter("start_datetime", tzinfo=melb), hour=ExtractHour("start_datetime"), hour_melb=ExtractHour("start_datetime", tzinfo=melb), hour_with_delta_pos=ExtractHour( "start_datetime", tzinfo=delta_tzinfo_pos ), hour_with_delta_neg=ExtractHour( "start_datetime", tzinfo=delta_tzinfo_neg ), minute_with_delta_neg=ExtractMinute( "start_datetime", tzinfo=delta_tzinfo_neg ), ).order_by("start_datetime") 
utc_model = qs.get() self.assertEqual(utc_model.day, 15) self.assertEqual(utc_model.day_melb, 16) self.assertEqual(utc_model.week, 25) self.assertEqual(utc_model.isoyear, 2015) self.assertEqual(utc_model.weekday, 2) self.assertEqual(utc_model.weekday_melb, 3) self.assertEqual(utc_model.isoweekday, 1) self.assertEqual(utc_model.isoweekday_melb, 2) self.assertEqual(utc_model.quarter, 2) self.assertEqual(utc_model.hour, 23) self.assertEqual(utc_model.hour_melb, 9) self.assertEqual(utc_model.hour_with_delta_pos, 4) self.assertEqual(utc_model.hour_with_delta_neg, 18) self.assertEqual(utc_model.minute_with_delta_neg, 47) with timezone.override(melb): melb_model = qs.get() self.assertEqual(melb_model.day, 16) self.assertEqual(melb_model.day_melb, 16) self.assertEqual(melb_model.week, 25) self.assertEqual(melb_model.isoyear, 2015) self.assertEqual(melb_model.weekday, 3) self.assertEqual(melb_model.isoweekday, 2) self.assertEqual(melb_model.quarter, 2) self.assertEqual(melb_model.weekday_melb, 3) self.assertEqual(melb_model.isoweekday_melb, 2) self.assertEqual(melb_model.hour, 9) self.assertEqual(melb_model.hour_melb, 9) def test_extract_func_with_timezone_minus_no_offset(self): start_datetime = datetime(2015, 6, 15, 23, 30, 1, 321) end_datetime = datetime(2015, 6, 16, 13, 11, 27, 123) start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) for ust_nera in self.get_timezones("Asia/Ust-Nera"): with self.subTest(repr(ust_nera)): qs = DTModel.objects.annotate( hour=ExtractHour("start_datetime"), hour_tz=ExtractHour("start_datetime", tzinfo=ust_nera), ).order_by("start_datetime") utc_model = qs.get() self.assertEqual(utc_model.hour, 23) self.assertEqual(utc_model.hour_tz, 9) with timezone.override(ust_nera): ust_nera_model = qs.get() self.assertEqual(ust_nera_model.hour, 9) self.assertEqual(ust_nera_model.hour_tz, 9) def test_extract_func_explicit_timezone_priority(self): start_datetime = datetime(2015, 6, 15, 23, 30, 1, 321) end_datetime = datetime(2015, 6, 16, 13, 11, 27, 123) start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) for melb in self.get_timezones("Australia/Melbourne"): with self.subTest(repr(melb)): with timezone.override(melb): model = ( DTModel.objects.annotate( day_melb=Extract("start_datetime", "day"), day_utc=Extract( "start_datetime", "day", tzinfo=datetime_timezone.utc ), ) .order_by("start_datetime") .get() ) self.assertEqual(model.day_melb, 16) self.assertEqual(model.day_utc, 15) def test_extract_invalid_field_with_timezone(self): for melb in self.get_timezones("Australia/Melbourne"): with self.subTest(repr(melb)): msg = "tzinfo can only be used with DateTimeField." 
with self.assertRaisesMessage(ValueError, msg): DTModel.objects.annotate( day_melb=Extract("start_date", "day", tzinfo=melb), ).get() with self.assertRaisesMessage(ValueError, msg): DTModel.objects.annotate( hour_melb=Extract("start_time", "hour", tzinfo=melb), ).get() def test_trunc_timezone_applied_before_truncation(self): start_datetime = datetime(2016, 1, 1, 1, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) for melb, pacific in zip( self.get_timezones("Australia/Melbourne"), self.get_timezones("America/Los_Angeles"), ): with self.subTest((repr(melb), repr(pacific))): model = ( DTModel.objects.annotate( melb_year=TruncYear("start_datetime", tzinfo=melb), pacific_year=TruncYear("start_datetime", tzinfo=pacific), melb_date=TruncDate("start_datetime", tzinfo=melb), pacific_date=TruncDate("start_datetime", tzinfo=pacific), melb_time=TruncTime("start_datetime", tzinfo=melb), pacific_time=TruncTime("start_datetime", tzinfo=pacific), ) .order_by("start_datetime") .get() ) melb_start_datetime = start_datetime.astimezone(melb) pacific_start_datetime = start_datetime.astimezone(pacific) self.assertEqual(model.start_datetime, start_datetime) self.assertEqual( model.melb_year, truncate_to(start_datetime, "year", melb) ) self.assertEqual( model.pacific_year, truncate_to(start_datetime, "year", pacific) ) self.assertEqual(model.start_datetime.year, 2016) self.assertEqual(model.melb_year.year, 2016) self.assertEqual(model.pacific_year.year, 2015) self.assertEqual(model.melb_date, melb_start_datetime.date()) self.assertEqual(model.pacific_date, pacific_start_datetime.date()) self.assertEqual(model.melb_time, melb_start_datetime.time()) self.assertEqual(model.pacific_time, pacific_start_datetime.time()) @needs_pytz @ignore_warnings(category=RemovedInDjango50Warning) def test_trunc_ambiguous_and_invalid_times(self): sao = pytz.timezone("America/Sao_Paulo") start_datetime = datetime(2016, 10, 16, 13, tzinfo=datetime_timezone.utc) end_datetime = datetime(2016, 2, 21, 1, tzinfo=datetime_timezone.utc) self.create_model(start_datetime, end_datetime) with timezone.override(sao): with self.assertRaisesMessage( pytz.NonExistentTimeError, "2016-10-16 00:00:00" ): model = DTModel.objects.annotate( truncated_start=TruncDay("start_datetime") ).get() with self.assertRaisesMessage( pytz.AmbiguousTimeError, "2016-02-20 23:00:00" ): model = DTModel.objects.annotate( truncated_end=TruncHour("end_datetime") ).get() model = DTModel.objects.annotate( truncated_start=TruncDay("start_datetime", is_dst=False), truncated_end=TruncHour("end_datetime", is_dst=False), ).get() self.assertEqual(model.truncated_start.dst(), timedelta(0)) self.assertEqual(model.truncated_end.dst(), timedelta(0)) model = DTModel.objects.annotate( truncated_start=TruncDay("start_datetime", is_dst=True), truncated_end=TruncHour("end_datetime", is_dst=True), ).get() self.assertEqual(model.truncated_start.dst(), timedelta(0, 3600)) self.assertEqual(model.truncated_end.dst(), timedelta(0, 3600)) def test_trunc_func_with_timezone(self): """ If the truncated datetime transitions to a different offset (daylight saving) then the returned value will have that new timezone/offset. 
""" start_datetime = datetime(2015, 6, 15, 14, 30, 50, 321) end_datetime = datetime(2016, 6, 15, 14, 10, 50, 123) start_datetime = timezone.make_aware(start_datetime) end_datetime = timezone.make_aware(end_datetime) self.create_model(start_datetime, end_datetime) self.create_model(end_datetime, start_datetime) for melb in self.get_timezones("Australia/Melbourne"): with self.subTest(repr(melb)): def test_datetime_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc( "start_datetime", kind, output_field=DateTimeField(), tzinfo=melb, ) ).order_by("start_datetime"), [ ( start_datetime, truncate_to( start_datetime.astimezone(melb), kind, melb ), ), ( end_datetime, truncate_to(end_datetime.astimezone(melb), kind, melb), ), ], lambda m: (m.start_datetime, m.truncated), ) def test_datetime_to_date_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc( "start_datetime", kind, output_field=DateField(), tzinfo=melb, ), ).order_by("start_datetime"), [ ( start_datetime, truncate_to( start_datetime.astimezone(melb).date(), kind ), ), ( end_datetime, truncate_to(end_datetime.astimezone(melb).date(), kind), ), ], lambda m: (m.start_datetime, m.truncated), ) def test_datetime_to_time_kind(kind): self.assertQuerysetEqual( DTModel.objects.annotate( truncated=Trunc( "start_datetime", kind, output_field=TimeField(), tzinfo=melb, ) ).order_by("start_datetime"), [ ( start_datetime, truncate_to( start_datetime.astimezone(melb).time(), kind ), ), ( end_datetime, truncate_to(end_datetime.astimezone(melb).time(), kind), ), ], lambda m: (m.start_datetime, m.truncated), ) test_datetime_to_date_kind("year") test_datetime_to_date_kind("quarter") test_datetime_to_date_kind("month") test_datetime_to_date_kind("week") test_datetime_to_date_kind("day") test_datetime_to_time_kind("hour") test_datetime_to_time_kind("minute") test_datetime_to_time_kind("second") test_datetime_kind("year") test_datetime_kind("quarter") test_datetime_kind("month") test_datetime_kind("week") test_datetime_kind("day") test_datetime_kind("hour") test_datetime_kind("minute") test_datetime_kind("second") qs = DTModel.objects.filter( start_datetime__date=Trunc( "start_datetime", "day", output_field=DateField() ) ) self.assertEqual(qs.count(), 2) def test_trunc_invalid_field_with_timezone(self): for melb in self.get_timezones("Australia/Melbourne"): with self.subTest(repr(melb)): msg = "tzinfo can only be used with DateTimeField." with self.assertRaisesMessage(ValueError, msg): DTModel.objects.annotate( day_melb=Trunc("start_date", "day", tzinfo=melb), ).get() with self.assertRaisesMessage(ValueError, msg): DTModel.objects.annotate( hour_melb=Trunc("start_time", "hour", tzinfo=melb), ).get()
38064e5c2b51320685349ab10fbe129152655ea9fbb32dd1a7ddea37f6e343ad
#!/usr/bin/env python import argparse import atexit import copy import gc import multiprocessing import os import shutil import socket import subprocess import sys import tempfile import warnings from pathlib import Path try: import django except ImportError as e: raise RuntimeError( "Django module not found, reference tests/README.rst for instructions." ) from e else: from django.apps import apps from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import connection, connections from django.test import TestCase, TransactionTestCase from django.test.runner import get_max_test_processes, parallel_type from django.test.selenium import SeleniumTestCaseBase from django.test.utils import NullTimeKeeper, TimeKeeper, get_runner from django.utils.deprecation import ( RemovedInDjango50Warning, RemovedInDjango51Warning, ) from django.utils.log import DEFAULT_LOGGING try: import MySQLdb except ImportError: pass else: # Ignore informational warnings from QuerySet.explain(). warnings.filterwarnings("ignore", r"\(1003, *", category=MySQLdb.Warning) # Make deprecation warnings errors to ensure no usage of deprecated features. warnings.simplefilter("error", RemovedInDjango50Warning) warnings.simplefilter("error", RemovedInDjango51Warning) # Make resource and runtime warning errors to ensure no usage of error prone # patterns. warnings.simplefilter("error", ResourceWarning) warnings.simplefilter("error", RuntimeWarning) # Ignore known warnings in test dependencies. warnings.filterwarnings( "ignore", "'U' mode is deprecated", DeprecationWarning, module="docutils.io" ) # Reduce garbage collection frequency to improve performance. Since CPython # uses refcounting, garbage collection only collects objects with cyclic # references, which are a minority, so the garbage collection threshold can be # larger than the default threshold of 700 allocations + deallocations without # much increase in memory usage. gc.set_threshold(100_000) RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__)) TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, "templates") # Create a specific subdirectory for the duration of the test suite. TMPDIR = tempfile.mkdtemp(prefix="django_") # Set the TMPDIR environment variable in addition to tempfile.tempdir # so that children processes inherit it. tempfile.tempdir = os.environ["TMPDIR"] = TMPDIR # Removing the temporary TMPDIR. atexit.register(shutil.rmtree, TMPDIR) # This is a dict mapping RUNTESTS_DIR subdirectory to subdirectories of that # directory to skip when searching for test modules. SUBDIRS_TO_SKIP = { "": {"import_error_package", "test_runner_apps"}, "gis_tests": {"data"}, } ALWAYS_INSTALLED_APPS = [ "django.contrib.contenttypes", "django.contrib.auth", "django.contrib.sites", "django.contrib.sessions", "django.contrib.messages", "django.contrib.admin.apps.SimpleAdminConfig", "django.contrib.staticfiles", ] ALWAYS_MIDDLEWARE = [ "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", ] # Need to add the associated contrib app to INSTALLED_APPS in some cases to # avoid "RuntimeError: Model class X doesn't declare an explicit app_label # and isn't in an application in INSTALLED_APPS." 
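# This mapping is consumed by get_apps_to_install() below, which adds the
# listed contrib apps (together with the test module itself) to INSTALLED_APPS.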
CONTRIB_TESTS_TO_APPS = { "deprecation": ["django.contrib.flatpages", "django.contrib.redirects"], "flatpages_tests": ["django.contrib.flatpages"], "redirects_tests": ["django.contrib.redirects"], } def get_test_modules(gis_enabled): """ Scan the tests directory and yield the names of all test modules. The yielded names have either one dotted part like "test_runner" or, in the case of GIS tests, two dotted parts like "gis_tests.gdal_tests". """ discovery_dirs = [""] if gis_enabled: # GIS tests are in nested apps discovery_dirs.append("gis_tests") else: SUBDIRS_TO_SKIP[""].add("gis_tests") for dirname in discovery_dirs: dirpath = os.path.join(RUNTESTS_DIR, dirname) subdirs_to_skip = SUBDIRS_TO_SKIP[dirname] with os.scandir(dirpath) as entries: for f in entries: if ( "." in f.name or os.path.basename(f.name) in subdirs_to_skip or f.is_file() or not os.path.exists(os.path.join(f.path, "__init__.py")) ): continue test_module = f.name if dirname: test_module = dirname + "." + test_module yield test_module def get_label_module(label): """Return the top-level module part for a test label.""" path = Path(label) if len(path.parts) == 1: # Interpret the label as a dotted module name. return label.split(".")[0] # Otherwise, interpret the label as a path. Check existence first to # provide a better error message than relative_to() if it doesn't exist. if not path.exists(): raise RuntimeError(f"Test label path {label} does not exist") path = path.resolve() rel_path = path.relative_to(RUNTESTS_DIR) return rel_path.parts[0] def get_filtered_test_modules(start_at, start_after, gis_enabled, test_labels=None): if test_labels is None: test_labels = [] # Reduce each test label to just the top-level module part. label_modules = set() for label in test_labels: test_module = get_label_module(label) label_modules.add(test_module) # It would be nice to put this validation earlier but it must come after # django.setup() so that connection.features.gis_enabled can be accessed. if "gis_tests" in label_modules and not gis_enabled: print("Aborting: A GIS database backend is required to run gis_tests.") sys.exit(1) def _module_match_label(module_name, label): # Exact or ancestor match. return module_name == label or module_name.startswith(label + ".") start_label = start_at or start_after for test_module in get_test_modules(gis_enabled): if start_label: if not _module_match_label(test_module, start_label): continue start_label = "" if not start_at: assert start_after # Skip the current one before starting. continue # If the module (or an ancestor) was named on the command line, or # no modules were named (i.e., run all), include the test module. if not test_labels or any( _module_match_label(test_module, label_module) for label_module in label_modules ): yield test_module def setup_collect_tests(start_at, start_after, test_labels=None): state = { "INSTALLED_APPS": settings.INSTALLED_APPS, "ROOT_URLCONF": getattr(settings, "ROOT_URLCONF", ""), "TEMPLATES": settings.TEMPLATES, "LANGUAGE_CODE": settings.LANGUAGE_CODE, "STATIC_URL": settings.STATIC_URL, "STATIC_ROOT": settings.STATIC_ROOT, "MIDDLEWARE": settings.MIDDLEWARE, } # Redirect some settings for the duration of these tests. 
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS settings.ROOT_URLCONF = "urls" settings.STATIC_URL = "static/" settings.STATIC_ROOT = os.path.join(TMPDIR, "static") settings.TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [TEMPLATE_DIR], "APP_DIRS": True, "OPTIONS": { "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", ], }, } ] settings.LANGUAGE_CODE = "en" settings.SITE_ID = 1 settings.MIDDLEWARE = ALWAYS_MIDDLEWARE settings.MIGRATION_MODULES = { # This lets us skip creating migrations for the test models as many of # them depend on one of the following contrib applications. "auth": None, "contenttypes": None, "sessions": None, } log_config = copy.deepcopy(DEFAULT_LOGGING) # Filter out non-error logging so we don't have to capture it in lots of # tests. log_config["loggers"]["django"]["level"] = "ERROR" settings.LOGGING = log_config settings.SILENCED_SYSTEM_CHECKS = [ "fields.W342", # ForeignKey(unique=True) -> OneToOneField # django.contrib.postgres.fields.CICharField deprecated. "fields.W905", "postgres.W004", # django.contrib.postgres.fields.CIEmailField deprecated. "fields.W906", # django.contrib.postgres.fields.CITextField deprecated. "fields.W907", ] # RemovedInDjango50Warning settings.FORM_RENDERER = "django.forms.renderers.DjangoDivFormRenderer" # Load all the ALWAYS_INSTALLED_APPS. django.setup() # This flag must be evaluated after django.setup() because otherwise it can # raise AppRegistryNotReady when running gis_tests in isolation on some # backends (e.g. PostGIS). gis_enabled = connection.features.gis_enabled test_modules = list( get_filtered_test_modules( start_at, start_after, gis_enabled, test_labels=test_labels, ) ) return test_modules, state def teardown_collect_tests(state): # Restore the old settings. for key, value in state.items(): setattr(settings, key, value) def get_installed(): return [app_config.name for app_config in apps.get_app_configs()] # This function should be called only after calling django.setup(), # since it calls connection.features.gis_enabled. def get_apps_to_install(test_modules): for test_module in test_modules: if test_module in CONTRIB_TESTS_TO_APPS: yield from CONTRIB_TESTS_TO_APPS[test_module] yield test_module # Add contrib.gis to INSTALLED_APPS if needed (rather than requiring # @override_settings(INSTALLED_APPS=...) on all test cases. if connection.features.gis_enabled: yield "django.contrib.gis" def setup_run_tests(verbosity, start_at, start_after, test_labels=None): test_modules, state = setup_collect_tests( start_at, start_after, test_labels=test_labels ) installed_apps = set(get_installed()) for app in get_apps_to_install(test_modules): if app in installed_apps: continue if verbosity >= 2: print(f"Importing application {app}") settings.INSTALLED_APPS.append(app) installed_apps.add(app) apps.set_installed_apps(settings.INSTALLED_APPS) # Force declaring available_apps in TransactionTestCase for faster tests. def no_available_apps(self): raise Exception( "Please define available_apps in TransactionTestCase and its subclasses." ) TransactionTestCase.available_apps = property(no_available_apps) TestCase.available_apps = None # Set an environment variable that other code may consult to see if # Django's own test suite is running. 
os.environ["RUNNING_DJANGOS_TEST_SUITE"] = "true" test_labels = test_labels or test_modules return test_labels, state def teardown_run_tests(state): teardown_collect_tests(state) # Discard the multiprocessing.util finalizer that tries to remove a # temporary directory that's already removed by this script's # atexit.register(shutil.rmtree, TMPDIR) handler. Prevents # FileNotFoundError at the end of a test run (#27890). from multiprocessing.util import _finalizer_registry _finalizer_registry.pop((-100, 0), None) del os.environ["RUNNING_DJANGOS_TEST_SUITE"] class ActionSelenium(argparse.Action): """ Validate the comma-separated list of requested browsers. """ def __call__(self, parser, namespace, values, option_string=None): try: import selenium # NOQA except ImportError as e: raise ImproperlyConfigured(f"Error loading selenium module: {e}") browsers = values.split(",") for browser in browsers: try: SeleniumTestCaseBase.import_webdriver(browser) except ImportError: raise argparse.ArgumentError( self, "Selenium browser specification '%s' is not valid." % browser ) setattr(namespace, self.dest, browsers) def django_tests( verbosity, interactive, failfast, keepdb, reverse, test_labels, debug_sql, parallel, tags, exclude_tags, test_name_patterns, start_at, start_after, pdb, buffer, timing, shuffle, ): if parallel in {0, "auto"}: max_parallel = get_max_test_processes() else: max_parallel = parallel if verbosity >= 1: msg = "Testing against Django installed in '%s'" % os.path.dirname( django.__file__ ) if max_parallel > 1: msg += " with up to %d processes" % max_parallel print(msg) process_setup_args = (verbosity, start_at, start_after, test_labels) test_labels, state = setup_run_tests(*process_setup_args) # Run the test suite, including the extra validation tests. if not hasattr(settings, "TEST_RUNNER"): settings.TEST_RUNNER = "django.test.runner.DiscoverRunner" if parallel in {0, "auto"}: # This doesn't work before django.setup() on some databases. 
if all(conn.features.can_clone_databases for conn in connections.all()): parallel = max_parallel else: parallel = 1 TestRunner = get_runner(settings) TestRunner.parallel_test_suite.process_setup = setup_run_tests TestRunner.parallel_test_suite.process_setup_args = process_setup_args test_runner = TestRunner( verbosity=verbosity, interactive=interactive, failfast=failfast, keepdb=keepdb, reverse=reverse, debug_sql=debug_sql, parallel=parallel, tags=tags, exclude_tags=exclude_tags, test_name_patterns=test_name_patterns, pdb=pdb, buffer=buffer, timing=timing, shuffle=shuffle, ) failures = test_runner.run_tests(test_labels) teardown_run_tests(state) return failures def collect_test_modules(start_at, start_after): test_modules, state = setup_collect_tests(start_at, start_after) teardown_collect_tests(state) return test_modules def get_subprocess_args(options): subprocess_args = [sys.executable, __file__, "--settings=%s" % options.settings] if options.failfast: subprocess_args.append("--failfast") if options.verbosity: subprocess_args.append("--verbosity=%s" % options.verbosity) if not options.interactive: subprocess_args.append("--noinput") if options.tags: subprocess_args.append("--tag=%s" % options.tags) if options.exclude_tags: subprocess_args.append("--exclude_tag=%s" % options.exclude_tags) if options.shuffle is not False: if options.shuffle is None: subprocess_args.append("--shuffle") else: subprocess_args.append("--shuffle=%s" % options.shuffle) return subprocess_args def bisect_tests(bisection_label, options, test_labels, start_at, start_after): if not test_labels: test_labels = collect_test_modules(start_at, start_after) print("***** Bisecting test suite: %s" % " ".join(test_labels)) # Make sure the bisection point isn't in the test list # Also remove tests that need to be run in specific combinations for label in [bisection_label, "model_inheritance_same_model_name"]: try: test_labels.remove(label) except ValueError: pass subprocess_args = get_subprocess_args(options) iteration = 1 while len(test_labels) > 1: midpoint = len(test_labels) // 2 test_labels_a = test_labels[:midpoint] + [bisection_label] test_labels_b = test_labels[midpoint:] + [bisection_label] print("***** Pass %da: Running the first half of the test suite" % iteration) print("***** Test labels: %s" % " ".join(test_labels_a)) failures_a = subprocess.run(subprocess_args + test_labels_a) print("***** Pass %db: Running the second half of the test suite" % iteration) print("***** Test labels: %s" % " ".join(test_labels_b)) print("") failures_b = subprocess.run(subprocess_args + test_labels_b) if failures_a.returncode and not failures_b.returncode: print("***** Problem found in first half. Bisecting again...") iteration += 1 test_labels = test_labels_a[:-1] elif failures_b.returncode and not failures_a.returncode: print("***** Problem found in second half. Bisecting again...") iteration += 1 test_labels = test_labels_b[:-1] elif failures_a.returncode and failures_b.returncode: print("***** Multiple sources of failure found") break else: print("***** No source of failure found... 
try pair execution (--pair)") break if len(test_labels) == 1: print("***** Source of error: %s" % test_labels[0]) def paired_tests(paired_test, options, test_labels, start_at, start_after): if not test_labels: test_labels = collect_test_modules(start_at, start_after) print("***** Trying paired execution") # Make sure the constant member of the pair isn't in the test list # Also remove tests that need to be run in specific combinations for label in [paired_test, "model_inheritance_same_model_name"]: try: test_labels.remove(label) except ValueError: pass subprocess_args = get_subprocess_args(options) for i, label in enumerate(test_labels): print( "***** %d of %d: Check test pairing with %s" % (i + 1, len(test_labels), label) ) failures = subprocess.call(subprocess_args + [label, paired_test]) if failures: print("***** Found problem pair with %s" % label) return print("***** No problem pair found") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run the Django test suite.") parser.add_argument( "modules", nargs="*", metavar="module", help='Optional path(s) to test modules; e.g. "i18n" or ' '"i18n.tests.TranslationTests.test_lazy_objects".', ) parser.add_argument( "-v", "--verbosity", default=1, type=int, choices=[0, 1, 2, 3], help="Verbosity level; 0=minimal output, 1=normal output, 2=all output", ) parser.add_argument( "--noinput", action="store_false", dest="interactive", help="Tells Django to NOT prompt the user for input of any kind.", ) parser.add_argument( "--failfast", action="store_true", help="Tells Django to stop running the test suite after first failed test.", ) parser.add_argument( "--keepdb", action="store_true", help="Tells Django to preserve the test database between runs.", ) parser.add_argument( "--settings", help='Python path to settings module, e.g. "myproject.settings". If ' "this isn't provided, either the DJANGO_SETTINGS_MODULE " 'environment variable or "test_sqlite" will be used.', ) parser.add_argument( "--bisect", help="Bisect the test suite to discover a test that causes a test " "failure when combined with the named test.", ) parser.add_argument( "--pair", help="Run the test suite in pairs with the named test to find problem pairs.", ) parser.add_argument( "--shuffle", nargs="?", default=False, type=int, metavar="SEED", help=( "Shuffle the order of test cases to help check that tests are " "properly isolated." ), ) parser.add_argument( "--reverse", action="store_true", help="Sort test suites and test cases in opposite order to debug " "test side effects not apparent with normal execution lineup.", ) parser.add_argument( "--selenium", action=ActionSelenium, metavar="BROWSERS", help="A comma-separated list of browsers to run the Selenium tests against.", ) parser.add_argument( "--headless", action="store_true", help="Run selenium tests in headless mode, if the browser supports the option.", ) parser.add_argument( "--selenium-hub", help="A URL for a selenium hub instance to use in combination with --selenium.", ) parser.add_argument( "--external-host", default=socket.gethostname(), help=( "The external host that can be reached by the selenium hub instance when " "running Selenium tests via Selenium Hub." ), ) parser.add_argument( "--debug-sql", action="store_true", help="Turn on the SQL query logger within tests.", ) # 0 is converted to "auto" or 1 later on, depending on a method used by # multiprocessing to start subprocesses and on the backend support for # cloning databases. 
parser.add_argument( "--parallel", nargs="?", const="auto", default=0, type=parallel_type, metavar="N", help=( 'Run tests using up to N parallel processes. Use the value "auto" ' "to run one test process for each processor core." ), ) parser.add_argument( "--tag", dest="tags", action="append", help="Run only tests with the specified tags. Can be used multiple times.", ) parser.add_argument( "--exclude-tag", dest="exclude_tags", action="append", help="Do not run tests with the specified tag. Can be used multiple times.", ) parser.add_argument( "--start-after", dest="start_after", help="Run tests starting after the specified top-level module.", ) parser.add_argument( "--start-at", dest="start_at", help="Run tests starting at the specified top-level module.", ) parser.add_argument( "--pdb", action="store_true", help="Runs the PDB debugger on error or failure." ) parser.add_argument( "-b", "--buffer", action="store_true", help="Discard output of passing tests.", ) parser.add_argument( "--timing", action="store_true", help="Output timings, including database set up and total run time.", ) parser.add_argument( "-k", dest="test_name_patterns", action="append", help=( "Only run test methods and classes matching test name pattern. " "Same as unittest -k option. Can be used multiple times." ), ) options = parser.parse_args() using_selenium_hub = options.selenium and options.selenium_hub if options.selenium_hub and not options.selenium: parser.error( "--selenium-hub and --external-host require --selenium to be used." ) if using_selenium_hub and not options.external_host: parser.error("--selenium-hub and --external-host must be used together.") # Allow including a trailing slash on app_labels for tab completion convenience options.modules = [os.path.normpath(labels) for labels in options.modules] mutually_exclusive_options = [ options.start_at, options.start_after, options.modules, ] enabled_module_options = [ bool(option) for option in mutually_exclusive_options ].count(True) if enabled_module_options > 1: print( "Aborting: --start-at, --start-after, and test labels are mutually " "exclusive." ) sys.exit(1) for opt_name in ["start_at", "start_after"]: opt_val = getattr(options, opt_name) if opt_val: if "." in opt_val: print( "Aborting: --%s must be a top-level module." % opt_name.replace("_", "-") ) sys.exit(1) setattr(options, opt_name, os.path.normpath(opt_val)) if options.settings: os.environ["DJANGO_SETTINGS_MODULE"] = options.settings else: os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_sqlite") options.settings = os.environ["DJANGO_SETTINGS_MODULE"] if options.selenium: if multiprocessing.get_start_method() == "spawn" and options.parallel != 1: parser.error( "You cannot use --selenium with parallel tests on this system. " "Pass --parallel=1 to use --selenium." 
) if not options.tags: options.tags = ["selenium"] elif "selenium" not in options.tags: options.tags.append("selenium") if options.selenium_hub: SeleniumTestCaseBase.selenium_hub = options.selenium_hub SeleniumTestCaseBase.external_host = options.external_host SeleniumTestCaseBase.headless = options.headless SeleniumTestCaseBase.browsers = options.selenium if options.bisect: bisect_tests( options.bisect, options, options.modules, options.start_at, options.start_after, ) elif options.pair: paired_tests( options.pair, options, options.modules, options.start_at, options.start_after, ) else: time_keeper = TimeKeeper() if options.timing else NullTimeKeeper() with time_keeper.timed("Total run"): failures = django_tests( options.verbosity, options.interactive, options.failfast, options.keepdb, options.reverse, options.modules, options.debug_sql, options.parallel, options.tags, options.exclude_tags, getattr(options, "test_name_patterns", None), options.start_at, options.start_after, options.pdb, options.buffer, options.timing, options.shuffle, ) time_keeper.print_results() if failures: sys.exit(1)
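# --- Illustrative sketch (editor's addition, not part of runtests.py) -------
# A minimal, standalone model of the halving strategy that bisect_tests()
# above applies: append the suspected label to each half of the test list,
# re-run both halves, and keep the half that still fails. Here `run_labels`
# is a stand-in for launching a subprocess with the given labels, and the
# label names are purely hypothetical.
def _sketch_bisect(labels, bisection_label, run_labels):
    """Return the single label left after repeatedly halving `labels`."""
    while len(labels) > 1:
        midpoint = len(labels) // 2
        first, second = labels[:midpoint], labels[midpoint:]
        # Run each half together with the label under suspicion.
        fails_first = run_labels(first + [bisection_label])
        fails_second = run_labels(second + [bisection_label])
        if fails_first and not fails_second:
            labels = first
        elif fails_second and not fails_first:
            labels = second
        else:
            # Both halves failed (or neither did); the real script stops too.
            break
    return labels[0] if len(labels) == 1 else None


# Example: with a fake runner that fails whenever "queries" and "aggregation"
# run together, the search narrows the suspects down to "queries".
_found = _sketch_bisect(
    ["basic", "queries", "migrations", "expressions"],
    "aggregation",
    run_labels=lambda labels: {"queries", "aggregation"} <= set(labels),
)
assert _found == "queries"
# ---------------------------------------------------------------------------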
b9acca40cc11b3783812004ae74da0582109cd2983be75f2dc4840857a006864
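# --- Illustrative sketch (editor's addition, not part of django/test/runner.py)
# DebugSQLTextTestResult below captures the SQL logged during each test by
# temporarily attaching a StreamHandler to the "django.db.backends" logger.
# This is a minimal standalone model of that capture pattern using an
# arbitrary logger name; the helper and logger names are illustrative, not
# Django APIs.
import io
import logging


def _sketch_capture_logging(logger_name, emit):
    """Run `emit` and return whatever was logged to `logger_name` meanwhile."""
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)
    stream = io.StringIO()
    handler = logging.StreamHandler(stream)
    logger.addHandler(handler)  # start capturing, as startTest() does below
    try:
        emit()
    finally:
        logger.removeHandler(handler)  # stop capturing, as stopTest() does
    return stream.getvalue()


# Example: anything logged under the watched name while `emit` runs is caught.
_captured = _sketch_capture_logging(
    "sketch.sql", lambda: logging.getLogger("sketch.sql").debug("SELECT 1")
)
assert "SELECT 1" in _captured
# ---------------------------------------------------------------------------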
import argparse import ctypes import faulthandler import io import itertools import logging import multiprocessing import os import pickle import random import sys import textwrap import unittest import warnings from collections import defaultdict from contextlib import contextmanager from importlib import import_module from io import StringIO import django from django.core.management import call_command from django.db import connections from django.test import SimpleTestCase, TestCase from django.test.utils import NullTimeKeeper, TimeKeeper, iter_test_cases from django.test.utils import setup_databases as _setup_databases from django.test.utils import setup_test_environment from django.test.utils import teardown_databases as _teardown_databases from django.test.utils import teardown_test_environment from django.utils.crypto import new_hash from django.utils.datastructures import OrderedSet from django.utils.deprecation import RemovedInDjango50Warning try: import ipdb as pdb except ImportError: import pdb try: import tblib.pickling_support except ImportError: tblib = None class DebugSQLTextTestResult(unittest.TextTestResult): def __init__(self, stream, descriptions, verbosity): self.logger = logging.getLogger("django.db.backends") self.logger.setLevel(logging.DEBUG) self.debug_sql_stream = None super().__init__(stream, descriptions, verbosity) def startTest(self, test): self.debug_sql_stream = StringIO() self.handler = logging.StreamHandler(self.debug_sql_stream) self.logger.addHandler(self.handler) super().startTest(test) def stopTest(self, test): super().stopTest(test) self.logger.removeHandler(self.handler) if self.showAll: self.debug_sql_stream.seek(0) self.stream.write(self.debug_sql_stream.read()) self.stream.writeln(self.separator2) def addError(self, test, err): super().addError(test, err) if self.debug_sql_stream is None: # Error before tests e.g. in setUpTestData(). sql = "" else: self.debug_sql_stream.seek(0) sql = self.debug_sql_stream.read() self.errors[-1] = self.errors[-1] + (sql,) def addFailure(self, test, err): super().addFailure(test, err) self.debug_sql_stream.seek(0) self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),) def addSubTest(self, test, subtest, err): super().addSubTest(test, subtest, err) if err is not None: self.debug_sql_stream.seek(0) errors = ( self.failures if issubclass(err[0], test.failureException) else self.errors ) errors[-1] = errors[-1] + (self.debug_sql_stream.read(),) def printErrorList(self, flavour, errors): for test, err, sql_debug in errors: self.stream.writeln(self.separator1) self.stream.writeln("%s: %s" % (flavour, self.getDescription(test))) self.stream.writeln(self.separator2) self.stream.writeln(err) self.stream.writeln(self.separator2) self.stream.writeln(sql_debug) class PDBDebugResult(unittest.TextTestResult): """ Custom result class that triggers a PDB session when an error or failure occurs. """ def addError(self, test, err): super().addError(test, err) self.debug(err) def addFailure(self, test, err): super().addFailure(test, err) self.debug(err) def addSubTest(self, test, subtest, err): if err is not None: self.debug(err) super().addSubTest(test, subtest, err) def debug(self, error): self._restoreStdout() self.buffer = False exc_type, exc_value, traceback = error print("\nOpening PDB: %r" % exc_value) pdb.post_mortem(traceback) class DummyList: """ Dummy list class for faking storage of results in unittest.TestResult. 
""" __slots__ = () def append(self, item): pass class RemoteTestResult(unittest.TestResult): """ Extend unittest.TestResult to record events in the child processes so they can be replayed in the parent process. Events include things like which tests succeeded or failed. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Fake storage of results to reduce memory usage. These are used by the # unittest default methods, but here 'events' is used instead. dummy_list = DummyList() self.failures = dummy_list self.errors = dummy_list self.skipped = dummy_list self.expectedFailures = dummy_list self.unexpectedSuccesses = dummy_list if tblib is not None: tblib.pickling_support.install() self.events = [] def __getstate__(self): # Make this class picklable by removing the file-like buffer # attributes. This is possible since they aren't used after unpickling # after being sent to ParallelTestSuite. state = self.__dict__.copy() state.pop("_stdout_buffer", None) state.pop("_stderr_buffer", None) state.pop("_original_stdout", None) state.pop("_original_stderr", None) return state @property def test_index(self): return self.testsRun - 1 def _confirm_picklable(self, obj): """ Confirm that obj can be pickled and unpickled as multiprocessing will need to pickle the exception in the child process and unpickle it in the parent process. Let the exception rise, if not. """ pickle.loads(pickle.dumps(obj)) def _print_unpicklable_subtest(self, test, subtest, pickle_exc): print( """ Subtest failed: test: {} subtest: {} Unfortunately, the subtest that failed cannot be pickled, so the parallel test runner cannot handle it cleanly. Here is the pickling error: > {} You should re-run this test with --parallel=1 to reproduce the failure with a cleaner failure message. """.format( test, subtest, pickle_exc ) ) def check_picklable(self, test, err): # Ensure that sys.exc_info() tuples are picklable. This displays a # clear multiprocessing.pool.RemoteTraceback generated in the child # process instead of a multiprocessing.pool.MaybeEncodingError, making # the root cause easier to figure out for users who aren't familiar # with the multiprocessing module. Since we're in a forked process, # our best chance to communicate with them is to print to stdout. try: self._confirm_picklable(err) except Exception as exc: original_exc_txt = repr(err[1]) original_exc_txt = textwrap.fill( original_exc_txt, 75, initial_indent=" ", subsequent_indent=" " ) pickle_exc_txt = repr(exc) pickle_exc_txt = textwrap.fill( pickle_exc_txt, 75, initial_indent=" ", subsequent_indent=" " ) if tblib is None: print( """ {} failed: {} Unfortunately, tracebacks cannot be pickled, making it impossible for the parallel test runner to handle this exception cleanly. In order to see the traceback, you should install tblib: python -m pip install tblib """.format( test, original_exc_txt ) ) else: print( """ {} failed: {} Unfortunately, the exception it raised cannot be pickled, making it impossible for the parallel test runner to handle it cleanly. Here's the error encountered while trying to pickle the exception: {} You should re-run this test with the --parallel=1 option to reproduce the failure and get a correct traceback. 
""".format( test, original_exc_txt, pickle_exc_txt ) ) raise def check_subtest_picklable(self, test, subtest): try: self._confirm_picklable(subtest) except Exception as exc: self._print_unpicklable_subtest(test, subtest, exc) raise def startTestRun(self): super().startTestRun() self.events.append(("startTestRun",)) def stopTestRun(self): super().stopTestRun() self.events.append(("stopTestRun",)) def startTest(self, test): super().startTest(test) self.events.append(("startTest", self.test_index)) def stopTest(self, test): super().stopTest(test) self.events.append(("stopTest", self.test_index)) def addError(self, test, err): self.check_picklable(test, err) self.events.append(("addError", self.test_index, err)) super().addError(test, err) def addFailure(self, test, err): self.check_picklable(test, err) self.events.append(("addFailure", self.test_index, err)) super().addFailure(test, err) def addSubTest(self, test, subtest, err): # Follow Python's implementation of unittest.TestResult.addSubTest() by # not doing anything when a subtest is successful. if err is not None: # Call check_picklable() before check_subtest_picklable() since # check_picklable() performs the tblib check. self.check_picklable(test, err) self.check_subtest_picklable(test, subtest) self.events.append(("addSubTest", self.test_index, subtest, err)) super().addSubTest(test, subtest, err) def addSuccess(self, test): self.events.append(("addSuccess", self.test_index)) super().addSuccess(test) def addSkip(self, test, reason): self.events.append(("addSkip", self.test_index, reason)) super().addSkip(test, reason) def addExpectedFailure(self, test, err): # If tblib isn't installed, pickling the traceback will always fail. # However we don't want tblib to be required for running the tests # when they pass or fail as expected. Drop the traceback when an # expected failure occurs. if tblib is None: err = err[0], err[1], None self.check_picklable(test, err) self.events.append(("addExpectedFailure", self.test_index, err)) super().addExpectedFailure(test, err) def addUnexpectedSuccess(self, test): self.events.append(("addUnexpectedSuccess", self.test_index)) super().addUnexpectedSuccess(test) def wasSuccessful(self): """Tells whether or not this result was a success.""" failure_types = {"addError", "addFailure", "addSubTest", "addUnexpectedSuccess"} return all(e[0] not in failure_types for e in self.events) def _exc_info_to_string(self, err, test): # Make this method no-op. It only powers the default unittest behavior # for recording errors, but this class pickles errors into 'events' # instead. return "" class RemoteTestRunner: """ Run tests and record everything but don't display anything. The implementation matches the unpythonic coding style of unittest2. """ resultclass = RemoteTestResult def __init__(self, failfast=False, resultclass=None, buffer=False): self.failfast = failfast self.buffer = buffer if resultclass is not None: self.resultclass = resultclass def run(self, test): result = self.resultclass() unittest.registerResult(result) result.failfast = self.failfast result.buffer = self.buffer test(result) return result def get_max_test_processes(): """ The maximum number of test processes when using the --parallel option. """ # The current implementation of the parallel test runner requires # multiprocessing to start subprocesses with fork() or spawn(). 
if multiprocessing.get_start_method() not in {"fork", "spawn"}: return 1 try: return int(os.environ["DJANGO_TEST_PROCESSES"]) except KeyError: return multiprocessing.cpu_count() def parallel_type(value): """Parse value passed to the --parallel option.""" if value == "auto": return value try: return int(value) except ValueError: raise argparse.ArgumentTypeError( f"{value!r} is not an integer or the string 'auto'" ) _worker_id = 0 def _init_worker( counter, initial_settings=None, serialized_contents=None, process_setup=None, process_setup_args=None, debug_mode=None, ): """ Switch to databases dedicated to this worker. This helper lives at module-level because of the multiprocessing module's requirements. """ global _worker_id with counter.get_lock(): counter.value += 1 _worker_id = counter.value start_method = multiprocessing.get_start_method() if start_method == "spawn": if process_setup and callable(process_setup): if process_setup_args is None: process_setup_args = () process_setup(*process_setup_args) django.setup() setup_test_environment(debug=debug_mode) for alias in connections: connection = connections[alias] if start_method == "spawn": # Restore initial settings in spawned processes. connection.settings_dict.update(initial_settings[alias]) if value := serialized_contents.get(alias): connection._test_serialized_contents = value connection.creation.setup_worker_connection(_worker_id) def _run_subsuite(args): """ Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult. This helper lives at module-level and its arguments are wrapped in a tuple because of the multiprocessing module's requirements. """ runner_class, subsuite_index, subsuite, failfast, buffer = args runner = runner_class(failfast=failfast, buffer=buffer) result = runner.run(subsuite) return subsuite_index, result.events def _process_setup_stub(*args): """Stub method to simplify run() implementation.""" pass class ParallelTestSuite(unittest.TestSuite): """ Run a series of tests in parallel in several processes. While the unittest module's documentation implies that orchestrating the execution of tests is the responsibility of the test runner, in practice, it appears that TestRunner classes are more concerned with formatting and displaying test results. Since there are fewer use cases for customizing TestSuite than TestRunner, implementing parallelization at the level of the TestSuite improves interoperability with existing custom test runners. A single instance of a test runner can still collect results from all tests without being aware that they have been run in parallel. """ # In case someone wants to modify these in a subclass. init_worker = _init_worker process_setup = _process_setup_stub process_setup_args = () run_subsuite = _run_subsuite runner_class = RemoteTestRunner def __init__( self, subsuites, processes, failfast=False, debug_mode=False, buffer=False ): self.subsuites = subsuites self.processes = processes self.failfast = failfast self.debug_mode = debug_mode self.buffer = buffer self.initial_settings = None self.serialized_contents = None super().__init__() def run(self, result): """ Distribute test cases across workers. Return an identifier of each test case with its result in order to use imap_unordered to show results as soon as they're available. 
To minimize pickling errors when getting results from workers: - pass back numeric indexes in self.subsuites instead of tests - make tracebacks picklable with tblib, if available Even with tblib, errors may still occur for dynamically created exception classes which cannot be unpickled. """ self.initialize_suite() counter = multiprocessing.Value(ctypes.c_int, 0) pool = multiprocessing.Pool( processes=self.processes, initializer=self.init_worker.__func__, initargs=[ counter, self.initial_settings, self.serialized_contents, self.process_setup.__func__, self.process_setup_args, self.debug_mode, ], ) args = [ (self.runner_class, index, subsuite, self.failfast, self.buffer) for index, subsuite in enumerate(self.subsuites) ] test_results = pool.imap_unordered(self.run_subsuite.__func__, args) while True: if result.shouldStop: pool.terminate() break try: subsuite_index, events = test_results.next(timeout=0.1) except multiprocessing.TimeoutError: continue except StopIteration: pool.close() break tests = list(self.subsuites[subsuite_index]) for event in events: event_name = event[0] handler = getattr(result, event_name, None) if handler is None: continue test = tests[event[1]] args = event[2:] handler(test, *args) pool.join() return result def __iter__(self): return iter(self.subsuites) def initialize_suite(self): if multiprocessing.get_start_method() == "spawn": self.initial_settings = { alias: connections[alias].settings_dict for alias in connections } self.serialized_contents = { alias: connections[alias]._test_serialized_contents for alias in connections if alias in self.serialized_aliases } class Shuffler: """ This class implements shuffling with a special consistency property. Consistency means that, for a given seed and key function, if two sets of items are shuffled, the resulting order will agree on the intersection of the two sets. For example, if items are removed from an original set, the shuffled order for the new set will be the shuffled order of the original set restricted to the smaller set. """ # This doesn't need to be cryptographically strong, so use what's fastest. hash_algorithm = "md5" @classmethod def _hash_text(cls, text): h = new_hash(cls.hash_algorithm, usedforsecurity=False) h.update(text.encode("utf-8")) return h.hexdigest() def __init__(self, seed=None): if seed is None: # Limit seeds to 10 digits for simpler output. seed = random.randint(0, 10**10 - 1) seed_source = "generated" else: seed_source = "given" self.seed = seed self.seed_source = seed_source @property def seed_display(self): return f"{self.seed!r} ({self.seed_source})" def _hash_item(self, item, key): text = "{}{}".format(self.seed, key(item)) return self._hash_text(text) def shuffle(self, items, key): """ Return a new list of the items in a shuffled order. The `key` is a function that accepts an item in `items` and returns a string unique for that item that can be viewed as a string id. The order of the return value is deterministic. It depends on the seed and key function but not on the original order. 
""" hashes = {} for item in items: hashed = self._hash_item(item, key) if hashed in hashes: msg = "item {!r} has same hash {!r} as item {!r}".format( item, hashed, hashes[hashed], ) raise RuntimeError(msg) hashes[hashed] = item return [hashes[hashed] for hashed in sorted(hashes)] class DiscoverRunner: """A Django test runner that uses unittest2 test discovery.""" test_suite = unittest.TestSuite parallel_test_suite = ParallelTestSuite test_runner = unittest.TextTestRunner test_loader = unittest.defaultTestLoader reorder_by = (TestCase, SimpleTestCase) def __init__( self, pattern=None, top_level=None, verbosity=1, interactive=True, failfast=False, keepdb=False, reverse=False, debug_mode=False, debug_sql=False, parallel=0, tags=None, exclude_tags=None, test_name_patterns=None, pdb=False, buffer=False, enable_faulthandler=True, timing=False, shuffle=False, logger=None, **kwargs, ): self.pattern = pattern self.top_level = top_level self.verbosity = verbosity self.interactive = interactive self.failfast = failfast self.keepdb = keepdb self.reverse = reverse self.debug_mode = debug_mode self.debug_sql = debug_sql self.parallel = parallel self.tags = set(tags or []) self.exclude_tags = set(exclude_tags or []) if not faulthandler.is_enabled() and enable_faulthandler: try: faulthandler.enable(file=sys.stderr.fileno()) except (AttributeError, io.UnsupportedOperation): faulthandler.enable(file=sys.__stderr__.fileno()) self.pdb = pdb if self.pdb and self.parallel > 1: raise ValueError( "You cannot use --pdb with parallel tests; pass --parallel=1 to use it." ) self.buffer = buffer self.test_name_patterns = None self.time_keeper = TimeKeeper() if timing else NullTimeKeeper() if test_name_patterns: # unittest does not export the _convert_select_pattern function # that converts command-line arguments to patterns. self.test_name_patterns = { pattern if "*" in pattern else "*%s*" % pattern for pattern in test_name_patterns } self.shuffle = shuffle self._shuffler = None self.logger = logger @classmethod def add_arguments(cls, parser): parser.add_argument( "-t", "--top-level-directory", dest="top_level", help="Top level of project for unittest discovery.", ) parser.add_argument( "-p", "--pattern", default="test*.py", help="The test matching pattern. Defaults to test*.py.", ) parser.add_argument( "--keepdb", action="store_true", help="Preserves the test DB between runs." ) parser.add_argument( "--shuffle", nargs="?", default=False, type=int, metavar="SEED", help="Shuffles test case order.", ) parser.add_argument( "-r", "--reverse", action="store_true", help="Reverses test case order.", ) parser.add_argument( "--debug-mode", action="store_true", help="Sets settings.DEBUG to True.", ) parser.add_argument( "-d", "--debug-sql", action="store_true", help="Prints logged SQL queries on failure.", ) parser.add_argument( "--parallel", nargs="?", const="auto", default=0, type=parallel_type, metavar="N", help=( "Run tests using up to N parallel processes. Use the value " '"auto" to run one test process for each processor core.' ), ) parser.add_argument( "--tag", action="append", dest="tags", help="Run only tests with the specified tag. Can be used multiple times.", ) parser.add_argument( "--exclude-tag", action="append", dest="exclude_tags", help="Do not run tests with the specified tag. 
Can be used multiple times.", ) parser.add_argument( "--pdb", action="store_true", help="Runs a debugger (pdb, or ipdb if installed) on error or failure.", ) parser.add_argument( "-b", "--buffer", action="store_true", help="Discard output from passing tests.", ) parser.add_argument( "--no-faulthandler", action="store_false", dest="enable_faulthandler", help="Disables the Python faulthandler module during tests.", ) parser.add_argument( "--timing", action="store_true", help=("Output timings, including database set up and total run time."), ) parser.add_argument( "-k", action="append", dest="test_name_patterns", help=( "Only run test methods and classes that match the pattern " "or substring. Can be used multiple times. Same as " "unittest -k option." ), ) @property def shuffle_seed(self): if self._shuffler is None: return None return self._shuffler.seed def log(self, msg, level=None): """ Log the message at the given logging level (the default is INFO). If a logger isn't set, the message is instead printed to the console, respecting the configured verbosity. A verbosity of 0 prints no output, a verbosity of 1 prints INFO and above, and a verbosity of 2 or higher prints all levels. """ if level is None: level = logging.INFO if self.logger is None: if self.verbosity <= 0 or (self.verbosity == 1 and level < logging.INFO): return print(msg) else: self.logger.log(level, msg) def setup_test_environment(self, **kwargs): setup_test_environment(debug=self.debug_mode) unittest.installHandler() def setup_shuffler(self): if self.shuffle is False: return shuffler = Shuffler(seed=self.shuffle) self.log(f"Using shuffle seed: {shuffler.seed_display}") self._shuffler = shuffler @contextmanager def load_with_patterns(self): original_test_name_patterns = self.test_loader.testNamePatterns self.test_loader.testNamePatterns = self.test_name_patterns try: yield finally: # Restore the original patterns. self.test_loader.testNamePatterns = original_test_name_patterns def load_tests_for_label(self, label, discover_kwargs): label_as_path = os.path.abspath(label) tests = None # If a module, or "module.ClassName[.method_name]", just run those. if not os.path.exists(label_as_path): with self.load_with_patterns(): tests = self.test_loader.loadTestsFromName(label) if tests.countTestCases(): return tests # Try discovery if "label" is a package or directory. is_importable, is_package = try_importing(label) if is_importable: if not is_package: return tests elif not os.path.isdir(label_as_path): if os.path.exists(label_as_path): assert tests is None raise RuntimeError( f"One of the test labels is a path to a file: {label!r}, " f"which is not supported. Use a dotted module name or " f"path to a directory instead." ) return tests kwargs = discover_kwargs.copy() if os.path.isdir(label_as_path) and not self.top_level: kwargs["top_level_dir"] = find_top_level(label_as_path) with self.load_with_patterns(): tests = self.test_loader.discover(start_dir=label, **kwargs) # Make unittest forget the top-level dir it calculated from this run, # to support running tests from two different top-levels. 
self.test_loader._top_level_dir = None return tests def build_suite(self, test_labels=None, extra_tests=None, **kwargs): if extra_tests is not None: warnings.warn( "The extra_tests argument is deprecated.", RemovedInDjango50Warning, stacklevel=2, ) test_labels = test_labels or ["."] extra_tests = extra_tests or [] discover_kwargs = {} if self.pattern is not None: discover_kwargs["pattern"] = self.pattern if self.top_level is not None: discover_kwargs["top_level_dir"] = self.top_level self.setup_shuffler() all_tests = [] for label in test_labels: tests = self.load_tests_for_label(label, discover_kwargs) all_tests.extend(iter_test_cases(tests)) all_tests.extend(iter_test_cases(extra_tests)) if self.tags or self.exclude_tags: if self.tags: self.log( "Including test tag(s): %s." % ", ".join(sorted(self.tags)), level=logging.DEBUG, ) if self.exclude_tags: self.log( "Excluding test tag(s): %s." % ", ".join(sorted(self.exclude_tags)), level=logging.DEBUG, ) all_tests = filter_tests_by_tags(all_tests, self.tags, self.exclude_tags) # Put the failures detected at load time first for quicker feedback. # _FailedTest objects include things like test modules that couldn't be # found or that couldn't be loaded due to syntax errors. test_types = (unittest.loader._FailedTest, *self.reorder_by) all_tests = list( reorder_tests( all_tests, test_types, shuffler=self._shuffler, reverse=self.reverse, ) ) self.log("Found %d test(s)." % len(all_tests)) suite = self.test_suite(all_tests) if self.parallel > 1: subsuites = partition_suite_by_case(suite) # Since tests are distributed across processes on a per-TestCase # basis, there's no need for more processes than TestCases. processes = min(self.parallel, len(subsuites)) # Update also "parallel" because it's used to determine the number # of test databases. self.parallel = processes if processes > 1: suite = self.parallel_test_suite( subsuites, processes, self.failfast, self.debug_mode, self.buffer, ) return suite def setup_databases(self, **kwargs): return _setup_databases( self.verbosity, self.interactive, time_keeper=self.time_keeper, keepdb=self.keepdb, debug_sql=self.debug_sql, parallel=self.parallel, **kwargs, ) def get_resultclass(self): if self.debug_sql: return DebugSQLTextTestResult elif self.pdb: return PDBDebugResult def get_test_runner_kwargs(self): return { "failfast": self.failfast, "resultclass": self.get_resultclass(), "verbosity": self.verbosity, "buffer": self.buffer, } def run_checks(self, databases): # Checks are run after database creation since some checks require # database access. 
call_command("check", verbosity=self.verbosity, databases=databases) def run_suite(self, suite, **kwargs): kwargs = self.get_test_runner_kwargs() runner = self.test_runner(**kwargs) try: return runner.run(suite) finally: if self._shuffler is not None: seed_display = self._shuffler.seed_display self.log(f"Used shuffle seed: {seed_display}") def teardown_databases(self, old_config, **kwargs): """Destroy all the non-mirror databases.""" _teardown_databases( old_config, verbosity=self.verbosity, parallel=self.parallel, keepdb=self.keepdb, ) def teardown_test_environment(self, **kwargs): unittest.removeHandler() teardown_test_environment() def suite_result(self, suite, result, **kwargs): return ( len(result.failures) + len(result.errors) + len(result.unexpectedSuccesses) ) def _get_databases(self, suite): databases = {} for test in iter_test_cases(suite): test_databases = getattr(test, "databases", None) if test_databases == "__all__": test_databases = connections if test_databases: serialized_rollback = getattr(test, "serialized_rollback", False) databases.update( (alias, serialized_rollback or databases.get(alias, False)) for alias in test_databases ) return databases def get_databases(self, suite): databases = self._get_databases(suite) unused_databases = [alias for alias in connections if alias not in databases] if unused_databases: self.log( "Skipping setup of unused database(s): %s." % ", ".join(sorted(unused_databases)), level=logging.DEBUG, ) return databases def run_tests(self, test_labels, extra_tests=None, **kwargs): """ Run the unit tests for all the test labels in the provided list. Test labels should be dotted Python paths to test modules, test classes, or test methods. Return the number of tests that failed. """ if extra_tests is not None: warnings.warn( "The extra_tests argument is deprecated.", RemovedInDjango50Warning, stacklevel=2, ) self.setup_test_environment() suite = self.build_suite(test_labels, extra_tests) databases = self.get_databases(suite) suite.serialized_aliases = set( alias for alias, serialize in databases.items() if serialize ) with self.time_keeper.timed("Total database setup"): old_config = self.setup_databases( aliases=databases, serialized_aliases=suite.serialized_aliases, ) run_failed = False try: self.run_checks(databases) result = self.run_suite(suite) except Exception: run_failed = True raise finally: try: with self.time_keeper.timed("Total database teardown"): self.teardown_databases(old_config) self.teardown_test_environment() except Exception: # Silence teardown exceptions if an exception was raised during # runs to avoid shadowing it. if not run_failed: raise self.time_keeper.print_results() return self.suite_result(suite, result) def try_importing(label): """ Try importing a test label, and return (is_importable, is_package). Relative labels like "." and ".." are seen as directories. """ try: mod = import_module(label) except (ImportError, TypeError): return (False, False) return (True, hasattr(mod, "__path__")) def find_top_level(top_level): # Try to be a bit smarter than unittest about finding the default top-level # for a given directory path, to avoid breaking relative imports. # (Unittest's default is to set top-level equal to the path, which means # relative imports will result in "Attempted relative import in # non-package."). 
# We'd be happy to skip this and require dotted module paths (which don't # cause this problem) instead of file paths (which do), but in the case of # a directory in the cwd, which would be equally valid if considered as a # top-level module or as a directory path, unittest unfortunately prefers # the latter. while True: init_py = os.path.join(top_level, "__init__.py") if not os.path.exists(init_py): break try_next = os.path.dirname(top_level) if try_next == top_level: # __init__.py all the way down? give up. break top_level = try_next return top_level def _class_shuffle_key(cls): return f"{cls.__module__}.{cls.__qualname__}" def shuffle_tests(tests, shuffler): """ Return an iterator over the given tests in a shuffled order, keeping tests next to other tests of their class. `tests` should be an iterable of tests. """ tests_by_type = {} for _, class_tests in itertools.groupby(tests, type): class_tests = list(class_tests) test_type = type(class_tests[0]) class_tests = shuffler.shuffle(class_tests, key=lambda test: test.id()) tests_by_type[test_type] = class_tests classes = shuffler.shuffle(tests_by_type, key=_class_shuffle_key) return itertools.chain(*(tests_by_type[cls] for cls in classes)) def reorder_test_bin(tests, shuffler=None, reverse=False): """ Return an iterator that reorders the given tests, keeping tests next to other tests of their class. `tests` should be an iterable of tests that supports reversed(). """ if shuffler is None: if reverse: return reversed(tests) # The function must return an iterator. return iter(tests) tests = shuffle_tests(tests, shuffler) if not reverse: return tests # Arguments to reversed() must be reversible. return reversed(list(tests)) def reorder_tests(tests, classes, reverse=False, shuffler=None): """ Reorder an iterable of tests, grouping by the given TestCase classes. This function also removes any duplicates and reorders so that tests of the same type are consecutive. The result is returned as an iterator. `classes` is a sequence of types. Tests that are instances of `classes[0]` are grouped first, followed by instances of `classes[1]`, etc. Tests that are not instances of any of the classes are grouped last. If `reverse` is True, the tests within each `classes` group are reversed, but without reversing the order of `classes` itself. The `shuffler` argument is an optional instance of this module's `Shuffler` class. If provided, tests will be shuffled within each `classes` group, but keeping tests with other tests of their TestCase class. Reversing is applied after shuffling to allow reversing the same random order. """ # Each bin maps TestCase class to OrderedSet of tests. This permits tests # to be grouped by TestCase class even if provided non-consecutively. bins = [defaultdict(OrderedSet) for i in range(len(classes) + 1)] *class_bins, last_bin = bins for test in tests: for test_bin, test_class in zip(class_bins, classes): if isinstance(test, test_class): break else: test_bin = last_bin test_bin[type(test)].add(test) for test_bin in bins: # Call list() since reorder_test_bin()'s input must support reversed(). 
tests = list(itertools.chain.from_iterable(test_bin.values())) yield from reorder_test_bin(tests, shuffler=shuffler, reverse=reverse) def partition_suite_by_case(suite): """Partition a test suite by test case, preserving the order of tests.""" suite_class = type(suite) all_tests = iter_test_cases(suite) return [suite_class(tests) for _, tests in itertools.groupby(all_tests, type)] def test_match_tags(test, tags, exclude_tags): if isinstance(test, unittest.loader._FailedTest): # Tests that couldn't load always match to prevent tests from falsely # passing due e.g. to syntax errors. return True test_tags = set(getattr(test, "tags", [])) test_fn_name = getattr(test, "_testMethodName", str(test)) if hasattr(test, test_fn_name): test_fn = getattr(test, test_fn_name) test_fn_tags = list(getattr(test_fn, "tags", [])) test_tags = test_tags.union(test_fn_tags) if tags and test_tags.isdisjoint(tags): return False return test_tags.isdisjoint(exclude_tags) def filter_tests_by_tags(tests, tags, exclude_tags): """Return the matching tests as an iterator.""" return (test for test in tests if test_match_tags(test, tags, exclude_tags))
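# --- Illustrative sketch (editor's addition, not part of django/test/runner.py)
# A minimal standalone model of the consistency property documented on the
# Shuffler class above: ordering items by a hash of seed + key(item) means
# that removing items never changes the relative order of the ones that
# remain. This uses hashlib directly instead of Django's new_hash() helper.
import hashlib


def _sketch_shuffle(items, seed, key=str):
    """Return the items ordered by the hex digest of seed + key(item)."""
    def digest(item):
        return hashlib.md5(f"{seed}{key(item)}".encode()).hexdigest()

    return sorted(items, key=digest)


_full = _sketch_shuffle(["a", "b", "c", "d"], seed=1234)
_partial = _sketch_shuffle(["a", "c", "d"], seed=1234)
# The smaller set keeps the relative order it had in the full shuffle.
assert [item for item in _full if item != "b"] == _partial
# ---------------------------------------------------------------------------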
7789ef1c1b3c078e30320b8eb1c4d15f57a252cd7f119a9892ef09a22ccc5386
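# --- Illustrative sketch (editor's addition, not part of django/test/testcases.py)
# SimpleTestCase below blocks database access on unused aliases by swapping
# selected connection methods for a callable that raises, keeping the
# original as `.wrapped` so it can be restored afterwards. This is a minimal
# standalone model of that guard pattern on a dummy object; the names are
# illustrative, not Django APIs.
class _SketchGuard:
    def __init__(self, wrapped, message):
        self.wrapped = wrapped
        self.message = message

    def __call__(self, *args, **kwargs):
        raise AssertionError(self.message)


class _SketchConnection:
    def cursor(self):
        return "real cursor"


_conn = _SketchConnection()
# Install the guard: any call now raises with a helpful message.
_conn.cursor = _SketchGuard(_conn.cursor, "Queries are not allowed here.")
try:
    _conn.cursor()
except AssertionError:
    pass  # blocked, as expected
# Restore the original bound method via the guard's `.wrapped` attribute.
_conn.cursor = _conn.cursor.wrapped
assert _conn.cursor() == "real cursor"
# ---------------------------------------------------------------------------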
import asyncio import difflib import inspect import json import logging import posixpath import sys import threading import unittest import warnings from collections import Counter from contextlib import contextmanager from copy import copy, deepcopy from difflib import get_close_matches from functools import wraps from unittest.suite import _DebugResult from unittest.util import safe_repr from urllib.parse import ( parse_qsl, unquote, urlencode, urljoin, urlparse, urlsplit, urlunparse, ) from urllib.request import url2pathname from asgiref.sync import async_to_sync from django.apps import apps from django.conf import settings from django.core import mail from django.core.exceptions import ImproperlyConfigured, ValidationError from django.core.files import locks from django.core.handlers.wsgi import WSGIHandler, get_path_info from django.core.management import call_command from django.core.management.color import no_style from django.core.management.sql import emit_post_migrate_signal from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler from django.core.signals import setting_changed from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction from django.forms.fields import CharField from django.http import QueryDict from django.http.request import split_domain_port, validate_host from django.http.response import HttpResponseBase from django.test.client import AsyncClient, Client from django.test.html import HTMLParseError, parse_html from django.test.signals import template_rendered from django.test.utils import ( CaptureQueriesContext, ContextList, compare_xml, modify_settings, override_settings, ) from django.utils.deprecation import RemovedInDjango50Warning, RemovedInDjango51Warning from django.utils.functional import classproperty from django.utils.version import PY310 from django.views.static import serve logger = logging.getLogger("django.test") __all__ = ( "TestCase", "TransactionTestCase", "SimpleTestCase", "skipIfDBFeature", "skipUnlessDBFeature", ) def to_list(value): """Put value into a list if it's not already one.""" if not isinstance(value, list): value = [value] return value def assert_and_parse_html(self, html, user_msg, msg): try: dom = parse_html(html) except HTMLParseError as e: standardMsg = "%s\n%s" % (msg, e) self.fail(self._formatMessage(user_msg, standardMsg)) return dom class _AssertNumQueriesContext(CaptureQueriesContext): def __init__(self, test_case, num, connection): self.test_case = test_case self.num = num super().__init__(connection) def __exit__(self, exc_type, exc_value, traceback): super().__exit__(exc_type, exc_value, traceback) if exc_type is not None: return executed = len(self) self.test_case.assertEqual( executed, self.num, "%d queries executed, %d expected\nCaptured queries were:\n%s" % ( executed, self.num, "\n".join( "%d. 
%s" % (i, query["sql"]) for i, query in enumerate(self.captured_queries, start=1) ), ), ) class _AssertTemplateUsedContext: def __init__(self, test_case, template_name, msg_prefix="", count=None): self.test_case = test_case self.template_name = template_name self.msg_prefix = msg_prefix self.count = count self.rendered_templates = [] self.rendered_template_names = [] self.context = ContextList() def on_template_render(self, sender, signal, template, context, **kwargs): self.rendered_templates.append(template) self.rendered_template_names.append(template.name) self.context.append(copy(context)) def test(self): self.test_case._assert_template_used( self.template_name, self.rendered_template_names, self.msg_prefix, self.count, ) def __enter__(self): template_rendered.connect(self.on_template_render) return self def __exit__(self, exc_type, exc_value, traceback): template_rendered.disconnect(self.on_template_render) if exc_type is not None: return self.test() class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext): def test(self): self.test_case.assertFalse( self.template_name in self.rendered_template_names, f"{self.msg_prefix}Template '{self.template_name}' was used " f"unexpectedly in rendering the response", ) class DatabaseOperationForbidden(AssertionError): pass class _DatabaseFailure: def __init__(self, wrapped, message): self.wrapped = wrapped self.message = message def __call__(self): raise DatabaseOperationForbidden(self.message) # RemovedInDjango50Warning class _AssertFormErrorDeprecationHelper: @staticmethod def assertFormError(self, response, form, field, errors, msg_prefix=""): """ Search through all the rendered contexts of the `response` for a form named `form` then dispatch to the new assertFormError() using that instance. If multiple contexts contain the form, they're all checked in order and any failure will abort (this matches the old behavior). """ warning_msg = ( f"Passing response to assertFormError() is deprecated. Use the form object " f"directly: assertFormError(response.context[{form!r}], {field!r}, ...)" ) warnings.warn(warning_msg, RemovedInDjango50Warning, stacklevel=2) full_msg_prefix = f"{msg_prefix}: " if msg_prefix else "" contexts = to_list(response.context) if response.context is not None else [] if not contexts: self.fail( f"{full_msg_prefix}Response did not use any contexts to render the " f"response" ) # Search all contexts for the error. found_form = False for i, context in enumerate(contexts): if form not in context: continue found_form = True self.assertFormError(context[form], field, errors, msg_prefix=msg_prefix) if not found_form: self.fail( f"{full_msg_prefix}The form '{form}' was not used to render the " f"response" ) @staticmethod def assertFormSetError( self, response, formset, form_index, field, errors, msg_prefix="" ): """ Search for a formset named "formset" in the "response" and dispatch to the new assertFormSetError() using that instance. If the name is found in multiple contexts they're all checked in order and any failure will abort the test. """ warning_msg = ( f"Passing response to assertFormSetError() is deprecated. 
Use the formset " f"object directly: assertFormSetError(response.context[{formset!r}], " f"{form_index!r}, ...)" ) warnings.warn(warning_msg, RemovedInDjango50Warning, stacklevel=2) full_msg_prefix = f"{msg_prefix}: " if msg_prefix else "" contexts = to_list(response.context) if response.context is not None else [] if not contexts: self.fail( f"{full_msg_prefix}Response did not use any contexts to render the " f"response" ) found_formset = False for i, context in enumerate(contexts): if formset not in context or not hasattr(context[formset], "forms"): continue found_formset = True self.assertFormSetError( context[formset], form_index, field, errors, msg_prefix ) if not found_formset: self.fail( f"{full_msg_prefix}The formset '{formset}' was not used to render the " f"response" ) @classmethod def patch_signature(cls, new_method): """ Replace the decorated method with a new one that inspects the passed args/kwargs and dispatch to the old implementation (with deprecation warning) when it detects the old signature. """ @wraps(new_method) def patched_method(self, *args, **kwargs): old_method = getattr(cls, new_method.__name__) old_signature = inspect.signature(old_method) try: old_bound_args = old_signature.bind(self, *args, **kwargs) except TypeError: # If old signature doesn't match then either: # 1) new signature will match # 2) or a TypeError will be raised showing the user information # about the new signature. return new_method(self, *args, **kwargs) new_signature = inspect.signature(new_method) try: new_bound_args = new_signature.bind(self, *args, **kwargs) except TypeError: # Old signature matches but not the new one (because of # previous try/except). return old_method(self, *args, **kwargs) # If both signatures match, decide on which method to call by # inspecting the first arg (arg[0] = self). assert old_bound_args.args[1] == new_bound_args.args[1] if hasattr( old_bound_args.args[1], "context" ): # Looks like a response object => old method. return old_method(self, *args, **kwargs) elif isinstance(old_bound_args.args[1], HttpResponseBase): raise ValueError( f"{old_method.__name__}() is only usable on responses fetched " f"using the Django test Client." ) else: return new_method(self, *args, **kwargs) return patched_method class SimpleTestCase(unittest.TestCase): # The class we'll use for the test client self.client. # Can be overridden in derived classes. client_class = Client async_client_class = AsyncClient _overridden_settings = None _modified_settings = None databases = set() _disallowed_database_msg = ( "Database %(operation)s to %(alias)r are not allowed in SimpleTestCase " "subclasses. Either subclass TestCase or TransactionTestCase to ensure " "proper test isolation or add %(alias)r to %(test)s.databases to silence " "this failure." 
) _disallowed_connection_methods = [ ("connect", "connections"), ("temporary_connection", "connections"), ("cursor", "queries"), ("chunked_cursor", "queries"), ] @classmethod def setUpClass(cls): super().setUpClass() if cls._overridden_settings: cls._cls_overridden_context = override_settings(**cls._overridden_settings) cls._cls_overridden_context.enable() cls.addClassCleanup(cls._cls_overridden_context.disable) if cls._modified_settings: cls._cls_modified_context = modify_settings(cls._modified_settings) cls._cls_modified_context.enable() cls.addClassCleanup(cls._cls_modified_context.disable) cls._add_databases_failures() cls.addClassCleanup(cls._remove_databases_failures) @classmethod def _validate_databases(cls): if cls.databases == "__all__": return frozenset(connections) for alias in cls.databases: if alias not in connections: message = ( "%s.%s.databases refers to %r which is not defined in " "settings.DATABASES." % ( cls.__module__, cls.__qualname__, alias, ) ) close_matches = get_close_matches(alias, list(connections)) if close_matches: message += " Did you mean %r?" % close_matches[0] raise ImproperlyConfigured(message) return frozenset(cls.databases) @classmethod def _add_databases_failures(cls): cls.databases = cls._validate_databases() for alias in connections: if alias in cls.databases: continue connection = connections[alias] for name, operation in cls._disallowed_connection_methods: message = cls._disallowed_database_msg % { "test": "%s.%s" % (cls.__module__, cls.__qualname__), "alias": alias, "operation": operation, } method = getattr(connection, name) setattr(connection, name, _DatabaseFailure(method, message)) @classmethod def _remove_databases_failures(cls): for alias in connections: if alias in cls.databases: continue connection = connections[alias] for name, _ in cls._disallowed_connection_methods: method = getattr(connection, name) setattr(connection, name, method.wrapped) def __call__(self, result=None): """ Wrapper around default __call__ method to perform common Django test set up. This means that user-defined Test Cases aren't required to include a call to super().setUp(). """ self._setup_and_call(result) def debug(self): """Perform the same as __call__(), without catching the exception.""" debug_result = _DebugResult() self._setup_and_call(debug_result, debug=True) def _setup_and_call(self, result, debug=False): """ Perform the following in order: pre-setup, run test, post-teardown, skipping pre/post hooks if test is set to be skipped. If debug=True, reraise any errors in setup and use super().debug() instead of __call__() to run the test. """ testMethod = getattr(self, self._testMethodName) skipped = getattr(self.__class__, "__unittest_skip__", False) or getattr( testMethod, "__unittest_skip__", False ) # Convert async test methods. if asyncio.iscoroutinefunction(testMethod): setattr(self, self._testMethodName, async_to_sync(testMethod)) if not skipped: try: self._pre_setup() except Exception: if debug: raise result.addError(self, sys.exc_info()) return if debug: super().debug() else: super().__call__(result) if not skipped: try: self._post_teardown() except Exception: if debug: raise result.addError(self, sys.exc_info()) return def _pre_setup(self): """ Perform pre-test setup: * Create a test client. * Clear the mail test outbox. 
""" self.client = self.client_class() self.async_client = self.async_client_class() mail.outbox = [] def _post_teardown(self): """Perform post-test things.""" pass def settings(self, **kwargs): """ A context manager that temporarily sets a setting and reverts to the original value when exiting the context. """ return override_settings(**kwargs) def modify_settings(self, **kwargs): """ A context manager that temporarily applies changes a list setting and reverts back to the original value when exiting the context. """ return modify_settings(**kwargs) def assertRedirects( self, response, expected_url, status_code=302, target_status_code=200, msg_prefix="", fetch_redirect_response=True, ): """ Assert that a response redirected to a specific URL and that the redirect URL can be loaded. Won't work for external links since it uses the test client to do a request (use fetch_redirect_response=False to check such links without fetching them). """ if msg_prefix: msg_prefix += ": " if hasattr(response, "redirect_chain"): # The request was a followed redirect self.assertTrue( response.redirect_chain, msg_prefix + ( "Response didn't redirect as expected: Response code was %d " "(expected %d)" ) % (response.status_code, status_code), ) self.assertEqual( response.redirect_chain[0][1], status_code, msg_prefix + ( "Initial response didn't redirect as expected: Response code was " "%d (expected %d)" ) % (response.redirect_chain[0][1], status_code), ) url, status_code = response.redirect_chain[-1] self.assertEqual( response.status_code, target_status_code, msg_prefix + ( "Response didn't redirect as expected: Final Response code was %d " "(expected %d)" ) % (response.status_code, target_status_code), ) else: # Not a followed redirect self.assertEqual( response.status_code, status_code, msg_prefix + ( "Response didn't redirect as expected: Response code was %d " "(expected %d)" ) % (response.status_code, status_code), ) url = response.url scheme, netloc, path, query, fragment = urlsplit(url) # Prepend the request path to handle relative path redirects. if not path.startswith("/"): url = urljoin(response.request["PATH_INFO"], url) path = urljoin(response.request["PATH_INFO"], path) if fetch_redirect_response: # netloc might be empty, or in cases where Django tests the # HTTP scheme, the convention is for netloc to be 'testserver'. # Trust both as "internal" URLs here. domain, port = split_domain_port(netloc) if domain and not validate_host(domain, settings.ALLOWED_HOSTS): raise ValueError( "The test client is unable to fetch remote URLs (got %s). " "If the host is served by Django, add '%s' to ALLOWED_HOSTS. " "Otherwise, use " "assertRedirects(..., fetch_redirect_response=False)." % (url, domain) ) # Get the redirection page, using the same client that was used # to obtain the original response. extra = response.client.extra or {} redirect_response = response.client.get( path, QueryDict(query), secure=(scheme == "https"), **extra, ) self.assertEqual( redirect_response.status_code, target_status_code, msg_prefix + ( "Couldn't retrieve redirection page '%s': response code was %d " "(expected %d)" ) % (path, redirect_response.status_code, target_status_code), ) self.assertURLEqual( url, expected_url, msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url), ) def assertURLEqual(self, url1, url2, msg_prefix=""): """ Assert that two URLs are the same, ignoring the order of query string parameters except for parameters with the same name. 
For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but /path/?a=1&a=2 isn't equal to /path/?a=2&a=1. """ def normalize(url): """Sort the URL's query string parameters.""" url = str(url) # Coerce reverse_lazy() URLs. scheme, netloc, path, params, query, fragment = urlparse(url) query_parts = sorted(parse_qsl(query)) return urlunparse( (scheme, netloc, path, params, urlencode(query_parts), fragment) ) self.assertEqual( normalize(url1), normalize(url2), msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2), ) def _assert_contains(self, response, text, status_code, msg_prefix, html): # If the response supports deferred rendering and hasn't been rendered # yet, then ensure that it does get rendered before proceeding further. if ( hasattr(response, "render") and callable(response.render) and not response.is_rendered ): response.render() if msg_prefix: msg_prefix += ": " self.assertEqual( response.status_code, status_code, msg_prefix + "Couldn't retrieve content: Response code was %d" " (expected %d)" % (response.status_code, status_code), ) if response.streaming: content = b"".join(response.streaming_content) else: content = response.content if not isinstance(text, bytes) or html: text = str(text) content = content.decode(response.charset) text_repr = "'%s'" % text else: text_repr = repr(text) if html: content = assert_and_parse_html( self, content, None, "Response's content is not valid HTML:" ) text = assert_and_parse_html( self, text, None, "Second argument is not valid HTML:" ) real_count = content.count(text) return (text_repr, real_count, msg_prefix) def assertContains( self, response, text, count=None, status_code=200, msg_prefix="", html=False ): """ Assert that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected) and that ``text`` occurs ``count`` times in the content of the response. If ``count`` is None, the count doesn't matter - the assertion is true if the text occurs at least once in the response. """ text_repr, real_count, msg_prefix = self._assert_contains( response, text, status_code, msg_prefix, html ) if count is not None: self.assertEqual( real_count, count, msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count), ) else: self.assertTrue( real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr ) def assertNotContains( self, response, text, status_code=200, msg_prefix="", html=False ): """ Assert that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected) and that ``text`` doesn't occur in the content of the response. """ text_repr, real_count, msg_prefix = self._assert_contains( response, text, status_code, msg_prefix, html ) self.assertEqual( real_count, 0, msg_prefix + "Response should not contain %s" % text_repr ) def _check_test_client_response(self, response, attribute, method_name): """ Raise a ValueError if the given response doesn't have the required attribute. """ if not hasattr(response, attribute): raise ValueError( f"{method_name}() is only usable on responses fetched using " "the Django test Client." ) def _assert_form_error(self, form, field, errors, msg_prefix, form_repr): if not form.is_bound: self.fail( f"{msg_prefix}The {form_repr} is not bound, it will never have any " f"errors." ) if field is not None and field not in form.fields: self.fail( f"{msg_prefix}The {form_repr} does not contain the field {field!r}." 
) if field is None: field_errors = form.non_field_errors() failure_message = f"The non-field errors of {form_repr} don't match." else: field_errors = form.errors.get(field, []) failure_message = ( f"The errors of field {field!r} on {form_repr} don't match." ) self.assertEqual(field_errors, errors, msg_prefix + failure_message) # RemovedInDjango50Warning: When the deprecation ends, remove the # decorator. @_AssertFormErrorDeprecationHelper.patch_signature def assertFormError(self, form, field, errors, msg_prefix=""): """ Assert that a field named "field" on the given form object has specific errors. errors can be either a single error message or a list of errors messages. Using errors=[] test that the field has no errors. You can pass field=None to check the form's non-field errors. """ if errors is None: warnings.warn( "Passing errors=None to assertFormError() is deprecated, use " "errors=[] instead.", RemovedInDjango50Warning, stacklevel=2, ) errors = [] if msg_prefix: msg_prefix += ": " errors = to_list(errors) self._assert_form_error(form, field, errors, msg_prefix, f"form {form!r}") # RemovedInDjango51Warning. def assertFormsetError(self, *args, **kw): warnings.warn( "assertFormsetError() is deprecated in favor of assertFormSetError().", category=RemovedInDjango51Warning, stacklevel=2, ) return self.assertFormSetError(*args, **kw) # RemovedInDjango50Warning: When the deprecation ends, remove the # decorator. @_AssertFormErrorDeprecationHelper.patch_signature def assertFormSetError(self, formset, form_index, field, errors, msg_prefix=""): """ Similar to assertFormError() but for formsets. Use form_index=None to check the formset's non-form errors (in that case, you must also use field=None). Otherwise use an integer to check the formset's n-th form for errors. Other parameters are the same as assertFormError(). """ if errors is None: warnings.warn( "Passing errors=None to assertFormSetError() is deprecated, " "use errors=[] instead.", RemovedInDjango50Warning, stacklevel=2, ) errors = [] if form_index is None and field is not None: raise ValueError("You must use field=None with form_index=None.") if msg_prefix: msg_prefix += ": " errors = to_list(errors) if not formset.is_bound: self.fail( f"{msg_prefix}The formset {formset!r} is not bound, it will never have " f"any errors." ) if form_index is not None and form_index >= formset.total_form_count(): form_count = formset.total_form_count() form_or_forms = "forms" if form_count > 1 else "form" self.fail( f"{msg_prefix}The formset {formset!r} only has {form_count} " f"{form_or_forms}." ) if form_index is not None: form_repr = f"form {form_index} of formset {formset!r}" self._assert_form_error( formset.forms[form_index], field, errors, msg_prefix, form_repr ) else: failure_message = f"The non-form errors of formset {formset!r} don't match." 
self.assertEqual( formset.non_form_errors(), errors, msg_prefix + failure_message ) def _get_template_used(self, response, template_name, msg_prefix, method_name): if response is None and template_name is None: raise TypeError("response and/or template_name argument must be provided") if msg_prefix: msg_prefix += ": " if template_name is not None and response is not None: self._check_test_client_response(response, "templates", method_name) if not hasattr(response, "templates") or (response is None and template_name): if response: template_name = response response = None # use this template with context manager return template_name, None, msg_prefix template_names = [t.name for t in response.templates if t.name is not None] return None, template_names, msg_prefix def _assert_template_used(self, template_name, template_names, msg_prefix, count): if not template_names: self.fail(msg_prefix + "No templates used to render the response") self.assertTrue( template_name in template_names, msg_prefix + "Template '%s' was not a template used to render" " the response. Actual template(s) used: %s" % (template_name, ", ".join(template_names)), ) if count is not None: self.assertEqual( template_names.count(template_name), count, msg_prefix + "Template '%s' was expected to be rendered %d " "time(s) but was actually rendered %d time(s)." % (template_name, count, template_names.count(template_name)), ) def assertTemplateUsed( self, response=None, template_name=None, msg_prefix="", count=None ): """ Assert that the template with the provided name was used in rendering the response. Also usable as context manager. """ context_mgr_template, template_names, msg_prefix = self._get_template_used( response, template_name, msg_prefix, "assertTemplateUsed", ) if context_mgr_template: # Use assertTemplateUsed as context manager. return _AssertTemplateUsedContext( self, context_mgr_template, msg_prefix, count ) self._assert_template_used(template_name, template_names, msg_prefix, count) def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=""): """ Assert that the template with the provided name was NOT used in rendering the response. Also usable as context manager. """ context_mgr_template, template_names, msg_prefix = self._get_template_used( response, template_name, msg_prefix, "assertTemplateNotUsed", ) if context_mgr_template: # Use assertTemplateNotUsed as context manager. return _AssertTemplateNotUsedContext(self, context_mgr_template, msg_prefix) self.assertFalse( template_name in template_names, msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name, ) @contextmanager def _assert_raises_or_warns_cm( self, func, cm_attr, expected_exception, expected_message ): with func(expected_exception) as cm: yield cm self.assertIn(expected_message, str(getattr(cm, cm_attr))) def _assertFooMessage( self, func, cm_attr, expected_exception, expected_message, *args, **kwargs ): callable_obj = None if args: callable_obj, *args = args cm = self._assert_raises_or_warns_cm( func, cm_attr, expected_exception, expected_message ) # Assertion used in context manager fashion. if callable_obj is None: return cm # Assertion was passed a callable. with cm: callable_obj(*args, **kwargs) def assertRaisesMessage( self, expected_exception, expected_message, *args, **kwargs ): """ Assert that expected_message is found in the message of a raised exception. Args: expected_exception: Exception class expected to be raised. expected_message: expected error message string value. 
args: Function to be called and extra positional args. kwargs: Extra kwargs. """ return self._assertFooMessage( self.assertRaises, "exception", expected_exception, expected_message, *args, **kwargs, ) def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs): """ Same as assertRaisesMessage but for assertWarns() instead of assertRaises(). """ return self._assertFooMessage( self.assertWarns, "warning", expected_warning, expected_message, *args, **kwargs, ) # A similar method is available in Python 3.10+. if not PY310: @contextmanager def assertNoLogs(self, logger, level=None): """ Assert no messages are logged on the logger, with at least the given level. """ if isinstance(level, int): level = logging.getLevelName(level) elif level is None: level = "INFO" try: with self.assertLogs(logger, level) as cm: yield except AssertionError as e: msg = e.args[0] expected_msg = ( f"no logs of level {level} or higher triggered on {logger}" ) if msg != expected_msg: raise e else: self.fail(f"Unexpected logs found: {cm.output!r}") def assertFieldOutput( self, fieldclass, valid, invalid, field_args=None, field_kwargs=None, empty_value="", ): """ Assert that a form field behaves correctly with various inputs. Args: fieldclass: the class of the field to be tested. valid: a dictionary mapping valid inputs to their expected cleaned values. invalid: a dictionary mapping invalid inputs to one or more raised error messages. field_args: the args passed to instantiate the field field_kwargs: the kwargs passed to instantiate the field empty_value: the expected clean output for inputs in empty_values """ if field_args is None: field_args = [] if field_kwargs is None: field_kwargs = {} required = fieldclass(*field_args, **field_kwargs) optional = fieldclass(*field_args, **{**field_kwargs, "required": False}) # test valid inputs for input, output in valid.items(): self.assertEqual(required.clean(input), output) self.assertEqual(optional.clean(input), output) # test invalid inputs for input, errors in invalid.items(): with self.assertRaises(ValidationError) as context_manager: required.clean(input) self.assertEqual(context_manager.exception.messages, errors) with self.assertRaises(ValidationError) as context_manager: optional.clean(input) self.assertEqual(context_manager.exception.messages, errors) # test required inputs error_required = [required.error_messages["required"]] for e in required.empty_values: with self.assertRaises(ValidationError) as context_manager: required.clean(e) self.assertEqual(context_manager.exception.messages, error_required) self.assertEqual(optional.clean(e), empty_value) # test that max_length and min_length are always accepted if issubclass(fieldclass, CharField): field_kwargs.update({"min_length": 2, "max_length": 20}) self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass) def assertHTMLEqual(self, html1, html2, msg=None): """ Assert that two HTML snippets are semantically the same. Whitespace in most cases is ignored, and attribute ordering is not significant. The arguments must be valid HTML. 
""" dom1 = assert_and_parse_html( self, html1, msg, "First argument is not valid HTML:" ) dom2 = assert_and_parse_html( self, html2, msg, "Second argument is not valid HTML:" ) if dom1 != dom2: standardMsg = "%s != %s" % (safe_repr(dom1, True), safe_repr(dom2, True)) diff = "\n" + "\n".join( difflib.ndiff( str(dom1).splitlines(), str(dom2).splitlines(), ) ) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertHTMLNotEqual(self, html1, html2, msg=None): """Assert that two HTML snippets are not semantically equivalent.""" dom1 = assert_and_parse_html( self, html1, msg, "First argument is not valid HTML:" ) dom2 = assert_and_parse_html( self, html2, msg, "Second argument is not valid HTML:" ) if dom1 == dom2: standardMsg = "%s == %s" % (safe_repr(dom1, True), safe_repr(dom2, True)) self.fail(self._formatMessage(msg, standardMsg)) def assertInHTML(self, needle, haystack, count=None, msg_prefix=""): needle = assert_and_parse_html( self, needle, None, "First argument is not valid HTML:" ) haystack = assert_and_parse_html( self, haystack, None, "Second argument is not valid HTML:" ) real_count = haystack.count(needle) if count is not None: self.assertEqual( real_count, count, msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count), ) else: self.assertTrue( real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle ) def assertJSONEqual(self, raw, expected_data, msg=None): """ Assert that the JSON fragments raw and expected_data are equal. Usual JSON non-significant whitespace rules apply as the heavyweight is delegated to the json library. """ try: data = json.loads(raw) except json.JSONDecodeError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, str): try: expected_data = json.loads(expected_data) except ValueError: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertEqual(data, expected_data, msg=msg) def assertJSONNotEqual(self, raw, expected_data, msg=None): """ Assert that the JSON fragments raw and expected_data are not equal. Usual JSON non-significant whitespace rules apply as the heavyweight is delegated to the json library. """ try: data = json.loads(raw) except json.JSONDecodeError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, str): try: expected_data = json.loads(expected_data) except json.JSONDecodeError: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertNotEqual(data, expected_data, msg=msg) def assertXMLEqual(self, xml1, xml2, msg=None): """ Assert that two XML snippets are semantically the same. Whitespace in most cases is ignored and attribute ordering is not significant. The arguments must be valid XML. """ try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = "First or second argument is not valid XML\n%s" % e self.fail(self._formatMessage(msg, standardMsg)) else: if not result: standardMsg = "%s != %s" % ( safe_repr(xml1, True), safe_repr(xml2, True), ) diff = "\n" + "\n".join( difflib.ndiff(xml1.splitlines(), xml2.splitlines()) ) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertXMLNotEqual(self, xml1, xml2, msg=None): """ Assert that two XML snippets are not semantically equivalent. Whitespace in most cases is ignored and attribute ordering is not significant. The arguments must be valid XML. 
""" try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = "First or second argument is not valid XML\n%s" % e self.fail(self._formatMessage(msg, standardMsg)) else: if result: standardMsg = "%s == %s" % ( safe_repr(xml1, True), safe_repr(xml2, True), ) self.fail(self._formatMessage(msg, standardMsg)) class TransactionTestCase(SimpleTestCase): # Subclasses can ask for resetting of auto increment sequence before each # test case reset_sequences = False # Subclasses can enable only a subset of apps for faster tests available_apps = None # Subclasses can define fixtures which will be automatically installed. fixtures = None databases = {DEFAULT_DB_ALIAS} _disallowed_database_msg = ( "Database %(operation)s to %(alias)r are not allowed in this test. " "Add %(alias)r to %(test)s.databases to ensure proper test isolation " "and silence this failure." ) # If transactions aren't available, Django will serialize the database # contents into a fixture during setup and flush and reload them # during teardown (as flush does not restore data from migrations). # This can be slow; this flag allows enabling on a per-case basis. serialized_rollback = False def _pre_setup(self): """ Perform pre-test setup: * If the class has an 'available_apps' attribute, restrict the app registry to these applications, then fire the post_migrate signal -- it must run with the correct set of applications for the test case. * If the class has a 'fixtures' attribute, install those fixtures. """ super()._pre_setup() if self.available_apps is not None: apps.set_available_apps(self.available_apps) setting_changed.send( sender=settings._wrapped.__class__, setting="INSTALLED_APPS", value=self.available_apps, enter=True, ) for db_name in self._databases_names(include_mirrors=False): emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name) try: self._fixture_setup() except Exception: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send( sender=settings._wrapped.__class__, setting="INSTALLED_APPS", value=settings.INSTALLED_APPS, enter=False, ) raise # Clear the queries_log so that it's less likely to overflow (a single # test probably won't execute 9K queries). If queries_log overflows, # then assertNumQueries() doesn't work. for db_name in self._databases_names(include_mirrors=False): connections[db_name].queries_log.clear() @classmethod def _databases_names(cls, include_mirrors=True): # Only consider allowed database aliases, including mirrors or not. return [ alias for alias in connections if alias in cls.databases and ( include_mirrors or not connections[alias].settings_dict["TEST"]["MIRROR"] ) ] def _reset_sequences(self, db_name): conn = connections[db_name] if conn.features.supports_sequence_reset: sql_list = conn.ops.sequence_reset_by_name_sql( no_style(), conn.introspection.sequence_list() ) if sql_list: with transaction.atomic(using=db_name): with conn.cursor() as cursor: for sql in sql_list: cursor.execute(sql) def _fixture_setup(self): for db_name in self._databases_names(include_mirrors=False): # Reset sequences if self.reset_sequences: self._reset_sequences(db_name) # Provide replica initial data from migrated apps, if needed. 
if self.serialized_rollback and hasattr( connections[db_name], "_test_serialized_contents" ): if self.available_apps is not None: apps.unset_available_apps() connections[db_name].creation.deserialize_db_from_string( connections[db_name]._test_serialized_contents ) if self.available_apps is not None: apps.set_available_apps(self.available_apps) if self.fixtures: # We have to use this slightly awkward syntax due to the fact # that we're using *args and **kwargs together. call_command( "loaddata", *self.fixtures, **{"verbosity": 0, "database": db_name} ) def _should_reload_connections(self): return True def _post_teardown(self): """ Perform post-test things: * Flush the contents of the database to leave a clean slate. If the class has an 'available_apps' attribute, don't fire post_migrate. * Force-close the connection so the next test gets a clean cursor. """ try: self._fixture_teardown() super()._post_teardown() if self._should_reload_connections(): # Some DB cursors include SQL statements as part of cursor # creation. If you have a test that does a rollback, the effect # of these statements is lost, which can affect the operation of # tests (e.g., losing a timezone setting causing objects to be # created with the wrong time). To make sure this doesn't # happen, get a clean connection at the start of every test. for conn in connections.all(initialized_only=True): conn.close() finally: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send( sender=settings._wrapped.__class__, setting="INSTALLED_APPS", value=settings.INSTALLED_APPS, enter=False, ) def _fixture_teardown(self): # Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal # when flushing only a subset of the apps for db_name in self._databases_names(include_mirrors=False): # Flush the database inhibit_post_migrate = ( self.available_apps is not None or ( # Inhibit the post_migrate signal when using serialized # rollback to avoid trying to recreate the serialized data. self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents") ) ) call_command( "flush", verbosity=0, interactive=False, database=db_name, reset_sequences=False, allow_cascade=self.available_apps is not None, inhibit_post_migrate=inhibit_post_migrate, ) # RemovedInDjango51Warning. def assertQuerysetEqual(self, *args, **kw): warnings.warn( "assertQuerysetEqual() is deprecated in favor of assertQuerySetEqual().", category=RemovedInDjango51Warning, stacklevel=2, ) return self.assertQuerySetEqual(*args, **kw) def assertQuerySetEqual(self, qs, values, transform=None, ordered=True, msg=None): values = list(values) items = qs if transform is not None: items = map(transform, items) if not ordered: return self.assertDictEqual(Counter(items), Counter(values), msg=msg) # For example qs.iterator() could be passed as qs, but it does not # have 'ordered' attribute. if len(values) > 1 and hasattr(qs, "ordered") and not qs.ordered: raise ValueError( "Trying to compare non-ordered queryset against more than one " "ordered value." ) return self.assertEqual(list(items), values, msg=msg) def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs): conn = connections[using] context = _AssertNumQueriesContext(self, num, conn) if func is None: return context with context: func(*args, **kwargs) def connections_support_transactions(aliases=None): """ Return whether or not all (or specified) connections support transactions. 
""" conns = ( connections.all() if aliases is None else (connections[alias] for alias in aliases) ) return all(conn.features.supports_transactions for conn in conns) class TestData: """ Descriptor to provide TestCase instance isolation for attributes assigned during the setUpTestData() phase. Allow safe alteration of objects assigned in setUpTestData() by test methods by exposing deep copies instead of the original objects. Objects are deep copied using a memo kept on the test case instance in order to maintain their original relationships. """ memo_attr = "_testdata_memo" def __init__(self, name, data): self.name = name self.data = data def get_memo(self, testcase): try: memo = getattr(testcase, self.memo_attr) except AttributeError: memo = {} setattr(testcase, self.memo_attr, memo) return memo def __get__(self, instance, owner): if instance is None: return self.data memo = self.get_memo(instance) data = deepcopy(self.data, memo) setattr(instance, self.name, data) return data def __repr__(self): return "<TestData: name=%r, data=%r>" % (self.name, self.data) class TestCase(TransactionTestCase): """ Similar to TransactionTestCase, but use `transaction.atomic()` to achieve test isolation. In most situations, TestCase should be preferred to TransactionTestCase as it allows faster execution. However, there are some situations where using TransactionTestCase might be necessary (e.g. testing some transactional behavior). On database backends with no transaction support, TestCase behaves as TransactionTestCase. """ @classmethod def _enter_atomics(cls): """Open atomic blocks for multiple databases.""" atomics = {} for db_name in cls._databases_names(): atomic = transaction.atomic(using=db_name) atomic._from_testcase = True atomic.__enter__() atomics[db_name] = atomic return atomics @classmethod def _rollback_atomics(cls, atomics): """Rollback atomic blocks opened by the previous method.""" for db_name in reversed(cls._databases_names()): transaction.set_rollback(True, using=db_name) atomics[db_name].__exit__(None, None, None) @classmethod def _databases_support_transactions(cls): return connections_support_transactions(cls.databases) @classmethod def setUpClass(cls): super().setUpClass() if not cls._databases_support_transactions(): return cls.cls_atomics = cls._enter_atomics() if cls.fixtures: for db_name in cls._databases_names(include_mirrors=False): try: call_command( "loaddata", *cls.fixtures, **{"verbosity": 0, "database": db_name}, ) except Exception: cls._rollback_atomics(cls.cls_atomics) raise pre_attrs = cls.__dict__.copy() try: cls.setUpTestData() except Exception: cls._rollback_atomics(cls.cls_atomics) raise for name, value in cls.__dict__.items(): if value is not pre_attrs.get(name): setattr(cls, name, TestData(name, value)) @classmethod def tearDownClass(cls): if cls._databases_support_transactions(): cls._rollback_atomics(cls.cls_atomics) for conn in connections.all(initialized_only=True): conn.close() super().tearDownClass() @classmethod def setUpTestData(cls): """Load initial data for the TestCase.""" pass def _should_reload_connections(self): if self._databases_support_transactions(): return False return super()._should_reload_connections() def _fixture_setup(self): if not self._databases_support_transactions(): # If the backend does not support transactions, we should reload # class data before each test self.setUpTestData() return super()._fixture_setup() if self.reset_sequences: raise TypeError("reset_sequences cannot be used on TestCase instances") self.atomics = 
self._enter_atomics() def _fixture_teardown(self): if not self._databases_support_transactions(): return super()._fixture_teardown() try: for db_name in reversed(self._databases_names()): if self._should_check_constraints(connections[db_name]): connections[db_name].check_constraints() finally: self._rollback_atomics(self.atomics) def _should_check_constraints(self, connection): return ( connection.features.can_defer_constraint_checks and not connection.needs_rollback and connection.is_usable() ) @classmethod @contextmanager def captureOnCommitCallbacks(cls, *, using=DEFAULT_DB_ALIAS, execute=False): """Context manager to capture transaction.on_commit() callbacks.""" callbacks = [] start_count = len(connections[using].run_on_commit) try: yield callbacks finally: while True: callback_count = len(connections[using].run_on_commit) for _, callback, robust in connections[using].run_on_commit[ start_count: ]: callbacks.append(callback) if execute: if robust: try: callback() except Exception as e: logger.error( f"Error calling {callback.__qualname__} in " f"on_commit() (%s).", e, exc_info=True, ) else: callback() if callback_count == len(connections[using].run_on_commit): break start_count = callback_count class CheckCondition: """Descriptor class for deferred condition checking.""" def __init__(self, *conditions): self.conditions = conditions def add_condition(self, condition, reason): return self.__class__(*self.conditions, (condition, reason)) def __get__(self, instance, cls=None): # Trigger access for all bases. if any(getattr(base, "__unittest_skip__", False) for base in cls.__bases__): return True for condition, reason in self.conditions: if condition(): # Override this descriptor's value and set the skip reason. cls.__unittest_skip__ = True cls.__unittest_skip_why__ = reason return True return False def _deferredSkip(condition, reason, name): def decorator(test_func): nonlocal condition if not ( isinstance(test_func, type) and issubclass(test_func, unittest.TestCase) ): @wraps(test_func) def skip_wrapper(*args, **kwargs): if ( args and isinstance(args[0], unittest.TestCase) and connection.alias not in getattr(args[0], "databases", {}) ): raise ValueError( "%s cannot be used on %s as %s doesn't allow queries " "against the %r database." % ( name, args[0], args[0].__class__.__qualname__, connection.alias, ) ) if condition(): raise unittest.SkipTest(reason) return test_func(*args, **kwargs) test_item = skip_wrapper else: # Assume a class is decorated test_item = test_func databases = getattr(test_item, "databases", None) if not databases or connection.alias not in databases: # Defer raising to allow importing test class's module. def condition(): raise ValueError( "%s cannot be used on %s as it doesn't allow queries " "against the '%s' database." % ( name, test_item, connection.alias, ) ) # Retrieve the possibly existing value from the class's dict to # avoid triggering the descriptor. 
skip = test_func.__dict__.get("__unittest_skip__") if isinstance(skip, CheckCondition): test_item.__unittest_skip__ = skip.add_condition(condition, reason) elif skip is not True: test_item.__unittest_skip__ = CheckCondition((condition, reason)) return test_item return decorator def skipIfDBFeature(*features): """Skip a test if a database has at least one of the named features.""" return _deferredSkip( lambda: any( getattr(connection.features, feature, False) for feature in features ), "Database has feature(s) %s" % ", ".join(features), "skipIfDBFeature", ) def skipUnlessDBFeature(*features): """Skip a test unless a database has all the named features.""" return _deferredSkip( lambda: not all( getattr(connection.features, feature, False) for feature in features ), "Database doesn't support feature(s): %s" % ", ".join(features), "skipUnlessDBFeature", ) def skipUnlessAnyDBFeature(*features): """Skip a test unless a database has any of the named features.""" return _deferredSkip( lambda: not any( getattr(connection.features, feature, False) for feature in features ), "Database doesn't support any of the feature(s): %s" % ", ".join(features), "skipUnlessAnyDBFeature", ) class QuietWSGIRequestHandler(WSGIRequestHandler): """ A WSGIRequestHandler that doesn't log to standard output any of the requests received, so as to not clutter the test result output. """ def log_message(*args): pass class FSFilesHandler(WSGIHandler): """ WSGI middleware that intercepts calls to a directory, as defined by one of the *_ROOT settings, and serves those files, publishing them under *_URL. """ def __init__(self, application): self.application = application self.base_url = urlparse(self.get_base_url()) super().__init__() def _should_handle(self, path): """ Check if the path should be handled. Ignore the path if: * the host is provided as part of the base_url * the request's path isn't under the media path (or equal) """ return path.startswith(self.base_url[2]) and not self.base_url[1] def file_path(self, url): """Return the relative path to the file on disk for the given URL.""" relative_url = url[len(self.base_url[2]) :] return url2pathname(relative_url) def get_response(self, request): from django.http import Http404 if self._should_handle(request.path): try: return self.serve(request) except Http404: pass return super().get_response(request) def serve(self, request): os_rel_path = self.file_path(request.path) os_rel_path = posixpath.normpath(unquote(os_rel_path)) # Emulate behavior of django.contrib.staticfiles.views.serve() when it # invokes staticfiles' finders functionality. # TODO: Modify if/when that internal API is refactored final_rel_path = os_rel_path.replace("\\", "/").lstrip("/") return serve(request, final_rel_path, document_root=self.get_base_dir()) def __call__(self, environ, start_response): if not self._should_handle(get_path_info(environ)): return self.application(environ, start_response) return super().__call__(environ, start_response) class _StaticFilesHandler(FSFilesHandler): """ Handler for serving static files. A private class that is meant to be used solely as a convenience by LiveServerThread. """ def get_base_dir(self): return settings.STATIC_ROOT def get_base_url(self): return settings.STATIC_URL class _MediaFilesHandler(FSFilesHandler): """ Handler for serving the media files. A private class that is meant to be used solely as a convenience by LiveServerThread. 
""" def get_base_dir(self): return settings.MEDIA_ROOT def get_base_url(self): return settings.MEDIA_URL class LiveServerThread(threading.Thread): """Thread for running a live HTTP server while the tests are running.""" server_class = ThreadedWSGIServer def __init__(self, host, static_handler, connections_override=None, port=0): self.host = host self.port = port self.is_ready = threading.Event() self.error = None self.static_handler = static_handler self.connections_override = connections_override super().__init__() def run(self): """ Set up the live server and databases, and then loop over handling HTTP requests. """ if self.connections_override: # Override this thread's database connections with the ones # provided by the main thread. for alias, conn in self.connections_override.items(): connections[alias] = conn try: # Create the handler for serving static and media files handler = self.static_handler(_MediaFilesHandler(WSGIHandler())) self.httpd = self._create_server() # If binding to port zero, assign the port allocated by the OS. if self.port == 0: self.port = self.httpd.server_address[1] self.httpd.set_app(handler) self.is_ready.set() self.httpd.serve_forever() except Exception as e: self.error = e self.is_ready.set() finally: connections.close_all() def _create_server(self, connections_override=None): return self.server_class( (self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False, connections_override=connections_override, ) def terminate(self): if hasattr(self, "httpd"): # Stop the WSGI server self.httpd.shutdown() self.httpd.server_close() self.join() class LiveServerTestCase(TransactionTestCase): """ Do basically the same as TransactionTestCase but also launch a live HTTP server in a separate thread so that the tests may use another testing framework, such as Selenium for example, instead of the built-in dummy client. It inherits from TransactionTestCase instead of TestCase because the threads don't share the same transactions (unless if using in-memory sqlite) and each thread needs to commit all their transactions so that the other thread can see the changes. """ host = "localhost" port = 0 server_thread_class = LiveServerThread static_handler = _StaticFilesHandler @classproperty def live_server_url(cls): return "http://%s:%s" % (cls.host, cls.server_thread.port) @classproperty def allowed_host(cls): return cls.host @classmethod def _make_connections_override(cls): connections_override = {} for conn in connections.all(): # If using in-memory sqlite databases, pass the connections to # the server thread. if conn.vendor == "sqlite" and conn.is_in_memory_db(): connections_override[conn.alias] = conn return connections_override @classmethod def setUpClass(cls): super().setUpClass() cls._live_server_modified_settings = modify_settings( ALLOWED_HOSTS={"append": cls.allowed_host}, ) cls._live_server_modified_settings.enable() cls.addClassCleanup(cls._live_server_modified_settings.disable) cls._start_server_thread() @classmethod def _start_server_thread(cls): connections_override = cls._make_connections_override() for conn in connections_override.values(): # Explicitly enable thread-shareability for this connection. 
conn.inc_thread_sharing() cls.server_thread = cls._create_server_thread(connections_override) cls.server_thread.daemon = True cls.server_thread.start() cls.addClassCleanup(cls._terminate_thread) # Wait for the live server to be ready cls.server_thread.is_ready.wait() if cls.server_thread.error: raise cls.server_thread.error @classmethod def _create_server_thread(cls, connections_override): return cls.server_thread_class( cls.host, cls.static_handler, connections_override=connections_override, port=cls.port, ) @classmethod def _terminate_thread(cls): # Terminate the live server's thread. cls.server_thread.terminate() # Restore shared connections' non-shareability. for conn in cls.server_thread.connections_override.values(): conn.dec_thread_sharing() class SerializeMixin: """ Enforce serialization of TestCases that share a common resource. Define a common 'lockfile' for each set of TestCases to serialize. This file must exist on the filesystem. Place it early in the MRO in order to isolate setUpClass()/tearDownClass(). """ lockfile = None def __init_subclass__(cls, /, **kwargs): super().__init_subclass__(**kwargs) if cls.lockfile is None: raise ValueError( "{}.lockfile isn't set. Set it to a unique value " "in the base class.".format(cls.__name__) ) @classmethod def setUpClass(cls): cls._lockfile = open(cls.lockfile) cls.addClassCleanup(cls._lockfile.close) locks.lock(cls._lockfile, locks.LOCK_EX) super().setUpClass()
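# -----------------------------------------------------------------------------
# Illustrative usage (not part of this module): a minimal sketch of how the
# classes above are typically combined in a project's test suite. It assumes a
# hypothetical "polls" app with a Question model and a "polls:index" URL; those
# names are placeholders, not part of Django itself.
#
#     from django.test import TestCase
#     from django.urls import reverse
#
#     from polls.models import Question  # hypothetical model
#
#     class QuestionIndexTests(TestCase):
#         @classmethod
#         def setUpTestData(cls):
#             # Runs once per class inside the class-wide atomic block; the
#             # attribute is wrapped in the TestData descriptor above, so each
#             # test method receives an isolated deep copy.
#             cls.question = Question.objects.create(text="What's new?")
#
#         def test_index_lists_questions(self):
#             # assertNumQueries() delegates to _AssertNumQueriesContext; the
#             # expected count here is purely illustrative.
#             with self.assertNumQueries(1):
#                 response = self.client.get(reverse("polls:index"))
#             self.assertContains(response, self.question.text)
#
#         def test_vote_redirects_back_to_index(self):
#             response = self.client.post(reverse("polls:index"), {"vote": "1"})
#             # Unless fetch_redirect_response=False is passed, the redirect
#             # target is fetched again with the same test client.
#             self.assertRedirects(response, reverse("polls:index"))
# -----------------------------------------------------------------------------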
0146b3ef86fa8c7bef0a66e61c4bc32dbcc932aab7a175ca5c46b8ab236f478c
import json import os import re from django.apps import apps from django.conf import settings from django.http import HttpResponse, HttpResponseRedirect, JsonResponse from django.template import Context, Engine from django.urls import translate_url from django.utils.formats import get_format from django.utils.http import url_has_allowed_host_and_scheme from django.utils.translation import check_for_language, get_language from django.utils.translation.trans_real import DjangoTranslation from django.views.generic import View LANGUAGE_QUERY_PARAMETER = "language" def set_language(request): """ Redirect to a given URL while setting the chosen language in the session (if enabled) and in a cookie. The URL and the language code need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state. """ next_url = request.POST.get("next", request.GET.get("next")) if ( next_url or request.accepts("text/html") ) and not url_has_allowed_host_and_scheme( url=next_url, allowed_hosts={request.get_host()}, require_https=request.is_secure(), ): next_url = request.META.get("HTTP_REFERER") if not url_has_allowed_host_and_scheme( url=next_url, allowed_hosts={request.get_host()}, require_https=request.is_secure(), ): next_url = "/" response = HttpResponseRedirect(next_url) if next_url else HttpResponse(status=204) if request.method == "POST": lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER) if lang_code and check_for_language(lang_code): if next_url: next_trans = translate_url(next_url, lang_code) if next_trans != next_url: response = HttpResponseRedirect(next_trans) response.set_cookie( settings.LANGUAGE_COOKIE_NAME, lang_code, max_age=settings.LANGUAGE_COOKIE_AGE, path=settings.LANGUAGE_COOKIE_PATH, domain=settings.LANGUAGE_COOKIE_DOMAIN, secure=settings.LANGUAGE_COOKIE_SECURE, httponly=settings.LANGUAGE_COOKIE_HTTPONLY, samesite=settings.LANGUAGE_COOKIE_SAMESITE, ) return response def get_formats(): """Return all formats strings required for i18n to work.""" FORMAT_SETTINGS = ( "DATE_FORMAT", "DATETIME_FORMAT", "TIME_FORMAT", "YEAR_MONTH_FORMAT", "MONTH_DAY_FORMAT", "SHORT_DATE_FORMAT", "SHORT_DATETIME_FORMAT", "FIRST_DAY_OF_WEEK", "DECIMAL_SEPARATOR", "THOUSAND_SEPARATOR", "NUMBER_GROUPING", "DATE_INPUT_FORMATS", "TIME_INPUT_FORMATS", "DATETIME_INPUT_FORMATS", ) return {attr: get_format(attr) for attr in FORMAT_SETTINGS} js_catalog_template = r""" {% autoescape off %} 'use strict'; { const globals = this; const django = globals.django || (globals.django = {}); {% if plural %} django.pluralidx = function(n) { const v = {{ plural }}; if (typeof v === 'boolean') { return v ? 1 : 0; } else { return v; } }; {% else %} django.pluralidx = function(count) { return (count == 1) ? 0 : 1; }; {% endif %} /* gettext library */ django.catalog = django.catalog || {}; {% if catalog_str %} const newcatalog = {{ catalog_str }}; for (const key in newcatalog) { django.catalog[key] = newcatalog[key]; } {% endif %} if (!django.jsi18n_initialized) { django.gettext = function(msgid) { const value = django.catalog[msgid]; if (typeof value === 'undefined') { return msgid; } else { return (typeof value === 'string') ? value : value[0]; } }; django.ngettext = function(singular, plural, count) { const value = django.catalog[singular]; if (typeof value === 'undefined') { return (count == 1) ? 
singular : plural; } else { return value.constructor === Array ? value[django.pluralidx(count)] : value; } }; django.gettext_noop = function(msgid) { return msgid; }; django.pgettext = function(context, msgid) { let value = django.gettext(context + '\x04' + msgid); if (value.includes('\x04')) { value = msgid; } return value; }; django.npgettext = function(context, singular, plural, count) { let value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count); if (value.includes('\x04')) { value = django.ngettext(singular, plural, count); } return value; }; django.interpolate = function(fmt, obj, named) { if (named) { return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])}); } else { return fmt.replace(/%s/g, function(match){return String(obj.shift())}); } }; /* formatting library */ django.formats = {{ formats_str }}; django.get_format = function(format_type) { const value = django.formats[format_type]; if (typeof value === 'undefined') { return format_type; } else { return value; } }; /* add to global namespace */ globals.pluralidx = django.pluralidx; globals.gettext = django.gettext; globals.ngettext = django.ngettext; globals.gettext_noop = django.gettext_noop; globals.pgettext = django.pgettext; globals.npgettext = django.npgettext; globals.interpolate = django.interpolate; globals.get_format = django.get_format; django.jsi18n_initialized = true; } }; {% endautoescape %} """ # NOQA class JavaScriptCatalog(View): """ Return the selected language catalog as a JavaScript library. Receive the list of packages to check for translations in the `packages` kwarg either from the extra dictionary passed to the path() function or as a plus-sign delimited string from the request. Default is 'django.conf'. You can override the gettext domain for this view, but usually you don't want to do that as JavaScript messages go to the djangojs domain. This might be needed if you deliver your JavaScript source from Django templates. """ domain = "djangojs" packages = None def get(self, request, *args, **kwargs): locale = get_language() domain = kwargs.get("domain", self.domain) # If packages are not provided, default to all installed packages, as # DjangoTranslation without localedirs harvests them all. packages = kwargs.get("packages", "") packages = packages.split("+") if packages else self.packages paths = self.get_paths(packages) if packages else None self.translation = DjangoTranslation(locale, domain=domain, localedirs=paths) context = self.get_context_data(**kwargs) return self.render_to_response(context) def get_paths(self, packages): allowable_packages = { app_config.name: app_config for app_config in apps.get_app_configs() } app_configs = [ allowable_packages[p] for p in packages if p in allowable_packages ] if len(app_configs) < len(packages): excluded = [p for p in packages if p not in allowable_packages] raise ValueError( "Invalid package(s) provided to JavaScriptCatalog: %s" % ",".join(excluded) ) # paths of requested packages return [os.path.join(app.path, "locale") for app in app_configs] @property def _num_plurals(self): """ Return the number of plurals for this catalog language, or 2 if no plural string is available. """ match = re.search(r"nplurals=\s*(\d+)", self._plural_string or "") if match: return int(match[1]) return 2 @property def _plural_string(self): """ Return the plural string (including nplurals) for this catalog language, or None if no plural string is available. 
""" if "" in self.translation._catalog: for line in self.translation._catalog[""].split("\n"): if line.startswith("Plural-Forms:"): return line.split(":", 1)[1].strip() return None def get_plural(self): plural = self._plural_string if plural is not None: # This should be a compiled function of a typical plural-form: # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : # n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2; plural = [ el.strip() for el in plural.split(";") if el.strip().startswith("plural=") ][0].split("=", 1)[1] return plural def get_catalog(self): pdict = {} catalog = {} translation = self.translation seen_keys = set() while True: for key, value in translation._catalog.items(): if key == "" or key in seen_keys: continue if isinstance(key, str): catalog[key] = value elif isinstance(key, tuple): msgid, cnt = key pdict.setdefault(msgid, {})[cnt] = value else: raise TypeError(key) seen_keys.add(key) if translation._fallback: translation = translation._fallback else: break num_plurals = self._num_plurals for k, v in pdict.items(): catalog[k] = [v.get(i, "") for i in range(num_plurals)] return catalog def get_context_data(self, **kwargs): return { "catalog": self.get_catalog(), "formats": get_formats(), "plural": self.get_plural(), } def render_to_response(self, context, **response_kwargs): def indent(s): return s.replace("\n", "\n ") template = Engine().from_string(js_catalog_template) context["catalog_str"] = ( indent(json.dumps(context["catalog"], sort_keys=True, indent=2)) if context["catalog"] else None ) context["formats_str"] = indent( json.dumps(context["formats"], sort_keys=True, indent=2) ) return HttpResponse( template.render(Context(context)), 'text/javascript; charset="utf-8"' ) class JSONCatalog(JavaScriptCatalog): """ Return the selected language catalog as a JSON object. Receive the same parameters as JavaScriptCatalog and return a response with a JSON object of the following format: { "catalog": { # Translations catalog }, "formats": { # Language formats for date, time, etc. }, "plural": '...' # Expression for plural forms, or null. } """ def render_to_response(self, context, **response_kwargs): return JsonResponse(context)
b7199b9d6eb0acbf938c0ec26dbdddd3c38989353701d9becccbbe71897753f6
""" Default Django settings. Override these with settings in the module pointed to by the DJANGO_SETTINGS_MODULE environment variable. """ # This is defined here as a do-nothing function because we can't import # django.utils.translation -- that module depends on the settings. def gettext_noop(s): return s #################### # CORE # #################### DEBUG = False # Whether the framework should propagate raw exceptions rather than catching # them. This is useful under some testing situations and should never be used # on a live site. DEBUG_PROPAGATE_EXCEPTIONS = False # People who get code error notifications. In the format # [('Full Name', '[email protected]'), ('Full Name', '[email protected]')] ADMINS = [] # List of IP addresses, as strings, that: # * See debug comments, when DEBUG is true # * Receive x-headers INTERNAL_IPS = [] # Hosts/domain names that are valid for this site. # "*" matches anything, ".example.com" matches example.com and all subdomains ALLOWED_HOSTS = [] # Local time zone for this installation. All choices can be found here: # https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all # systems may support all possibilities). When USE_TZ is True, this is # interpreted as the default user time zone. TIME_ZONE = "America/Chicago" # If you set this to True, Django will use timezone-aware datetimes. USE_TZ = False # RemovedInDjango50Warning: It's a transitional setting helpful in migrating # from pytz tzinfo to ZoneInfo(). Set True to continue using pytz tzinfo # objects during the Django 4.x release cycle. USE_DEPRECATED_PYTZ = False # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = "en-us" # Languages we provide translations for, out of the box. 
LANGUAGES = [ ("af", gettext_noop("Afrikaans")), ("ar", gettext_noop("Arabic")), ("ar-dz", gettext_noop("Algerian Arabic")), ("ast", gettext_noop("Asturian")), ("az", gettext_noop("Azerbaijani")), ("bg", gettext_noop("Bulgarian")), ("be", gettext_noop("Belarusian")), ("bn", gettext_noop("Bengali")), ("br", gettext_noop("Breton")), ("bs", gettext_noop("Bosnian")), ("ca", gettext_noop("Catalan")), ("ckb", gettext_noop("Central Kurdish (Sorani)")), ("cs", gettext_noop("Czech")), ("cy", gettext_noop("Welsh")), ("da", gettext_noop("Danish")), ("de", gettext_noop("German")), ("dsb", gettext_noop("Lower Sorbian")), ("el", gettext_noop("Greek")), ("en", gettext_noop("English")), ("en-au", gettext_noop("Australian English")), ("en-gb", gettext_noop("British English")), ("eo", gettext_noop("Esperanto")), ("es", gettext_noop("Spanish")), ("es-ar", gettext_noop("Argentinian Spanish")), ("es-co", gettext_noop("Colombian Spanish")), ("es-mx", gettext_noop("Mexican Spanish")), ("es-ni", gettext_noop("Nicaraguan Spanish")), ("es-ve", gettext_noop("Venezuelan Spanish")), ("et", gettext_noop("Estonian")), ("eu", gettext_noop("Basque")), ("fa", gettext_noop("Persian")), ("fi", gettext_noop("Finnish")), ("fr", gettext_noop("French")), ("fy", gettext_noop("Frisian")), ("ga", gettext_noop("Irish")), ("gd", gettext_noop("Scottish Gaelic")), ("gl", gettext_noop("Galician")), ("he", gettext_noop("Hebrew")), ("hi", gettext_noop("Hindi")), ("hr", gettext_noop("Croatian")), ("hsb", gettext_noop("Upper Sorbian")), ("hu", gettext_noop("Hungarian")), ("hy", gettext_noop("Armenian")), ("ia", gettext_noop("Interlingua")), ("id", gettext_noop("Indonesian")), ("ig", gettext_noop("Igbo")), ("io", gettext_noop("Ido")), ("is", gettext_noop("Icelandic")), ("it", gettext_noop("Italian")), ("ja", gettext_noop("Japanese")), ("ka", gettext_noop("Georgian")), ("kab", gettext_noop("Kabyle")), ("kk", gettext_noop("Kazakh")), ("km", gettext_noop("Khmer")), ("kn", gettext_noop("Kannada")), ("ko", gettext_noop("Korean")), ("ky", gettext_noop("Kyrgyz")), ("lb", gettext_noop("Luxembourgish")), ("lt", gettext_noop("Lithuanian")), ("lv", gettext_noop("Latvian")), ("mk", gettext_noop("Macedonian")), ("ml", gettext_noop("Malayalam")), ("mn", gettext_noop("Mongolian")), ("mr", gettext_noop("Marathi")), ("ms", gettext_noop("Malay")), ("my", gettext_noop("Burmese")), ("nb", gettext_noop("Norwegian Bokmål")), ("ne", gettext_noop("Nepali")), ("nl", gettext_noop("Dutch")), ("nn", gettext_noop("Norwegian Nynorsk")), ("os", gettext_noop("Ossetic")), ("pa", gettext_noop("Punjabi")), ("pl", gettext_noop("Polish")), ("pt", gettext_noop("Portuguese")), ("pt-br", gettext_noop("Brazilian Portuguese")), ("ro", gettext_noop("Romanian")), ("ru", gettext_noop("Russian")), ("sk", gettext_noop("Slovak")), ("sl", gettext_noop("Slovenian")), ("sq", gettext_noop("Albanian")), ("sr", gettext_noop("Serbian")), ("sr-latn", gettext_noop("Serbian Latin")), ("sv", gettext_noop("Swedish")), ("sw", gettext_noop("Swahili")), ("ta", gettext_noop("Tamil")), ("te", gettext_noop("Telugu")), ("tg", gettext_noop("Tajik")), ("th", gettext_noop("Thai")), ("tk", gettext_noop("Turkmen")), ("tr", gettext_noop("Turkish")), ("tt", gettext_noop("Tatar")), ("udm", gettext_noop("Udmurt")), ("uk", gettext_noop("Ukrainian")), ("ur", gettext_noop("Urdu")), ("uz", gettext_noop("Uzbek")), ("vi", gettext_noop("Vietnamese")), ("zh-hans", gettext_noop("Simplified Chinese")), ("zh-hant", gettext_noop("Traditional Chinese")), ] # Languages using BiDi (right-to-left) layout LANGUAGES_BIDI = ["he", 
"ar", "ar-dz", "ckb", "fa", "ur"] # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True LOCALE_PATHS = [] # Settings for language cookie LANGUAGE_COOKIE_NAME = "django_language" LANGUAGE_COOKIE_AGE = None LANGUAGE_COOKIE_DOMAIN = None LANGUAGE_COOKIE_PATH = "/" LANGUAGE_COOKIE_SECURE = False LANGUAGE_COOKIE_HTTPONLY = False LANGUAGE_COOKIE_SAMESITE = None # If you set this to True, Django will format dates, numbers and calendars # according to user current locale. USE_L10N = True # Not-necessarily-technical managers of the site. They get broken link # notifications and other various emails. MANAGERS = ADMINS # Default charset to use for all HttpResponse objects, if a MIME type isn't # manually specified. It's used to construct the Content-Type header. DEFAULT_CHARSET = "utf-8" # Email address that error messages come from. SERVER_EMAIL = "root@localhost" # Database connection info. If left empty, will default to the dummy backend. DATABASES = {} # Classes used to implement DB routing behavior. DATABASE_ROUTERS = [] # The email backend to use. For possible shortcuts see django.core.mail. # The default is to use the SMTP backend. # Third-party backends can be specified by providing a Python path # to a module that defines an EmailBackend class. EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend" # Host for sending email. EMAIL_HOST = "localhost" # Port for sending email. EMAIL_PORT = 25 # Whether to send SMTP 'Date' header in the local time zone or in UTC. EMAIL_USE_LOCALTIME = False # Optional SMTP authentication information for EMAIL_HOST. EMAIL_HOST_USER = "" EMAIL_HOST_PASSWORD = "" EMAIL_USE_TLS = False EMAIL_USE_SSL = False EMAIL_SSL_CERTFILE = None EMAIL_SSL_KEYFILE = None EMAIL_TIMEOUT = None # List of strings representing installed apps. INSTALLED_APPS = [] TEMPLATES = [] # Default form rendering class. FORM_RENDERER = "django.forms.renderers.DjangoTemplates" # Default email address to use for various automated correspondence from # the site managers. DEFAULT_FROM_EMAIL = "webmaster@localhost" # Subject-line prefix for email messages send with django.core.mail.mail_admins # or ...mail_managers. Make sure to include the trailing space. EMAIL_SUBJECT_PREFIX = "[Django] " # Whether to append trailing slashes to URLs. APPEND_SLASH = True # Whether to prepend the "www." subdomain to URLs that don't have it. PREPEND_WWW = False # Override the server-derived value of SCRIPT_NAME FORCE_SCRIPT_NAME = None # List of compiled regular expression objects representing User-Agent strings # that are not allowed to visit any page, systemwide. Use this for bad # robots/crawlers. Here are a few examples: # import re # DISALLOWED_USER_AGENTS = [ # re.compile(r'^NaverBot.*'), # re.compile(r'^EmailSiphon.*'), # re.compile(r'^SiteSucker.*'), # re.compile(r'^sohu-search'), # ] DISALLOWED_USER_AGENTS = [] ABSOLUTE_URL_OVERRIDES = {} # List of compiled regular expression objects representing URLs that need not # be reported by BrokenLinkEmailsMiddleware. Here are a few examples: # import re # IGNORABLE_404_URLS = [ # re.compile(r'^/apple-touch-icon.*\.png$'), # re.compile(r'^/favicon.ico$'), # re.compile(r'^/robots.txt$'), # re.compile(r'^/phpmyadmin/'), # re.compile(r'\.(cgi|php|pl)$'), # ] IGNORABLE_404_URLS = [] # A secret key for this particular Django installation. Used in secret-key # hashing algorithms. Set this in your settings, or Django will complain # loudly. 
SECRET_KEY = "" # List of secret keys used to verify the validity of signatures. This allows # secret key rotation. SECRET_KEY_FALLBACKS = [] # Default file storage mechanism that holds media. DEFAULT_FILE_STORAGE = "django.core.files.storage.FileSystemStorage" # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = "" # URL that handles the media served from MEDIA_ROOT. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = "" # Absolute path to the directory static files should be collected to. # Example: "/var/www/example.com/static/" STATIC_ROOT = None # URL that handles the static files served from STATIC_ROOT. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = None # List of upload handler classes to be applied in order. FILE_UPLOAD_HANDLERS = [ "django.core.files.uploadhandler.MemoryFileUploadHandler", "django.core.files.uploadhandler.TemporaryFileUploadHandler", ] # Maximum size, in bytes, of a request before it will be streamed to the # file system instead of into memory. FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB # Maximum size in bytes of request data (excluding file uploads) that will be # read before a SuspiciousOperation (RequestDataTooBig) is raised. DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB # Maximum number of GET/POST parameters that will be read before a # SuspiciousOperation (TooManyFieldsSent) is raised. DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000 # Directory in which upload streamed files will be temporarily saved. A value of # `None` will make Django use the operating system's default temporary directory # (i.e. "/tmp" on *nix systems). FILE_UPLOAD_TEMP_DIR = None # The numeric mode to set newly-uploaded files to. The value should be a mode # you'd pass directly to os.chmod; see # https://docs.python.org/library/os.html#files-and-directories. FILE_UPLOAD_PERMISSIONS = 0o644 # The numeric mode to assign to newly-created directories, when uploading files. # The value should be a mode as you'd pass to os.chmod; # see https://docs.python.org/library/os.html#files-and-directories. FILE_UPLOAD_DIRECTORY_PERMISSIONS = None # Python module path where user will place custom format definition. # The directory where this setting is pointing should contain subdirectories # named as the locales, containing a formats.py file # (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use) FORMAT_MODULE_PATH = None # Default formatting for date objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = "N j, Y" # Default formatting for datetime objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATETIME_FORMAT = "N j, Y, P" # Default formatting for time objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date TIME_FORMAT = "P" # Default formatting for date objects when only the year and month are relevant. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date YEAR_MONTH_FORMAT = "F Y" # Default formatting for date objects when only the month and day are relevant. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date MONTH_DAY_FORMAT = "F j" # Default short formatting for date objects. 
See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date SHORT_DATE_FORMAT = "m/d/Y" # Default short formatting for datetime objects. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date SHORT_DATETIME_FORMAT = "m/d/Y P" # Default formats to be used when parsing dates from input boxes, in order # See all available format string here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates DATE_INPUT_FORMATS = [ "%Y-%m-%d", # '2006-10-25' "%m/%d/%Y", # '10/25/2006' "%m/%d/%y", # '10/25/06' "%b %d %Y", # 'Oct 25 2006' "%b %d, %Y", # 'Oct 25, 2006' "%d %b %Y", # '25 Oct 2006' "%d %b, %Y", # '25 Oct, 2006' "%B %d %Y", # 'October 25 2006' "%B %d, %Y", # 'October 25, 2006' "%d %B %Y", # '25 October 2006' "%d %B, %Y", # '25 October, 2006' ] # Default formats to be used when parsing times from input boxes, in order # See all available format string here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates TIME_INPUT_FORMATS = [ "%H:%M:%S", # '14:30:59' "%H:%M:%S.%f", # '14:30:59.000200' "%H:%M", # '14:30' ] # Default formats to be used when parsing dates and times from input boxes, # in order # See all available format string here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates DATETIME_INPUT_FORMATS = [ "%Y-%m-%d %H:%M:%S", # '2006-10-25 14:30:59' "%Y-%m-%d %H:%M:%S.%f", # '2006-10-25 14:30:59.000200' "%Y-%m-%d %H:%M", # '2006-10-25 14:30' "%m/%d/%Y %H:%M:%S", # '10/25/2006 14:30:59' "%m/%d/%Y %H:%M:%S.%f", # '10/25/2006 14:30:59.000200' "%m/%d/%Y %H:%M", # '10/25/2006 14:30' "%m/%d/%y %H:%M:%S", # '10/25/06 14:30:59' "%m/%d/%y %H:%M:%S.%f", # '10/25/06 14:30:59.000200' "%m/%d/%y %H:%M", # '10/25/06 14:30' ] # First day of week, to be used on calendars # 0 means Sunday, 1 means Monday... FIRST_DAY_OF_WEEK = 0 # Decimal separator symbol DECIMAL_SEPARATOR = "." # Boolean that sets whether to add thousand separator when formatting numbers USE_THOUSAND_SEPARATOR = False # Number of digits that will be together, when splitting them by # THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands... NUMBER_GROUPING = 0 # Thousand separator symbol THOUSAND_SEPARATOR = "," # The tablespaces to use for each model when not specified otherwise. DEFAULT_TABLESPACE = "" DEFAULT_INDEX_TABLESPACE = "" # Default primary key field type. DEFAULT_AUTO_FIELD = "django.db.models.AutoField" # Default X-Frame-Options header value X_FRAME_OPTIONS = "DENY" USE_X_FORWARDED_HOST = False USE_X_FORWARDED_PORT = False # The Python dotted path to the WSGI application that Django's internal server # (runserver) will use. If `None`, the return value of # 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same # behavior as previous versions of Django. Otherwise this should point to an # actual WSGI application object. WSGI_APPLICATION = None # If your Django app is behind a proxy that sets a header to specify secure # connections, AND that proxy ensures that user-submitted headers with the # same name are ignored (so that people can't spoof it), set this value to # a tuple of (header_name, header_value). For any requests that come in with # that header/value, request.is_secure() will return True. # WARNING! 
Only set this if you fully understand what you're doing. Otherwise, # you may be opening yourself up to a security risk. SECURE_PROXY_SSL_HEADER = None ############## # MIDDLEWARE # ############## # List of middleware to use. Order is important; in the request phase, these # middleware will be applied in the order given, and in the response # phase the middleware will be applied in reverse order. MIDDLEWARE = [] ############ # SESSIONS # ############ # Cache to store session data if using the cache session backend. SESSION_CACHE_ALIAS = "default" # Cookie name. This can be whatever you want. SESSION_COOKIE_NAME = "sessionid" # Age of cookie, in seconds (default: 2 weeks). SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # A string like "example.com", or None for standard domain cookie. SESSION_COOKIE_DOMAIN = None # Whether the session cookie should be secure (https:// only). SESSION_COOKIE_SECURE = False # The path of the session cookie. SESSION_COOKIE_PATH = "/" # Whether to use the HttpOnly flag. SESSION_COOKIE_HTTPONLY = True # Whether to set the flag restricting cookie leaks on cross-site requests. # This can be 'Lax', 'Strict', 'None', or False to disable the flag. SESSION_COOKIE_SAMESITE = "Lax" # Whether to save the session data on every request. SESSION_SAVE_EVERY_REQUEST = False # Whether a user's session cookie expires when the web browser is closed. SESSION_EXPIRE_AT_BROWSER_CLOSE = False # The module to store session data SESSION_ENGINE = "django.contrib.sessions.backends.db" # Directory to store session files if using the file session module. If None, # the backend will use a sensible default. SESSION_FILE_PATH = None # class to serialize session data SESSION_SERIALIZER = "django.contrib.sessions.serializers.JSONSerializer" ######### # CACHE # ######### # The cache backends to use. CACHES = { "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", } } CACHE_MIDDLEWARE_KEY_PREFIX = "" CACHE_MIDDLEWARE_SECONDS = 600 CACHE_MIDDLEWARE_ALIAS = "default" ################## # AUTHENTICATION # ################## AUTH_USER_MODEL = "auth.User" AUTHENTICATION_BACKENDS = ["django.contrib.auth.backends.ModelBackend"] LOGIN_URL = "/accounts/login/" LOGIN_REDIRECT_URL = "/accounts/profile/" LOGOUT_REDIRECT_URL = None # The number of seconds a password reset link is valid for (default: 3 days). PASSWORD_RESET_TIMEOUT = 60 * 60 * 24 * 3 # the first hasher in this list is the preferred algorithm. any # password using different algorithms will be converted automatically # upon login PASSWORD_HASHERS = [ "django.contrib.auth.hashers.PBKDF2PasswordHasher", "django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher", "django.contrib.auth.hashers.Argon2PasswordHasher", "django.contrib.auth.hashers.BCryptSHA256PasswordHasher", "django.contrib.auth.hashers.ScryptPasswordHasher", ] AUTH_PASSWORD_VALIDATORS = [] ########### # SIGNING # ########### SIGNING_BACKEND = "django.core.signing.TimestampSigner" ######## # CSRF # ######## # Dotted path to callable to be used as view when a request is # rejected by the CSRF middleware. CSRF_FAILURE_VIEW = "django.views.csrf.csrf_failure" # Settings for CSRF cookie. CSRF_COOKIE_NAME = "csrftoken" CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52 CSRF_COOKIE_DOMAIN = None CSRF_COOKIE_PATH = "/" CSRF_COOKIE_SECURE = False CSRF_COOKIE_HTTPONLY = False CSRF_COOKIE_SAMESITE = "Lax" CSRF_HEADER_NAME = "HTTP_X_CSRFTOKEN" CSRF_TRUSTED_ORIGINS = [] CSRF_USE_SESSIONS = False # Whether to mask CSRF cookie value. 
It's a transitional setting helpful in # migrating multiple instance of the same project to Django 4.1+. CSRF_COOKIE_MASKED = False ############ # MESSAGES # ############ # Class to use as messages backend MESSAGE_STORAGE = "django.contrib.messages.storage.fallback.FallbackStorage" # Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within # django.contrib.messages to avoid imports in this settings file. ########### # LOGGING # ########### # The callable to use to configure logging LOGGING_CONFIG = "logging.config.dictConfig" # Custom logging configuration. LOGGING = {} # Default exception reporter class used in case none has been # specifically assigned to the HttpRequest instance. DEFAULT_EXCEPTION_REPORTER = "django.views.debug.ExceptionReporter" # Default exception reporter filter class used in case none has been # specifically assigned to the HttpRequest instance. DEFAULT_EXCEPTION_REPORTER_FILTER = "django.views.debug.SafeExceptionReporterFilter" ########### # TESTING # ########### # The name of the class to use to run the test suite TEST_RUNNER = "django.test.runner.DiscoverRunner" # Apps that don't need to be serialized at test database creation time # (only apps with migrations are to start with) TEST_NON_SERIALIZED_APPS = [] ############ # FIXTURES # ############ # The list of directories to search for fixtures FIXTURE_DIRS = [] ############### # STATICFILES # ############### # A list of locations of additional static files STATICFILES_DIRS = [] # The default file storage backend used during the build process STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage" # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = [ "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ] ############## # MIGRATIONS # ############## # Migration module overrides for apps, by app label. MIGRATION_MODULES = {} ################# # SYSTEM CHECKS # ################# # List of all issues generated by system checks that should be silenced. Light # issues like warnings, infos or debugs will not generate a message. Silencing # serious issues like errors and criticals does not result in hiding the # message, but Django will not stop you from e.g. running server. SILENCED_SYSTEM_CHECKS = [] ####################### # SECURITY MIDDLEWARE # ####################### SECURE_CONTENT_TYPE_NOSNIFF = True SECURE_CROSS_ORIGIN_OPENER_POLICY = "same-origin" SECURE_HSTS_INCLUDE_SUBDOMAINS = False SECURE_HSTS_PRELOAD = False SECURE_HSTS_SECONDS = 0 SECURE_REDIRECT_EXEMPT = [] SECURE_REFERRER_POLICY = "same-origin" SECURE_SSL_HOST = None SECURE_SSL_REDIRECT = False
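# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of global_settings.py above): a minimal
# example of how these module-level defaults are consumed. django.conf.settings
# falls back to them for every name a project's settings module does not
# override. The overrides passed to configure() here are hypothetical.
# ---------------------------------------------------------------------------
import django
from django.conf import settings

if not settings.configured:
    # configure() accepts keyword overrides; any setting omitted keeps the
    # default defined above (e.g. APPEND_SLASH stays True, EMAIL_PORT stays 25).
    settings.configure(DEBUG=True, SECRET_KEY="dev-only-not-for-production", INSTALLED_APPS=[])
django.setup()

assert settings.APPEND_SLASH is True      # from the global defaults above
assert settings.EMAIL_PORT == 25          # from the global defaults above
assert settings.SECRET_KEY == "dev-only-not-for-production"  # from the override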
d81bc741fca8765970e8bfebb63b6fecf186494402f2a088e2367b79f1f066be
import itertools import logging import os import signal import subprocess import sys import threading import time import traceback import weakref from collections import defaultdict from functools import lru_cache, wraps from pathlib import Path from types import ModuleType from zipimport import zipimporter import django from django.apps import apps from django.core.signals import request_finished from django.dispatch import Signal from django.utils.functional import cached_property from django.utils.version import get_version_tuple autoreload_started = Signal() file_changed = Signal() DJANGO_AUTORELOAD_ENV = "RUN_MAIN" logger = logging.getLogger("django.utils.autoreload") # If an error is raised while importing a file, it's not placed in sys.modules. # This means that any future modifications aren't caught. Keep a list of these # file paths to allow watching them in the future. _error_files = [] _exception = None try: import termios except ImportError: termios = None try: import pywatchman except ImportError: pywatchman = None def is_django_module(module): """Return True if the given module is nested under Django.""" return module.__name__.startswith("django.") def is_django_path(path): """Return True if the given file path is nested under Django.""" return Path(django.__file__).parent in Path(path).parents def check_errors(fn): @wraps(fn) def wrapper(*args, **kwargs): global _exception try: fn(*args, **kwargs) except Exception: _exception = sys.exc_info() et, ev, tb = _exception if getattr(ev, "filename", None) is None: # get the filename from the last item in the stack filename = traceback.extract_tb(tb)[-1][0] else: filename = ev.filename if filename not in _error_files: _error_files.append(filename) raise return wrapper def raise_last_exception(): global _exception if _exception is not None: raise _exception[1] def ensure_echo_on(): """ Ensure that echo mode is enabled. Some tools such as PDB disable it which causes usability issues after reload. """ if not termios or not sys.stdin.isatty(): return attr_list = termios.tcgetattr(sys.stdin) if not attr_list[3] & termios.ECHO: attr_list[3] |= termios.ECHO if hasattr(signal, "SIGTTOU"): old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN) else: old_handler = None termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list) if old_handler is not None: signal.signal(signal.SIGTTOU, old_handler) def iter_all_python_module_files(): # This is a hot path during reloading. Create a stable sorted list of # modules based on the module name and pass it to iter_modules_and_files(). # This ensures cached results are returned in the usual case that modules # aren't loaded on the fly. keys = sorted(sys.modules) modules = tuple( m for m in map(sys.modules.__getitem__, keys) if not isinstance(m, weakref.ProxyTypes) ) return iter_modules_and_files(modules, frozenset(_error_files)) @lru_cache(maxsize=1) def iter_modules_and_files(modules, extra_files): """Iterate through all modules needed to be watched.""" sys_file_paths = [] for module in modules: # During debugging (with PyDev) the 'typing.io' and 'typing.re' objects # are added to sys.modules, however they are types not modules and so # cause issues here. if not isinstance(module, ModuleType): continue if module.__name__ in ("__main__", "__mp_main__"): # __main__ (usually manage.py) doesn't always have a __spec__ set. # Handle this by falling back to using __file__, resolved below. # See https://docs.python.org/reference/import.html#main-spec # __file__ may not exists, e.g. 
when running ipdb debugger. if hasattr(module, "__file__"): sys_file_paths.append(module.__file__) continue if getattr(module, "__spec__", None) is None: continue spec = module.__spec__ # Modules could be loaded from places without a concrete location. If # this is the case, skip them. if spec.has_location: origin = ( spec.loader.archive if isinstance(spec.loader, zipimporter) else spec.origin ) sys_file_paths.append(origin) results = set() for filename in itertools.chain(sys_file_paths, extra_files): if not filename: continue path = Path(filename) try: if not path.exists(): # The module could have been removed, don't fail loudly if this # is the case. continue except ValueError as e: # Network filesystems may return null bytes in file paths. logger.debug('"%s" raised when resolving path: "%s"', e, path) continue resolved_path = path.resolve().absolute() results.add(resolved_path) return frozenset(results) @lru_cache(maxsize=1) def common_roots(paths): """ Return a tuple of common roots that are shared between the given paths. File system watchers operate on directories and aren't cheap to create. Try to find the minimum set of directories to watch that encompass all of the files that need to be watched. """ # Inspired from Werkzeug: # https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py # Create a sorted list of the path components, longest first. path_parts = sorted([x.parts for x in paths], key=len, reverse=True) tree = {} for chunks in path_parts: node = tree # Add each part of the path to the tree. for chunk in chunks: node = node.setdefault(chunk, {}) # Clear the last leaf in the tree. node.clear() # Turn the tree into a list of Path instances. def _walk(node, path): for prefix, child in node.items(): yield from _walk(child, path + (prefix,)) if not node: yield Path(*path) return tuple(_walk(tree, ())) def sys_path_directories(): """ Yield absolute directories from sys.path, ignoring entries that don't exist. """ for path in sys.path: path = Path(path) if not path.exists(): continue resolved_path = path.resolve().absolute() # If the path is a file (like a zip file), watch the parent directory. if resolved_path.is_file(): yield resolved_path.parent else: yield resolved_path def get_child_arguments(): """ Return the executable. This contains a workaround for Windows if the executable is reported to not have the .exe extension which can cause bugs on reloading. """ import __main__ py_script = Path(sys.argv[0]) args = [sys.executable] + ["-W%s" % o for o in sys.warnoptions] if sys.implementation.name == "cpython": args.extend( f"-X{key}" if value is True else f"-X{key}={value}" for key, value in sys._xoptions.items() ) # __spec__ is set when the server was started with the `-m` option, # see https://docs.python.org/3/reference/import.html#main-spec # __spec__ may not exist, e.g. when running in a Conda env. if getattr(__main__, "__spec__", None) is not None: spec = __main__.__spec__ if (spec.name == "__main__" or spec.name.endswith(".__main__")) and spec.parent: name = spec.parent else: name = spec.name args += ["-m", name] args += sys.argv[1:] elif not py_script.exists(): # sys.argv[0] may not exist for several reasons on Windows. # It may exist with a .exe extension or have a -script.py suffix. exe_entrypoint = py_script.with_suffix(".exe") if exe_entrypoint.exists(): # Should be executed directly, ignoring sys.executable. 
return [exe_entrypoint, *sys.argv[1:]] script_entrypoint = py_script.with_name("%s-script.py" % py_script.name) if script_entrypoint.exists(): # Should be executed as usual. return [*args, script_entrypoint, *sys.argv[1:]] raise RuntimeError("Script %s does not exist." % py_script) else: args += sys.argv return args def trigger_reload(filename): logger.info("%s changed, reloading.", filename) sys.exit(3) def restart_with_reloader(): new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: "true"} args = get_child_arguments() while True: p = subprocess.run(args, env=new_environ, close_fds=False) if p.returncode != 3: return p.returncode class BaseReloader: def __init__(self): self.extra_files = set() self.directory_globs = defaultdict(set) self._stop_condition = threading.Event() def watch_dir(self, path, glob): path = Path(path) try: path = path.absolute() except FileNotFoundError: logger.debug( "Unable to watch directory %s as it cannot be resolved.", path, exc_info=True, ) return logger.debug("Watching dir %s with glob %s.", path, glob) self.directory_globs[path].add(glob) def watched_files(self, include_globs=True): """ Yield all files that need to be watched, including module files and files within globs. """ yield from iter_all_python_module_files() yield from self.extra_files if include_globs: for directory, patterns in self.directory_globs.items(): for pattern in patterns: yield from directory.glob(pattern) def wait_for_apps_ready(self, app_reg, django_main_thread): """ Wait until Django reports that the apps have been loaded. If the given thread has terminated before the apps are ready, then a SyntaxError or other non-recoverable error has been raised. In that case, stop waiting for the apps_ready event and continue processing. Return True if the thread is alive and the ready event has been triggered, or False if the thread is terminated while waiting for the event. """ while django_main_thread.is_alive(): if app_reg.ready_event.wait(timeout=0.1): return True else: logger.debug("Main Django thread has terminated before apps are ready.") return False def run(self, django_main_thread): logger.debug("Waiting for apps ready_event.") self.wait_for_apps_ready(apps, django_main_thread) from django.urls import get_resolver # Prevent a race condition where URL modules aren't loaded when the # reloader starts by accessing the urlconf_module property. try: get_resolver().urlconf_module except Exception: # Loading the urlconf can result in errors during development. # If this occurs then swallow the error and continue. pass logger.debug("Apps ready_event triggered. Sending autoreload_started signal.") autoreload_started.send(sender=self) self.run_loop() def run_loop(self): ticker = self.tick() while not self.should_stop: try: next(ticker) except StopIteration: break self.stop() def tick(self): """ This generator is called in a loop from run_loop. It's important that the method takes care of pausing or otherwise waiting for a period of time. This split between run_loop() and tick() is to improve the testability of the reloader implementations by decoupling the work they do from the loop. """ raise NotImplementedError("subclasses must implement tick().") @classmethod def check_availability(cls): raise NotImplementedError("subclasses must implement check_availability().") def notify_file_changed(self, path): results = file_changed.send(sender=self, file_path=path) logger.debug("%s notified as changed. 
Signal results: %s.", path, results) if not any(res[1] for res in results): trigger_reload(path) # These are primarily used for testing. @property def should_stop(self): return self._stop_condition.is_set() def stop(self): self._stop_condition.set() class StatReloader(BaseReloader): SLEEP_TIME = 1 # Check for changes once per second. def tick(self): mtimes = {} while True: for filepath, mtime in self.snapshot_files(): old_time = mtimes.get(filepath) mtimes[filepath] = mtime if old_time is None: logger.debug("File %s first seen with mtime %s", filepath, mtime) continue elif mtime > old_time: logger.debug( "File %s previous mtime: %s, current mtime: %s", filepath, old_time, mtime, ) self.notify_file_changed(filepath) time.sleep(self.SLEEP_TIME) yield def snapshot_files(self): # watched_files may produce duplicate paths if globs overlap. seen_files = set() for file in self.watched_files(): if file in seen_files: continue try: mtime = file.stat().st_mtime except OSError: # This is thrown when the file does not exist. continue seen_files.add(file) yield file, mtime @classmethod def check_availability(cls): return True class WatchmanUnavailable(RuntimeError): pass class WatchmanReloader(BaseReloader): def __init__(self): self.roots = defaultdict(set) self.processed_request = threading.Event() self.client_timeout = int(os.environ.get("DJANGO_WATCHMAN_TIMEOUT", 5)) super().__init__() @cached_property def client(self): return pywatchman.client(timeout=self.client_timeout) def _watch_root(self, root): # In practice this shouldn't occur, however, it's possible that a # directory that doesn't exist yet is being watched. If it's outside of # sys.path then this will end up a new root. How to handle this isn't # clear: Not adding the root will likely break when subscribing to the # changes, however, as this is currently an internal API, no files # will be being watched outside of sys.path. Fixing this by checking # inside watch_glob() and watch_dir() is expensive, instead this could # could fall back to the StatReloader if this case is detected? For # now, watching its parent, if possible, is sufficient. if not root.exists(): if not root.parent.exists(): logger.warning( "Unable to watch root dir %s as neither it or its parent exist.", root, ) return root = root.parent result = self.client.query("watch-project", str(root.absolute())) if "warning" in result: logger.warning("Watchman warning: %s", result["warning"]) logger.debug("Watchman watch-project result: %s", result) return result["watch"], result.get("relative_path") @lru_cache def _get_clock(self, root): return self.client.query("clock", root)["clock"] def _subscribe(self, directory, name, expression): root, rel_path = self._watch_root(directory) # Only receive notifications of files changing, filtering out other types # like special files: https://facebook.github.io/watchman/docs/type only_files_expression = [ "allof", ["anyof", ["type", "f"], ["type", "l"]], expression, ] query = { "expression": only_files_expression, "fields": ["name"], "since": self._get_clock(root), "dedup_results": True, } if rel_path: query["relative_root"] = rel_path logger.debug( "Issuing watchman subscription %s, for root %s. 
Query: %s", name, root, query, ) self.client.query("subscribe", root, name, query) def _subscribe_dir(self, directory, filenames): if not directory.exists(): if not directory.parent.exists(): logger.warning( "Unable to watch directory %s as neither it or its parent exist.", directory, ) return prefix = "files-parent-%s" % directory.name filenames = ["%s/%s" % (directory.name, filename) for filename in filenames] directory = directory.parent expression = ["name", filenames, "wholename"] else: prefix = "files" expression = ["name", filenames] self._subscribe(directory, "%s:%s" % (prefix, directory), expression) def _watch_glob(self, directory, patterns): """ Watch a directory with a specific glob. If the directory doesn't yet exist, attempt to watch the parent directory and amend the patterns to include this. It's important this method isn't called more than one per directory when updating all subscriptions. Subsequent calls will overwrite the named subscription, so it must include all possible glob expressions. """ prefix = "glob" if not directory.exists(): if not directory.parent.exists(): logger.warning( "Unable to watch directory %s as neither it or its parent exist.", directory, ) return prefix = "glob-parent-%s" % directory.name patterns = ["%s/%s" % (directory.name, pattern) for pattern in patterns] directory = directory.parent expression = ["anyof"] for pattern in patterns: expression.append(["match", pattern, "wholename"]) self._subscribe(directory, "%s:%s" % (prefix, directory), expression) def watched_roots(self, watched_files): extra_directories = self.directory_globs.keys() watched_file_dirs = [f.parent for f in watched_files] sys_paths = list(sys_path_directories()) return frozenset((*extra_directories, *watched_file_dirs, *sys_paths)) def _update_watches(self): watched_files = list(self.watched_files(include_globs=False)) found_roots = common_roots(self.watched_roots(watched_files)) logger.debug("Watching %s files", len(watched_files)) logger.debug("Found common roots: %s", found_roots) # Setup initial roots for performance, shortest roots first. for root in sorted(found_roots): self._watch_root(root) for directory, patterns in self.directory_globs.items(): self._watch_glob(directory, patterns) # Group sorted watched_files by their parent directory. sorted_files = sorted(watched_files, key=lambda p: p.parent) for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent): # These paths need to be relative to the parent directory. self._subscribe_dir( directory, [str(p.relative_to(directory)) for p in group] ) def update_watches(self): try: self._update_watches() except Exception as ex: # If the service is still available, raise the original exception. if self.check_server_status(ex): raise def _check_subscription(self, sub): subscription = self.client.getSubscription(sub) if not subscription: return logger.debug("Watchman subscription %s has results.", sub) for result in subscription: # When using watch-project, it's not simple to get the relative # directory without storing some specific state. Store the full # path to the directory in the subscription name, prefixed by its # type (glob, files). root_directory = Path(result["subscription"].split(":", 1)[1]) logger.debug("Found root directory %s", root_directory) for file in result.get("files", []): self.notify_file_changed(root_directory / file) def request_processed(self, **kwargs): logger.debug("Request processed. 
Setting update_watches event.") self.processed_request.set() def tick(self): request_finished.connect(self.request_processed) self.update_watches() while True: if self.processed_request.is_set(): self.update_watches() self.processed_request.clear() try: self.client.receive() except pywatchman.SocketTimeout: pass except pywatchman.WatchmanError as ex: logger.debug("Watchman error: %s, checking server status.", ex) self.check_server_status(ex) else: for sub in list(self.client.subs.keys()): self._check_subscription(sub) yield # Protect against busy loops. time.sleep(0.1) def stop(self): self.client.close() super().stop() def check_server_status(self, inner_ex=None): """Return True if the server is available.""" try: self.client.query("version") except Exception: raise WatchmanUnavailable(str(inner_ex)) from inner_ex return True @classmethod def check_availability(cls): if not pywatchman: raise WatchmanUnavailable("pywatchman not installed.") client = pywatchman.client(timeout=0.1) try: result = client.capabilityCheck() except Exception: # The service is down? raise WatchmanUnavailable("Cannot connect to the watchman service.") version = get_version_tuple(result["version"]) # Watchman 4.9 includes multiple improvements to watching project # directories as well as case insensitive filesystems. logger.debug("Watchman version %s", version) if version < (4, 9): raise WatchmanUnavailable("Watchman 4.9 or later is required.") def get_reloader(): """Return the most suitable reloader for this environment.""" try: WatchmanReloader.check_availability() except WatchmanUnavailable: return StatReloader() return WatchmanReloader() def start_django(reloader, main_func, *args, **kwargs): ensure_echo_on() main_func = check_errors(main_func) django_main_thread = threading.Thread( target=main_func, args=args, kwargs=kwargs, name="django-main-thread" ) django_main_thread.daemon = True django_main_thread.start() while not reloader.should_stop: reloader.run(django_main_thread) def run_with_reloader(main_func, *args, **kwargs): signal.signal(signal.SIGTERM, lambda *args: sys.exit(0)) try: if os.environ.get(DJANGO_AUTORELOAD_ENV) == "true": reloader = get_reloader() logger.info( "Watching for file changes with %s", reloader.__class__.__name__ ) start_django(reloader, main_func, *args, **kwargs) else: exit_code = restart_with_reloader() sys.exit(exit_code) except KeyboardInterrupt: pass
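# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of autoreload.py above): how this module
# is typically driven. The worker() callable and the extra watched directory
# are hypothetical stand-ins for, e.g., runserver's inner_run().
# ---------------------------------------------------------------------------
import time
from pathlib import Path

from django.utils import autoreload


def worker():
    # Long-running callable; the reloader restarts the whole process with
    # exit code 3 whenever a watched file changes.
    while True:
        time.sleep(1)


def watch_extra_files(sender, **kwargs):
    # autoreload_started is sent with the active reloader as `sender`, so
    # receivers can register extra directories/globs to monitor.
    sender.watch_dir(Path("conf"), "*.yaml")


autoreload.autoreload_started.connect(watch_extra_files)

if __name__ == "__main__":
    # Parent process: re-exec the command with DJANGO_AUTORELOAD_ENV set.
    # Child process: pick a reloader (Watchman if available, else stat) and
    # run worker() in the "django-main-thread" while watching for changes.
    autoreload.run_with_reloader(worker)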
f7687266ec093387c177c8753a0e45846dd3fc36f7d49821fa208916172e2722
from decimal import Decimal from django.conf import settings from django.utils.safestring import mark_safe def format( number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep="", force_grouping=False, use_l10n=None, ): """ Get a number (as a number or string), and return it as a string, using formats defined as arguments: * decimal_sep: Decimal separator symbol (for example ".") * decimal_pos: Number of decimal positions * grouping: Number of digits in every group limited by thousand separator. For non-uniform digit grouping, it can be a sequence with the number of digit group sizes following the format used by the Python locale module in locale.localeconv() LC_NUMERIC grouping (e.g. (3, 2, 0)). * thousand_sep: Thousand separator symbol (for example ",") """ if number is None or number == "": return mark_safe(number) use_grouping = ( use_l10n or (use_l10n is None and settings.USE_L10N) ) and settings.USE_THOUSAND_SEPARATOR use_grouping = use_grouping or force_grouping use_grouping = use_grouping and grouping != 0 # Make the common case fast if isinstance(number, int) and not use_grouping and not decimal_pos: return mark_safe(number) # sign sign = "" # Treat potentially very large/small floats as Decimals. if isinstance(number, float) and "e" in str(number).lower(): number = Decimal(str(number)) if isinstance(number, Decimal): if decimal_pos is not None: # If the provided number is too small to affect any of the visible # decimal places, consider it equal to '0'. cutoff = Decimal("0." + "1".rjust(decimal_pos, "0")) if abs(number) < cutoff: number = Decimal("0") # Format values with more than 200 digits (an arbitrary cutoff) using # scientific notation to avoid high memory usage in {:f}'.format(). _, digits, exponent = number.as_tuple() if abs(exponent) + len(digits) > 200: number = "{:e}".format(number) coefficient, exponent = number.split("e") # Format the coefficient. coefficient = format( coefficient, decimal_sep, decimal_pos, grouping, thousand_sep, force_grouping, use_l10n, ) return "{}e{}".format(coefficient, exponent) else: str_number = "{:f}".format(number) else: str_number = str(number) if str_number[0] == "-": sign = "-" str_number = str_number[1:] # decimal part if "." in str_number: int_part, dec_part = str_number.split(".") if decimal_pos is not None: dec_part = dec_part[:decimal_pos] else: int_part, dec_part = str_number, "" if decimal_pos is not None: dec_part = dec_part + ("0" * (decimal_pos - len(dec_part))) dec_part = dec_part and decimal_sep + dec_part # grouping if use_grouping: try: # if grouping is a sequence intervals = list(grouping) except TypeError: # grouping is a single value intervals = [grouping, 0] active_interval = intervals.pop(0) int_part_gd = "" cnt = 0 for digit in int_part[::-1]: if cnt and cnt == active_interval: if intervals: active_interval = intervals.pop(0) or active_interval int_part_gd += thousand_sep[::-1] cnt = 0 int_part_gd += digit cnt += 1 int_part = int_part_gd[::-1] return sign + int_part + dec_part
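# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of numberformat.py above). With
# use_l10n=False and explicit separators no Django settings are consulted,
# so this runs standalone; the values are only examples.
# ---------------------------------------------------------------------------
from decimal import Decimal

from django.utils.numberformat import format as number_format

# Uniform grouping by thousands with two decimal places -> "1,234,567.89".
print(number_format(Decimal("1234567.891"), ".", decimal_pos=2, grouping=3,
                    thousand_sep=",", force_grouping=True, use_l10n=False))

# Non-uniform grouping, e.g. Indian-style (3, then repeating 2s) -> "12,34,567".
print(number_format(1234567, ".", grouping=(3, 2, 0), thousand_sep=",",
                    force_grouping=True, use_l10n=False))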
bfcb0d52c9ec1b62e88bdc412089341a6579c8ded558e193cdb9a4f01384b3a1
""" A class for storing a tree graph. Primarily used for filter constructs in the ORM. """ import copy from django.utils.hashable import make_hashable class Node: """ A single internal node in the tree graph. A Node should be viewed as a connection (the root) with the children being either leaf nodes or other Node instances. """ # Standard connector type. Clients usually won't use this at all and # subclasses will usually override the value. default = "DEFAULT" def __init__(self, children=None, connector=None, negated=False): """Construct a new Node. If no connector is given, use the default.""" self.children = children[:] if children else [] self.connector = connector or self.default self.negated = negated @classmethod def create(cls, children=None, connector=None, negated=False): """ Create a new instance using Node() instead of __init__() as some subclasses, e.g. django.db.models.query_utils.Q, may implement a custom __init__() with a signature that conflicts with the one defined in Node.__init__(). """ obj = Node(children, connector or cls.default, negated) obj.__class__ = cls return obj def __str__(self): template = "(NOT (%s: %s))" if self.negated else "(%s: %s)" return template % (self.connector, ", ".join(str(c) for c in self.children)) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def __copy__(self): obj = self.create(connector=self.connector, negated=self.negated) obj.children = self.children # Don't [:] as .__init__() via .create() does. return obj copy = __copy__ def __deepcopy__(self, memodict): obj = self.create(connector=self.connector, negated=self.negated) obj.children = copy.deepcopy(self.children, memodict) return obj def __len__(self): """Return the number of children this node has.""" return len(self.children) def __bool__(self): """Return whether or not this node has children.""" return bool(self.children) def __contains__(self, other): """Return True if 'other' is a direct child of this instance.""" return other in self.children def __eq__(self, other): return ( self.__class__ == other.__class__ and self.connector == other.connector and self.negated == other.negated and self.children == other.children ) def __hash__(self): return hash( ( self.__class__, self.connector, self.negated, *make_hashable(self.children), ) ) def add(self, data, conn_type): """ Combine this tree and the data represented by data using the connector conn_type. The combine is done by squashing the node other away if possible. This tree (self) will never be pushed to a child node of the combined tree, nor will the connector or negated properties change. Return a node which can be used in place of data regardless if the node other got squashed or not. """ if self.connector != conn_type: obj = self.copy() self.connector = conn_type self.children = [obj, data] return data elif ( isinstance(data, Node) and not data.negated and (data.connector == conn_type or len(data) == 1) ): # We can squash the other node's children directly into this node. # We are just doing (AB)(CD) == (ABCD) here, with the addition that # if the length of the other node is 1 the connector doesn't # matter. However, for the len(self) == 1 case we don't want to do # the squashing, as it would alter self.connector. self.children.extend(data.children) return self else: # We could use perhaps additional logic here to see if some # children could be used for pushdown here. self.children.append(data) return data def negate(self): """Negate the sense of the root connector.""" self.negated = not self.negated
90d264a339fd1814d85a97502af12284dbcbd502cee8cc0ed4c7451aede9495a
"""Default variable filters.""" import random as random_module import re import types import warnings from decimal import ROUND_HALF_UP, Context, Decimal, InvalidOperation from functools import wraps from inspect import unwrap from operator import itemgetter from pprint import pformat from urllib.parse import quote from django.utils import formats from django.utils.dateformat import format, time_format from django.utils.deprecation import RemovedInDjango51Warning from django.utils.encoding import iri_to_uri from django.utils.html import avoid_wrapping, conditional_escape, escape, escapejs from django.utils.html import json_script as _json_script from django.utils.html import linebreaks, strip_tags from django.utils.html import urlize as _urlize from django.utils.safestring import SafeData, mark_safe from django.utils.text import Truncator, normalize_newlines, phone2numeric from django.utils.text import slugify as _slugify from django.utils.text import wrap from django.utils.timesince import timesince, timeuntil from django.utils.translation import gettext, ngettext from .base import VARIABLE_ATTRIBUTE_SEPARATOR from .library import Library register = Library() ####################### # STRING DECORATOR # ####################### def stringfilter(func): """ Decorator for filters which should only receive strings. The object passed as the first positional argument will be converted to a string. """ @wraps(func) def _dec(first, *args, **kwargs): first = str(first) result = func(first, *args, **kwargs) if isinstance(first, SafeData) and getattr(unwrap(func), "is_safe", False): result = mark_safe(result) return result return _dec ################### # STRINGS # ################### @register.filter(is_safe=True) @stringfilter def addslashes(value): """ Add slashes before quotes. Useful for escaping strings in CSV, for example. Less useful for escaping JavaScript; use the ``escapejs`` filter instead. """ return value.replace("\\", "\\\\").replace('"', '\\"').replace("'", "\\'") @register.filter(is_safe=True) @stringfilter def capfirst(value): """Capitalize the first character of the value.""" return value and value[0].upper() + value[1:] @register.filter("escapejs") @stringfilter def escapejs_filter(value): """Hex encode characters for use in JavaScript strings.""" return escapejs(value) @register.filter(is_safe=True) def json_script(value, element_id=None): """ Output value JSON-encoded, wrapped in a <script type="application/json"> tag (with an optional id). """ return _json_script(value, element_id) @register.filter(is_safe=True) def floatformat(text, arg=-1): """ Display a float to a specified number of decimal places. If called without an argument, display the floating point number with one decimal place -- but only if there's a decimal place to be displayed: * num1 = 34.23234 * num2 = 34.00000 * num3 = 34.26000 * {{ num1|floatformat }} displays "34.2" * {{ num2|floatformat }} displays "34" * {{ num3|floatformat }} displays "34.3" If arg is positive, always display exactly arg number of decimal places: * {{ num1|floatformat:3 }} displays "34.232" * {{ num2|floatformat:3 }} displays "34.000" * {{ num3|floatformat:3 }} displays "34.260" If arg is negative, display arg number of decimal places -- but only if there are places to be displayed: * {{ num1|floatformat:"-3" }} displays "34.232" * {{ num2|floatformat:"-3" }} displays "34" * {{ num3|floatformat:"-3" }} displays "34.260" If arg has the 'g' suffix, force the result to be grouped by the THOUSAND_SEPARATOR for the active locale. 
When the active locale is en (English): * {{ 6666.6666|floatformat:"2g" }} displays "6,666.67" * {{ 10000|floatformat:"g" }} displays "10,000" If arg has the 'u' suffix, force the result to be unlocalized. When the active locale is pl (Polish): * {{ 66666.6666|floatformat:"2" }} displays "66666,67" * {{ 66666.6666|floatformat:"2u" }} displays "66666.67" If the input float is infinity or NaN, display the string representation of that value. """ force_grouping = False use_l10n = True if isinstance(arg, str): last_char = arg[-1] if arg[-2:] in {"gu", "ug"}: force_grouping = True use_l10n = False arg = arg[:-2] or -1 elif last_char == "g": force_grouping = True arg = arg[:-1] or -1 elif last_char == "u": use_l10n = False arg = arg[:-1] or -1 try: input_val = repr(text) d = Decimal(input_val) except InvalidOperation: try: d = Decimal(str(float(text))) except (ValueError, InvalidOperation, TypeError): return "" try: p = int(arg) except ValueError: return input_val try: m = int(d) - d except (ValueError, OverflowError, InvalidOperation): return input_val if not m and p < 0: return mark_safe( formats.number_format( "%d" % (int(d)), 0, use_l10n=use_l10n, force_grouping=force_grouping, ) ) exp = Decimal(1).scaleb(-abs(p)) # Set the precision high enough to avoid an exception (#15789). tupl = d.as_tuple() units = len(tupl[1]) units += -tupl[2] if m else tupl[2] prec = abs(p) + units + 1 # Avoid conversion to scientific notation by accessing `sign`, `digits`, # and `exponent` from Decimal.as_tuple() directly. rounded_d = d.quantize(exp, ROUND_HALF_UP, Context(prec=prec)) sign, digits, exponent = rounded_d.as_tuple() digits = [str(digit) for digit in reversed(digits)] while len(digits) <= abs(exponent): digits.append("0") digits.insert(-exponent, ".") if sign and rounded_d: digits.append("-") number = "".join(reversed(digits)) return mark_safe( formats.number_format( number, abs(p), use_l10n=use_l10n, force_grouping=force_grouping, ) ) @register.filter(is_safe=True) @stringfilter def iriencode(value): """Escape an IRI value for use in a URL.""" return iri_to_uri(value) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def linenumbers(value, autoescape=True): """Display text with line numbers.""" lines = value.split("\n") # Find the maximum width of the line count, for use with zero padding # string format command width = str(len(str(len(lines)))) if not autoescape or isinstance(value, SafeData): for i, line in enumerate(lines): lines[i] = ("%0" + width + "d. %s") % (i + 1, line) else: for i, line in enumerate(lines): lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line)) return mark_safe("\n".join(lines)) @register.filter(is_safe=True) @stringfilter def lower(value): """Convert a string into all lowercase.""" return value.lower() @register.filter(is_safe=False) @stringfilter def make_list(value): """ Return the value turned into a list. For an integer, it's a list of digits. For a string, it's a list of characters. """ return list(value) @register.filter(is_safe=True) @stringfilter def slugify(value): """ Convert to ASCII. Convert spaces to hyphens. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace. """ return _slugify(value) @register.filter(is_safe=True) def stringformat(value, arg): """ Format the variable according to the arg, a string formatting specifier. This specifier uses Python string formatting syntax, with the exception that the leading "%" is dropped. 
See https://docs.python.org/library/stdtypes.html#printf-style-string-formatting for documentation of Python string formatting. """ if isinstance(value, tuple): value = str(value) try: return ("%" + str(arg)) % value except (ValueError, TypeError): return "" @register.filter(is_safe=True) @stringfilter def title(value): """Convert a string into titlecase.""" t = re.sub("([a-z])'([A-Z])", lambda m: m[0].lower(), value.title()) return re.sub(r"\d([A-Z])", lambda m: m[0].lower(), t) @register.filter(is_safe=True) @stringfilter def truncatechars(value, arg): """Truncate a string after `arg` number of characters.""" try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. return Truncator(value).chars(length) @register.filter(is_safe=True) @stringfilter def truncatechars_html(value, arg): """ Truncate HTML after `arg` number of chars. Preserve newlines in the HTML. """ try: length = int(arg) except ValueError: # invalid literal for int() return value # Fail silently. return Truncator(value).chars(length, html=True) @register.filter(is_safe=True) @stringfilter def truncatewords(value, arg): """ Truncate a string after `arg` number of words. Remove newlines within the string. """ try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. return Truncator(value).words(length, truncate=" …") @register.filter(is_safe=True) @stringfilter def truncatewords_html(value, arg): """ Truncate HTML after `arg` number of words. Preserve newlines in the HTML. """ try: length = int(arg) except ValueError: # invalid literal for int() return value # Fail silently. return Truncator(value).words(length, html=True, truncate=" …") @register.filter(is_safe=False) @stringfilter def upper(value): """Convert a string into all uppercase.""" return value.upper() @register.filter(is_safe=False) @stringfilter def urlencode(value, safe=None): """ Escape a value for use in a URL. The ``safe`` parameter determines the characters which should not be escaped by Python's quote() function. If not provided, use the default safe characters (but an empty string can be provided when *all* characters should be escaped). """ kwargs = {} if safe is not None: kwargs["safe"] = safe return quote(value, **kwargs) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def urlize(value, autoescape=True): """Convert URLs in plain text into clickable links.""" return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape)) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def urlizetrunc(value, limit, autoescape=True): """ Convert URLs into clickable links, truncating URLs to the given character limit, and adding 'rel=nofollow' attribute to discourage spamming. Argument: Length to truncate URLs to. 
""" return mark_safe( _urlize(value, trim_url_limit=int(limit), nofollow=True, autoescape=autoescape) ) @register.filter(is_safe=False) @stringfilter def wordcount(value): """Return the number of words.""" return len(value.split()) @register.filter(is_safe=True) @stringfilter def wordwrap(value, arg): """Wrap words at `arg` line length.""" return wrap(value, int(arg)) @register.filter(is_safe=True) @stringfilter def ljust(value, arg): """Left-align the value in a field of a given width.""" return value.ljust(int(arg)) @register.filter(is_safe=True) @stringfilter def rjust(value, arg): """Right-align the value in a field of a given width.""" return value.rjust(int(arg)) @register.filter(is_safe=True) @stringfilter def center(value, arg): """Center the value in a field of a given width.""" return value.center(int(arg)) @register.filter @stringfilter def cut(value, arg): """Remove all values of arg from the given string.""" safe = isinstance(value, SafeData) value = value.replace(arg, "") if safe and arg != ";": return mark_safe(value) return value ################### # HTML STRINGS # ################### @register.filter("escape", is_safe=True) @stringfilter def escape_filter(value): """Mark the value as a string that should be auto-escaped.""" return conditional_escape(value) @register.filter(is_safe=True) @stringfilter def force_escape(value): """ Escape a string's HTML. Return a new string containing the escaped characters (as opposed to "escape", which marks the content for later possible escaping). """ return escape(value) @register.filter("linebreaks", is_safe=True, needs_autoescape=True) @stringfilter def linebreaks_filter(value, autoescape=True): """ Replace line breaks in plain text with appropriate HTML; a single newline becomes an HTML line break (``<br>``) and a new line followed by a blank line becomes a paragraph break (``</p>``). """ autoescape = autoescape and not isinstance(value, SafeData) return mark_safe(linebreaks(value, autoescape)) @register.filter(is_safe=True, needs_autoescape=True) @stringfilter def linebreaksbr(value, autoescape=True): """ Convert all newlines in a piece of plain text to HTML line breaks (``<br>``). """ autoescape = autoescape and not isinstance(value, SafeData) value = normalize_newlines(value) if autoescape: value = escape(value) return mark_safe(value.replace("\n", "<br>")) @register.filter(is_safe=True) @stringfilter def safe(value): """Mark the value as a string that should not be auto-escaped.""" return mark_safe(value) @register.filter(is_safe=True) def safeseq(value): """ A "safe" filter for sequences. Mark each element in the sequence, individually, as safe, after converting them to strings. Return a list with the results. """ return [mark_safe(obj) for obj in value] @register.filter(is_safe=True) @stringfilter def striptags(value): """Strip all [X]HTML tags.""" return strip_tags(value) ################### # LISTS # ################### def _property_resolver(arg): """ When arg is convertible to float, behave like operator.itemgetter(arg) Otherwise, chain __getitem__() and getattr(). >>> _property_resolver(1)('abc') 'b' >>> _property_resolver('1')('abc') Traceback (most recent call last): ... TypeError: string indices must be integers >>> class Foo: ... a = 42 ... b = 3.14 ... c = 'Hey!' 
>>> _property_resolver('b')(Foo()) 3.14 """ try: float(arg) except ValueError: if VARIABLE_ATTRIBUTE_SEPARATOR + "_" in arg or arg[0] == "_": raise AttributeError("Access to private variables is forbidden.") parts = arg.split(VARIABLE_ATTRIBUTE_SEPARATOR) def resolve(value): for part in parts: try: value = value[part] except (AttributeError, IndexError, KeyError, TypeError, ValueError): value = getattr(value, part) return value return resolve else: return itemgetter(arg) @register.filter(is_safe=False) def dictsort(value, arg): """ Given a list of dicts, return that list sorted by the property given in the argument. """ try: return sorted(value, key=_property_resolver(arg)) except (AttributeError, TypeError): return "" @register.filter(is_safe=False) def dictsortreversed(value, arg): """ Given a list of dicts, return that list sorted in reverse order by the property given in the argument. """ try: return sorted(value, key=_property_resolver(arg), reverse=True) except (AttributeError, TypeError): return "" @register.filter(is_safe=False) def first(value): """Return the first item in a list.""" try: return value[0] except IndexError: return "" @register.filter(is_safe=True, needs_autoescape=True) def join(value, arg, autoescape=True): """Join a list with a string, like Python's ``str.join(list)``.""" try: if autoescape: value = [conditional_escape(v) for v in value] data = conditional_escape(arg).join(value) except TypeError: # Fail silently if arg isn't iterable. return value return mark_safe(data) @register.filter(is_safe=True) def last(value): """Return the last item in a list.""" try: return value[-1] except IndexError: return "" @register.filter(is_safe=False) def length(value): """Return the length of the value - useful for lists.""" try: return len(value) except (ValueError, TypeError): return 0 @register.filter(is_safe=False) def length_is(value, arg): """Return a boolean of whether the value's length is the argument.""" warnings.warn( "The length_is template filter is deprecated in favor of the length template " "filter and the == operator within an {% if %} tag.", RemovedInDjango51Warning, ) try: return len(value) == int(arg) except (ValueError, TypeError): return "" @register.filter(is_safe=True) def random(value): """Return a random item from the list.""" return random_module.choice(value) @register.filter("slice", is_safe=True) def slice_filter(value, arg): """ Return a slice of the list using the same syntax as Python's list slicing. """ try: bits = [] for x in str(arg).split(":"): if not x: bits.append(None) else: bits.append(int(x)) return value[slice(*bits)] except (ValueError, TypeError): return value # Fail silently. @register.filter(is_safe=True, needs_autoescape=True) def unordered_list(value, autoescape=True): """ Recursively take a self-nested list and return an HTML unordered list -- WITHOUT opening and closing <ul> tags. Assume the list is in the proper format. 
For example, if ``var`` contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``, then ``{{ var|unordered_list }}`` returns:: <li>States <ul> <li>Kansas <ul> <li>Lawrence</li> <li>Topeka</li> </ul> </li> <li>Illinois</li> </ul> </li> """ if autoescape: escaper = conditional_escape else: def escaper(x): return x def walk_items(item_list): item_iterator = iter(item_list) try: item = next(item_iterator) while True: try: next_item = next(item_iterator) except StopIteration: yield item, None break if isinstance(next_item, (list, tuple, types.GeneratorType)): try: iter(next_item) except TypeError: pass else: yield item, next_item item = next(item_iterator) continue yield item, None item = next_item except StopIteration: pass def list_formatter(item_list, tabs=1): indent = "\t" * tabs output = [] for item, children in walk_items(item_list): sublist = "" if children: sublist = "\n%s<ul>\n%s\n%s</ul>\n%s" % ( indent, list_formatter(children, tabs + 1), indent, indent, ) output.append("%s<li>%s%s</li>" % (indent, escaper(item), sublist)) return "\n".join(output) return mark_safe(list_formatter(value)) ################### # INTEGERS # ################### @register.filter(is_safe=False) def add(value, arg): """Add the arg to the value.""" try: return int(value) + int(arg) except (ValueError, TypeError): try: return value + arg except Exception: return "" @register.filter(is_safe=False) def get_digit(value, arg): """ Given a whole number, return the requested digit of it, where 1 is the right-most digit, 2 is the second-right-most digit, etc. Return the original value for invalid input (if input or argument is not an integer, or if argument is less than 1). Otherwise, output is always an integer. """ try: arg = int(arg) value = int(value) except ValueError: return value # Fail silently for an invalid argument if arg < 1: return value try: return int(str(value)[-arg]) except IndexError: return 0 ################### # DATES # ################### @register.filter(expects_localtime=True, is_safe=False) def date(value, arg=None): """Format a date according to the given format.""" if value in (None, ""): return "" try: return formats.date_format(value, arg) except AttributeError: try: return format(value, arg) except AttributeError: return "" @register.filter(expects_localtime=True, is_safe=False) def time(value, arg=None): """Format a time according to the given format.""" if value in (None, ""): return "" try: return formats.time_format(value, arg) except (AttributeError, TypeError): try: return time_format(value, arg) except (AttributeError, TypeError): return "" @register.filter("timesince", is_safe=False) def timesince_filter(value, arg=None): """Format a date as the time since that date (i.e. "4 days, 6 hours").""" if not value: return "" try: if arg: return timesince(value, arg) return timesince(value) except (ValueError, TypeError): return "" @register.filter("timeuntil", is_safe=False) def timeuntil_filter(value, arg=None): """Format a date as the time until that date (i.e. 
"4 days, 6 hours").""" if not value: return "" try: return timeuntil(value, arg) except (ValueError, TypeError): return "" ################### # LOGIC # ################### @register.filter(is_safe=False) def default(value, arg): """If value is unavailable, use given default.""" return value or arg @register.filter(is_safe=False) def default_if_none(value, arg): """If value is None, use given default.""" if value is None: return arg return value @register.filter(is_safe=False) def divisibleby(value, arg): """Return True if the value is divisible by the argument.""" return int(value) % int(arg) == 0 @register.filter(is_safe=False) def yesno(value, arg=None): """ Given a string mapping values for true, false, and (optionally) None, return one of those strings according to the value: ========== ====================== ================================== Value Argument Outputs ========== ====================== ================================== ``True`` ``"yeah,no,maybe"`` ``yeah`` ``False`` ``"yeah,no,maybe"`` ``no`` ``None`` ``"yeah,no,maybe"`` ``maybe`` ``None`` ``"yeah,no"`` ``"no"`` (converts None to False if no mapping for None is given. ========== ====================== ================================== """ if arg is None: # Translators: Please do not add spaces around commas. arg = gettext("yes,no,maybe") bits = arg.split(",") if len(bits) < 2: return value # Invalid arg. try: yes, no, maybe = bits except ValueError: # Unpack list of wrong size (no "maybe" value provided). yes, no, maybe = bits[0], bits[1], bits[1] if value is None: return maybe if value: return yes return no ################### # MISC # ################### @register.filter(is_safe=True) def filesizeformat(bytes_): """ Format the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB, 102 bytes, etc.). """ try: bytes_ = int(bytes_) except (TypeError, ValueError, UnicodeDecodeError): value = ngettext("%(size)d byte", "%(size)d bytes", 0) % {"size": 0} return avoid_wrapping(value) def filesize_number_format(value): return formats.number_format(round(value, 1), 1) KB = 1 << 10 MB = 1 << 20 GB = 1 << 30 TB = 1 << 40 PB = 1 << 50 negative = bytes_ < 0 if negative: bytes_ = -bytes_ # Allow formatting of negative numbers. if bytes_ < KB: value = ngettext("%(size)d byte", "%(size)d bytes", bytes_) % {"size": bytes_} elif bytes_ < MB: value = gettext("%s KB") % filesize_number_format(bytes_ / KB) elif bytes_ < GB: value = gettext("%s MB") % filesize_number_format(bytes_ / MB) elif bytes_ < TB: value = gettext("%s GB") % filesize_number_format(bytes_ / GB) elif bytes_ < PB: value = gettext("%s TB") % filesize_number_format(bytes_ / TB) else: value = gettext("%s PB") % filesize_number_format(bytes_ / PB) if negative: value = "-%s" % value return avoid_wrapping(value) @register.filter(is_safe=False) def pluralize(value, arg="s"): """ Return a plural suffix if the value is not 1, '1', or an object of length 1. By default, use 's' as the suffix: * If value is 0, vote{{ value|pluralize }} display "votes". * If value is 1, vote{{ value|pluralize }} display "vote". * If value is 2, vote{{ value|pluralize }} display "votes". If an argument is provided, use that string instead: * If value is 0, class{{ value|pluralize:"es" }} display "classes". * If value is 1, class{{ value|pluralize:"es" }} display "class". * If value is 2, class{{ value|pluralize:"es" }} display "classes". 
If the provided argument contains a comma, use the text before the comma for the singular case and the text after the comma for the plural case: * If value is 0, cand{{ value|pluralize:"y,ies" }} display "candies". * If value is 1, cand{{ value|pluralize:"y,ies" }} display "candy". * If value is 2, cand{{ value|pluralize:"y,ies" }} display "candies". """ if "," not in arg: arg = "," + arg bits = arg.split(",") if len(bits) > 2: return "" singular_suffix, plural_suffix = bits[:2] try: return singular_suffix if float(value) == 1 else plural_suffix except ValueError: # Invalid string that's not a number. pass except TypeError: # Value isn't a string or a number; maybe it's a list? try: return singular_suffix if len(value) == 1 else plural_suffix except TypeError: # len() of unsized object. pass return "" @register.filter("phone2numeric", is_safe=True) def phone2numeric_filter(value): """Take a phone number and converts it in to its numerical equivalent.""" return phone2numeric(value) @register.filter(is_safe=True) def pprint(value): """A wrapper around pprint.pprint -- for debugging, really.""" try: return pformat(value) except Exception as e: return "Error in formatting: %s: %s" % (e.__class__.__name__, e)
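# Usage sketch (not part of django/template/defaultfilters.py): a minimal,
# self-contained example of rendering a few of the filters defined above
# through the template engine. The settings values below are only what a
# standalone script needs; in a real project they live in settings.py.
import django
from django.conf import settings

if not settings.configured:
    settings.configure(
        TEMPLATES=[{"BACKEND": "django.template.backends.django.DjangoTemplates"}],
        USE_I18N=False,
    )
    django.setup()

from django.template import Context, Template

tpl = Template(
    "{{ files|length }} file{{ files|pluralize }}, "
    "largest {{ largest|filesizeformat }}, "
    "readable: {{ readable|yesno:'yes,no,unknown' }}"
)
print(
    tpl.render(
        Context({"files": ["a.txt", "b.txt"], "largest": 10 * 1024**2, "readable": None})
    )
)
# Prints roughly: "2 files, largest 10.0 MB, readable: unknown"
# (filesizeformat joins the number and unit with a non-breaking space).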
from django.conf import settings from django.conf.urls.i18n import is_language_prefix_patterns_used from django.http import HttpResponseRedirect from django.urls import get_script_prefix, is_valid_path from django.utils import translation from django.utils.cache import patch_vary_headers from django.utils.deprecation import MiddlewareMixin class LocaleMiddleware(MiddlewareMixin): """ Parse a request and decide what translation object to install in the current thread context. This allows pages to be dynamically translated to the language the user desires (if the language is available). """ response_redirect_class = HttpResponseRedirect def get_fallback_language(self, request): """ Return the fallback language for the current request based on the settings. If LANGUAGE_CODE is a variant not included in the supported languages, get_fallback_language() will try to fallback to a supported generic variant. Can be overridden to have a fallback language depending on the request, e.g. based on top level domain. """ try: return translation.get_supported_language_variant(settings.LANGUAGE_CODE) except LookupError: return settings.LANGUAGE_CODE def process_request(self, request): urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF) i18n_patterns_used, _ = is_language_prefix_patterns_used(urlconf) language = translation.get_language_from_request( request, check_path=i18n_patterns_used ) if not language: language = self.get_fallback_language(request) translation.activate(language) request.LANGUAGE_CODE = translation.get_language() def process_response(self, request, response): language = translation.get_language() language_from_path = translation.get_language_from_path(request.path_info) language_from_request = translation.get_language_from_request(request) urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF) ( i18n_patterns_used, prefixed_default_language, ) = is_language_prefix_patterns_used(urlconf) if ( response.status_code == 404 and not language_from_path and i18n_patterns_used and (prefixed_default_language or language_from_request) ): # Maybe the language code is missing in the URL? Try adding the # language prefix and redirecting to that URL. language_path = "/%s%s" % (language, request.path_info) path_valid = is_valid_path(language_path, urlconf) path_needs_slash = not path_valid and ( settings.APPEND_SLASH and not language_path.endswith("/") and is_valid_path("%s/" % language_path, urlconf) ) if path_valid or path_needs_slash: script_prefix = get_script_prefix() # Insert language after the script prefix and before the # rest of the URL language_url = request.get_full_path( force_append_slash=path_needs_slash ).replace(script_prefix, "%s%s/" % (script_prefix, language), 1) # Redirect to the language-specific URL as detected by # get_language_from_request(). HTTP caches may cache this # redirect, so add the Vary header. redirect = self.response_redirect_class(language_url) patch_vary_headers(redirect, ("Accept-Language", "Cookie")) return redirect if not (i18n_patterns_used and language_from_path): patch_vary_headers(response, ("Accept-Language",)) response.headers.setdefault("Content-Language", language) return response
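# Configuration sketch (not part of django/middleware/locale.py): the pieces
# LocaleMiddleware expects to find. The middleware is listed after
# SessionMiddleware and before CommonMiddleware, i18n_patterns() wraps the URLs
# that should carry a language prefix, and LANGUAGE_CODE/LANGUAGES supply the
# fallback and the supported languages. The values below are illustrative.

# settings.py (excerpt)
MIDDLEWARE = [
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.locale.LocaleMiddleware",
    "django.middleware.common.CommonMiddleware",
]
LANGUAGE_CODE = "en"
LANGUAGES = [("en", "English"), ("de", "German")]
USE_I18N = True

# urls.py (excerpt)
from django.conf.urls.i18n import i18n_patterns
from django.http import HttpResponse
from django.urls import path

def about(request):
    return HttpResponse("about")

urlpatterns = i18n_patterns(
    path("about/", about, name="about"),
    # With the default prefix_default_language=True, a request for "/about/"
    # 404s in the resolver and process_response() above redirects it to
    # "/en/about/" (or "/de/about/" for an Accept-Language: de client).
)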
from contextlib import ContextDecorator, contextmanager from django.db import ( DEFAULT_DB_ALIAS, DatabaseError, Error, ProgrammingError, connections, ) class TransactionManagementError(ProgrammingError): """Transaction management is used improperly.""" pass def get_connection(using=None): """ Get a database connection by name, or the default database connection if no name is provided. This is a private API. """ if using is None: using = DEFAULT_DB_ALIAS return connections[using] def get_autocommit(using=None): """Get the autocommit status of the connection.""" return get_connection(using).get_autocommit() def set_autocommit(autocommit, using=None): """Set the autocommit status of the connection.""" return get_connection(using).set_autocommit(autocommit) def commit(using=None): """Commit a transaction.""" get_connection(using).commit() def rollback(using=None): """Roll back a transaction.""" get_connection(using).rollback() def savepoint(using=None): """ Create a savepoint (if supported and required by the backend) inside the current transaction. Return an identifier for the savepoint that will be used for the subsequent rollback or commit. """ return get_connection(using).savepoint() def savepoint_rollback(sid, using=None): """ Roll back the most recent savepoint (if one exists). Do nothing if savepoints are not supported. """ get_connection(using).savepoint_rollback(sid) def savepoint_commit(sid, using=None): """ Commit the most recent savepoint (if one exists). Do nothing if savepoints are not supported. """ get_connection(using).savepoint_commit(sid) def clean_savepoints(using=None): """ Reset the counter used to generate unique savepoint ids in this thread. """ get_connection(using).clean_savepoints() def get_rollback(using=None): """Get the "needs rollback" flag -- for *advanced use* only.""" return get_connection(using).get_rollback() def set_rollback(rollback, using=None): """ Set or unset the "needs rollback" flag -- for *advanced use* only. When `rollback` is `True`, trigger a rollback when exiting the innermost enclosing atomic block that has `savepoint=True` (that's the default). Use this to force a rollback without raising an exception. When `rollback` is `False`, prevent such a rollback. Use this only after rolling back to a known-good state! Otherwise, you break the atomic block and data corruption may occur. """ return get_connection(using).set_rollback(rollback) @contextmanager def mark_for_rollback_on_error(using=None): """ Internal low-level utility to mark a transaction as "needs rollback" when an exception is raised while not enforcing the enclosed block to be in a transaction. This is needed by Model.save() and friends to avoid starting a transaction when in autocommit mode and a single query is executed. It's equivalent to: connection = get_connection(using) if connection.get_autocommit(): yield else: with transaction.atomic(using=using, savepoint=False): yield but it uses low-level utilities to avoid performance overhead. """ try: yield except Exception as exc: connection = get_connection(using) if connection.in_atomic_block: connection.needs_rollback = True connection.rollback_exc = exc raise def on_commit(func, using=None, robust=False): """ Register `func` to be called when the current transaction is committed. If the current transaction is rolled back, `func` will not be called. 
""" get_connection(using).on_commit(func, robust) ################################# # Decorators / context managers # ################################# class Atomic(ContextDecorator): """ Guarantee the atomic execution of a given block. An instance can be used either as a decorator or as a context manager. When it's used as a decorator, __call__ wraps the execution of the decorated function in the instance itself, used as a context manager. When it's used as a context manager, __enter__ creates a transaction or a savepoint, depending on whether a transaction is already in progress, and __exit__ commits the transaction or releases the savepoint on normal exit, and rolls back the transaction or to the savepoint on exceptions. It's possible to disable the creation of savepoints if the goal is to ensure that some code runs within a transaction without creating overhead. A stack of savepoints identifiers is maintained as an attribute of the connection. None denotes the absence of a savepoint. This allows reentrancy even if the same AtomicWrapper is reused. For example, it's possible to define `oa = atomic('other')` and use `@oa` or `with oa:` multiple times. Since database connections are thread-local, this is thread-safe. An atomic block can be tagged as durable. In this case, raise a RuntimeError if it's nested within another atomic block. This guarantees that database changes in a durable block are committed to the database when the block exists without error. This is a private API. """ def __init__(self, using, savepoint, durable): self.using = using self.savepoint = savepoint self.durable = durable self._from_testcase = False def __enter__(self): connection = get_connection(self.using) if ( self.durable and connection.atomic_blocks and not connection.atomic_blocks[-1]._from_testcase ): raise RuntimeError( "A durable atomic block cannot be nested within another " "atomic block." ) if not connection.in_atomic_block: # Reset state when entering an outermost atomic block. connection.commit_on_exit = True connection.needs_rollback = False if not connection.get_autocommit(): # Pretend we're already in an atomic block to bypass the code # that disables autocommit to enter a transaction, and make a # note to deal with this case in __exit__. connection.in_atomic_block = True connection.commit_on_exit = False if connection.in_atomic_block: # We're already in a transaction; create a savepoint, unless we # were told not to or we're already waiting for a rollback. The # second condition avoids creating useless savepoints and prevents # overwriting needs_rollback until the rollback is performed. if self.savepoint and not connection.needs_rollback: sid = connection.savepoint() connection.savepoint_ids.append(sid) else: connection.savepoint_ids.append(None) else: connection.set_autocommit( False, force_begin_transaction_with_broken_autocommit=True ) connection.in_atomic_block = True if connection.in_atomic_block: connection.atomic_blocks.append(self) def __exit__(self, exc_type, exc_value, traceback): connection = get_connection(self.using) if connection.in_atomic_block: connection.atomic_blocks.pop() if connection.savepoint_ids: sid = connection.savepoint_ids.pop() else: # Prematurely unset this flag to allow using commit or rollback. connection.in_atomic_block = False try: if connection.closed_in_transaction: # The database will perform a rollback by itself. # Wait until we exit the outermost block. 
pass elif exc_type is None and not connection.needs_rollback: if connection.in_atomic_block: # Release savepoint if there is one if sid is not None: try: connection.savepoint_commit(sid) except DatabaseError: try: connection.savepoint_rollback(sid) # The savepoint won't be reused. Release it to # minimize overhead for the database server. connection.savepoint_commit(sid) except Error: # If rolling back to a savepoint fails, mark for # rollback at a higher level and avoid shadowing # the original exception. connection.needs_rollback = True raise else: # Commit transaction try: connection.commit() except DatabaseError: try: connection.rollback() except Error: # An error during rollback means that something # went wrong with the connection. Drop it. connection.close() raise else: # This flag will be set to True again if there isn't a savepoint # allowing to perform the rollback at this level. connection.needs_rollback = False if connection.in_atomic_block: # Roll back to savepoint if there is one, mark for rollback # otherwise. if sid is None: connection.needs_rollback = True else: try: connection.savepoint_rollback(sid) # The savepoint won't be reused. Release it to # minimize overhead for the database server. connection.savepoint_commit(sid) except Error: # If rolling back to a savepoint fails, mark for # rollback at a higher level and avoid shadowing # the original exception. connection.needs_rollback = True else: # Roll back transaction try: connection.rollback() except Error: # An error during rollback means that something # went wrong with the connection. Drop it. connection.close() finally: # Outermost block exit when autocommit was enabled. if not connection.in_atomic_block: if connection.closed_in_transaction: connection.connection = None else: connection.set_autocommit(True) # Outermost block exit when autocommit was disabled. elif not connection.savepoint_ids and not connection.commit_on_exit: if connection.closed_in_transaction: connection.connection = None else: connection.in_atomic_block = False def atomic(using=None, savepoint=True, durable=False): # Bare decorator: @atomic -- although the first argument is called # `using`, it's actually the function being decorated. if callable(using): return Atomic(DEFAULT_DB_ALIAS, savepoint, durable)(using) # Decorator: @atomic(...) or context manager: with atomic(...): ... else: return Atomic(using, savepoint, durable) def _non_atomic_requests(view, using): try: view._non_atomic_requests.add(using) except AttributeError: view._non_atomic_requests = {using} return view def non_atomic_requests(using=None): if callable(using): return _non_atomic_requests(using, DEFAULT_DB_ALIAS) else: if using is None: using = DEFAULT_DB_ALIAS return lambda view: _non_atomic_requests(view, using)
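# Usage sketch (not part of django/db/transaction.py) of the public API defined
# above. The model objects and the notify_created() callback are illustrative;
# only the transaction calls themselves are the point.
from django.db import IntegrityError, transaction


def create_parent_and_children(parent, children):
    # The outer block opens the transaction; each inner block only creates a
    # savepoint, so a failing child insert rolls back to its savepoint without
    # discarding the parent row or the other children.
    with transaction.atomic():
        parent.save()
        for child in children:
            try:
                with transaction.atomic():  # savepoint inside the outer block
                    child.save()
            except IntegrityError:
                continue  # skip the bad row, keep the rest of the work
        # Runs only if the outermost block commits successfully; skipped on
        # rollback.
        transaction.on_commit(lambda: notify_created(parent))  # illustrative


@transaction.atomic(using="default", durable=True)
def transfer(source, target, amount):
    # durable=True raises RuntimeError if this function is ever invoked from
    # within another atomic block, so the commit is guaranteed to happen here.
    source.withdraw(amount)
    target.deposit(amount)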
""" This module converts requested URLs to callback view functions. URLResolver is the main class here. Its resolve() method takes a URL (as a string) and returns a ResolverMatch object which provides access to all attributes of the resolved URL match. """ import functools import inspect import re import string from importlib import import_module from pickle import PicklingError from urllib.parse import quote from asgiref.local import Local from django.conf import settings from django.core.checks import Error, Warning from django.core.checks.urls import check_resolver from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.utils.datastructures import MultiValueDict from django.utils.functional import cached_property from django.utils.http import RFC3986_SUBDELIMS, escape_leading_slashes from django.utils.regex_helper import _lazy_re_compile, normalize from django.utils.translation import get_language from .converters import get_converter from .exceptions import NoReverseMatch, Resolver404 from .utils import get_callable class ResolverMatch: def __init__( self, func, args, kwargs, url_name=None, app_names=None, namespaces=None, route=None, tried=None, captured_kwargs=None, extra_kwargs=None, ): self.func = func self.args = args self.kwargs = kwargs self.url_name = url_name self.route = route self.tried = tried self.captured_kwargs = captured_kwargs self.extra_kwargs = extra_kwargs # If a URLRegexResolver doesn't have a namespace or app_name, it passes # in an empty value. self.app_names = [x for x in app_names if x] if app_names else [] self.app_name = ":".join(self.app_names) self.namespaces = [x for x in namespaces if x] if namespaces else [] self.namespace = ":".join(self.namespaces) if hasattr(func, "view_class"): func = func.view_class if not hasattr(func, "__name__"): # A class-based view self._func_path = func.__class__.__module__ + "." + func.__class__.__name__ else: # A function-based view self._func_path = func.__module__ + "." + func.__name__ view_path = url_name or self._func_path self.view_name = ":".join(self.namespaces + [view_path]) def __getitem__(self, index): return (self.func, self.args, self.kwargs)[index] def __repr__(self): if isinstance(self.func, functools.partial): func = repr(self.func) else: func = self._func_path return ( "ResolverMatch(func=%s, args=%r, kwargs=%r, url_name=%r, " "app_names=%r, namespaces=%r, route=%r%s%s)" % ( func, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces, self.route, f", captured_kwargs={self.captured_kwargs!r}" if self.captured_kwargs else "", f", extra_kwargs={self.extra_kwargs!r}" if self.extra_kwargs else "", ) ) def __reduce_ex__(self, protocol): raise PicklingError(f"Cannot pickle {self.__class__.__qualname__}.") def get_resolver(urlconf=None): if urlconf is None: urlconf = settings.ROOT_URLCONF return _get_cached_resolver(urlconf) @functools.lru_cache(maxsize=None) def _get_cached_resolver(urlconf=None): return URLResolver(RegexPattern(r"^/"), urlconf) @functools.lru_cache(maxsize=None) def get_ns_resolver(ns_pattern, resolver, converters): # Build a namespaced resolver for the given parent URLconf pattern. # This makes it possible to have captured parameters in the parent # URLconf pattern. 
pattern = RegexPattern(ns_pattern) pattern.converters = dict(converters) ns_resolver = URLResolver(pattern, resolver.url_patterns) return URLResolver(RegexPattern(r"^/"), [ns_resolver]) class LocaleRegexDescriptor: def __init__(self, attr): self.attr = attr def __get__(self, instance, cls=None): """ Return a compiled regular expression based on the active language. """ if instance is None: return self # As a performance optimization, if the given regex string is a regular # string (not a lazily-translated string proxy), compile it once and # avoid per-language compilation. pattern = getattr(instance, self.attr) if isinstance(pattern, str): instance.__dict__["regex"] = instance._compile(pattern) return instance.__dict__["regex"] language_code = get_language() if language_code not in instance._regex_dict: instance._regex_dict[language_code] = instance._compile(str(pattern)) return instance._regex_dict[language_code] class CheckURLMixin: def describe(self): """ Format the URL pattern for display in warning messages. """ description = "'{}'".format(self) if self.name: description += " [name='{}']".format(self.name) return description def _check_pattern_startswith_slash(self): """ Check that the pattern does not begin with a forward slash. """ regex_pattern = self.regex.pattern if not settings.APPEND_SLASH: # Skip check as it can be useful to start a URL pattern with a slash # when APPEND_SLASH=False. return [] if regex_pattern.startswith(("/", "^/", "^\\/")) and not regex_pattern.endswith( "/" ): warning = Warning( "Your URL pattern {} has a route beginning with a '/'. Remove this " "slash as it is unnecessary. If this pattern is targeted in an " "include(), ensure the include() pattern has a trailing '/'.".format( self.describe() ), id="urls.W002", ) return [warning] else: return [] class RegexPattern(CheckURLMixin): regex = LocaleRegexDescriptor("_regex") def __init__(self, regex, name=None, is_endpoint=False): self._regex = regex self._regex_dict = {} self._is_endpoint = is_endpoint self.name = name self.converters = {} def match(self, path): match = ( self.regex.fullmatch(path) if self._is_endpoint and self.regex.pattern.endswith("$") else self.regex.search(path) ) if match: # If there are any named groups, use those as kwargs, ignoring # non-named groups. Otherwise, pass all non-named arguments as # positional arguments. kwargs = match.groupdict() args = () if kwargs else match.groups() kwargs = {k: v for k, v in kwargs.items() if v is not None} return path[match.end() :], args, kwargs return None def check(self): warnings = [] warnings.extend(self._check_pattern_startswith_slash()) if not self._is_endpoint: warnings.extend(self._check_include_trailing_dollar()) return warnings def _check_include_trailing_dollar(self): regex_pattern = self.regex.pattern if regex_pattern.endswith("$") and not regex_pattern.endswith(r"\$"): return [ Warning( "Your URL pattern {} uses include with a route ending with a '$'. 
" "Remove the dollar from the route to avoid problems including " "URLs.".format(self.describe()), id="urls.W001", ) ] else: return [] def _compile(self, regex): """Compile and return the given regular expression.""" try: return re.compile(regex) except re.error as e: raise ImproperlyConfigured( '"%s" is not a valid regular expression: %s' % (regex, e) ) from e def __str__(self): return str(self._regex) _PATH_PARAMETER_COMPONENT_RE = _lazy_re_compile( r"<(?:(?P<converter>[^>:]+):)?(?P<parameter>[^>]+)>" ) def _route_to_regex(route, is_endpoint=False): """ Convert a path pattern into a regular expression. Return the regular expression and a dictionary mapping the capture names to the converters. For example, 'foo/<int:pk>' returns '^foo\\/(?P<pk>[0-9]+)' and {'pk': <django.urls.converters.IntConverter>}. """ original_route = route parts = ["^"] converters = {} while True: match = _PATH_PARAMETER_COMPONENT_RE.search(route) if not match: parts.append(re.escape(route)) break elif not set(match.group()).isdisjoint(string.whitespace): raise ImproperlyConfigured( "URL route '%s' cannot contain whitespace in angle brackets " "<…>." % original_route ) parts.append(re.escape(route[: match.start()])) route = route[match.end() :] parameter = match["parameter"] if not parameter.isidentifier(): raise ImproperlyConfigured( "URL route '%s' uses parameter name %r which isn't a valid " "Python identifier." % (original_route, parameter) ) raw_converter = match["converter"] if raw_converter is None: # If a converter isn't specified, the default is `str`. raw_converter = "str" try: converter = get_converter(raw_converter) except KeyError as e: raise ImproperlyConfigured( "URL route %r uses invalid converter %r." % (original_route, raw_converter) ) from e converters[parameter] = converter parts.append("(?P<" + parameter + ">" + converter.regex + ")") if is_endpoint: parts.append(r"\Z") return "".join(parts), converters class RoutePattern(CheckURLMixin): regex = LocaleRegexDescriptor("_route") def __init__(self, route, name=None, is_endpoint=False): self._route = route self._regex_dict = {} self._is_endpoint = is_endpoint self.name = name self.converters = _route_to_regex(str(route), is_endpoint)[1] def match(self, path): match = self.regex.search(path) if match: # RoutePattern doesn't allow non-named groups so args are ignored. kwargs = match.groupdict() for key, value in kwargs.items(): converter = self.converters[key] try: kwargs[key] = converter.to_python(value) except ValueError: return None return path[match.end() :], (), kwargs return None def check(self): warnings = self._check_pattern_startswith_slash() route = self._route if "(?P<" in route or route.startswith("^") or route.endswith("$"): warnings.append( Warning( "Your URL pattern {} has a route that contains '(?P<', begins " "with a '^', or ends with a '$'. This was likely an oversight " "when migrating to django.urls.path().".format(self.describe()), id="2_0.W001", ) ) return warnings def _compile(self, route): return re.compile(_route_to_regex(route, self._is_endpoint)[0]) def __str__(self): return str(self._route) class LocalePrefixPattern: def __init__(self, prefix_default_language=True): self.prefix_default_language = prefix_default_language self.converters = {} @property def regex(self): # This is only used by reverse() and cached in _reverse_dict. 
return re.compile(re.escape(self.language_prefix)) @property def language_prefix(self): language_code = get_language() or settings.LANGUAGE_CODE if language_code == settings.LANGUAGE_CODE and not self.prefix_default_language: return "" else: return "%s/" % language_code def match(self, path): language_prefix = self.language_prefix if path.startswith(language_prefix): return path[len(language_prefix) :], (), {} return None def check(self): return [] def describe(self): return "'{}'".format(self) def __str__(self): return self.language_prefix class URLPattern: def __init__(self, pattern, callback, default_args=None, name=None): self.pattern = pattern self.callback = callback # the view self.default_args = default_args or {} self.name = name def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.pattern.describe()) def check(self): warnings = self._check_pattern_name() warnings.extend(self.pattern.check()) warnings.extend(self._check_callback()) return warnings def _check_pattern_name(self): """ Check that the pattern name does not contain a colon. """ if self.pattern.name is not None and ":" in self.pattern.name: warning = Warning( "Your URL pattern {} has a name including a ':'. Remove the colon, to " "avoid ambiguous namespace references.".format(self.pattern.describe()), id="urls.W003", ) return [warning] else: return [] def _check_callback(self): from django.views import View view = self.callback if inspect.isclass(view) and issubclass(view, View): return [ Error( "Your URL pattern %s has an invalid view, pass %s.as_view() " "instead of %s." % ( self.pattern.describe(), view.__name__, view.__name__, ), id="urls.E009", ) ] return [] def resolve(self, path): match = self.pattern.match(path) if match: new_path, args, captured_kwargs = match # Pass any default args as **kwargs. kwargs = {**captured_kwargs, **self.default_args} return ResolverMatch( self.callback, args, kwargs, self.pattern.name, route=str(self.pattern), captured_kwargs=captured_kwargs, extra_kwargs=self.default_args, ) @cached_property def lookup_str(self): """ A string that identifies the view (e.g. 'path.to.view_function' or 'path.to.ClassBasedView'). """ callback = self.callback if isinstance(callback, functools.partial): callback = callback.func if hasattr(callback, "view_class"): callback = callback.view_class elif not hasattr(callback, "__name__"): return callback.__module__ + "." + callback.__class__.__name__ return callback.__module__ + "." + callback.__qualname__ class URLResolver: def __init__( self, pattern, urlconf_name, default_kwargs=None, app_name=None, namespace=None ): self.pattern = pattern # urlconf_name is the dotted Python path to the module defining # urlpatterns. It may also be an object with an urlpatterns attribute # or urlpatterns itself. 
self.urlconf_name = urlconf_name self.callback = None self.default_kwargs = default_kwargs or {} self.namespace = namespace self.app_name = app_name self._reverse_dict = {} self._namespace_dict = {} self._app_dict = {} # set of dotted paths to all functions and classes that are used in # urlpatterns self._callback_strs = set() self._populated = False self._local = Local() def __repr__(self): if isinstance(self.urlconf_name, list) and self.urlconf_name: # Don't bother to output the whole list, it can be huge urlconf_repr = "<%s list>" % self.urlconf_name[0].__class__.__name__ else: urlconf_repr = repr(self.urlconf_name) return "<%s %s (%s:%s) %s>" % ( self.__class__.__name__, urlconf_repr, self.app_name, self.namespace, self.pattern.describe(), ) def check(self): messages = [] for pattern in self.url_patterns: messages.extend(check_resolver(pattern)) messages.extend(self._check_custom_error_handlers()) return messages or self.pattern.check() def _check_custom_error_handlers(self): messages = [] # All handlers take (request, exception) arguments except handler500 # which takes (request). for status_code, num_parameters in [(400, 2), (403, 2), (404, 2), (500, 1)]: try: handler = self.resolve_error_handler(status_code) except (ImportError, ViewDoesNotExist) as e: path = getattr(self.urlconf_module, "handler%s" % status_code) msg = ( "The custom handler{status_code} view '{path}' could not be " "imported." ).format(status_code=status_code, path=path) messages.append(Error(msg, hint=str(e), id="urls.E008")) continue signature = inspect.signature(handler) args = [None] * num_parameters try: signature.bind(*args) except TypeError: msg = ( "The custom handler{status_code} view '{path}' does not " "take the correct number of arguments ({args})." ).format( status_code=status_code, path=handler.__module__ + "." + handler.__qualname__, args="request, exception" if num_parameters == 2 else "request", ) messages.append(Error(msg, id="urls.E007")) return messages def _populate(self): # Short-circuit if called recursively in this thread to prevent # infinite recursion. Concurrent threads may call this at the same # time and will need to continue, so set 'populating' on a # thread-local variable. if getattr(self._local, "populating", False): return try: self._local.populating = True lookups = MultiValueDict() namespaces = {} apps = {} language_code = get_language() for url_pattern in reversed(self.url_patterns): p_pattern = url_pattern.pattern.regex.pattern if p_pattern.startswith("^"): p_pattern = p_pattern[1:] if isinstance(url_pattern, URLPattern): self._callback_strs.add(url_pattern.lookup_str) bits = normalize(url_pattern.pattern.regex.pattern) lookups.appendlist( url_pattern.callback, ( bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters, ), ) if url_pattern.name is not None: lookups.appendlist( url_pattern.name, ( bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters, ), ) else: # url_pattern is a URLResolver. 
url_pattern._populate() if url_pattern.app_name: apps.setdefault(url_pattern.app_name, []).append( url_pattern.namespace ) namespaces[url_pattern.namespace] = (p_pattern, url_pattern) else: for name in url_pattern.reverse_dict: for ( matches, pat, defaults, converters, ) in url_pattern.reverse_dict.getlist(name): new_matches = normalize(p_pattern + pat) lookups.appendlist( name, ( new_matches, p_pattern + pat, {**defaults, **url_pattern.default_kwargs}, { **self.pattern.converters, **url_pattern.pattern.converters, **converters, }, ), ) for namespace, ( prefix, sub_pattern, ) in url_pattern.namespace_dict.items(): current_converters = url_pattern.pattern.converters sub_pattern.pattern.converters.update(current_converters) namespaces[namespace] = (p_pattern + prefix, sub_pattern) for app_name, namespace_list in url_pattern.app_dict.items(): apps.setdefault(app_name, []).extend(namespace_list) self._callback_strs.update(url_pattern._callback_strs) self._namespace_dict[language_code] = namespaces self._app_dict[language_code] = apps self._reverse_dict[language_code] = lookups self._populated = True finally: self._local.populating = False @property def reverse_dict(self): language_code = get_language() if language_code not in self._reverse_dict: self._populate() return self._reverse_dict[language_code] @property def namespace_dict(self): language_code = get_language() if language_code not in self._namespace_dict: self._populate() return self._namespace_dict[language_code] @property def app_dict(self): language_code = get_language() if language_code not in self._app_dict: self._populate() return self._app_dict[language_code] @staticmethod def _extend_tried(tried, pattern, sub_tried=None): if sub_tried is None: tried.append([pattern]) else: tried.extend([pattern, *t] for t in sub_tried) @staticmethod def _join_route(route1, route2): """Join two routes, without the starting ^ in the second route.""" if not route1: return route2 if route2.startswith("^"): route2 = route2[1:] return route1 + route2 def _is_callback(self, name): if not self._populated: self._populate() return name in self._callback_strs def resolve(self, path): path = str(path) # path may be a reverse_lazy object tried = [] match = self.pattern.match(path) if match: new_path, args, kwargs = match for pattern in self.url_patterns: try: sub_match = pattern.resolve(new_path) except Resolver404 as e: self._extend_tried(tried, pattern, e.args[0].get("tried")) else: if sub_match: # Merge captured arguments in match with submatch sub_match_dict = {**kwargs, **self.default_kwargs} # Update the sub_match_dict with the kwargs from the sub_match. sub_match_dict.update(sub_match.kwargs) # If there are *any* named groups, ignore all non-named groups. # Otherwise, pass all non-named arguments as positional # arguments. 
sub_match_args = sub_match.args if not sub_match_dict: sub_match_args = args + sub_match.args current_route = ( "" if isinstance(pattern, URLPattern) else str(pattern.pattern) ) self._extend_tried(tried, pattern, sub_match.tried) return ResolverMatch( sub_match.func, sub_match_args, sub_match_dict, sub_match.url_name, [self.app_name] + sub_match.app_names, [self.namespace] + sub_match.namespaces, self._join_route(current_route, sub_match.route), tried, captured_kwargs=sub_match.captured_kwargs, extra_kwargs={ **self.default_kwargs, **sub_match.extra_kwargs, }, ) tried.append([pattern]) raise Resolver404({"tried": tried, "path": new_path}) raise Resolver404({"path": path}) @cached_property def urlconf_module(self): if isinstance(self.urlconf_name, str): return import_module(self.urlconf_name) else: return self.urlconf_name @cached_property def url_patterns(self): # urlconf_module might be a valid set of patterns, so we default to it patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module) try: iter(patterns) except TypeError as e: msg = ( "The included URLconf '{name}' does not appear to have " "any patterns in it. If you see the 'urlpatterns' variable " "with valid patterns in the file then the issue is probably " "caused by a circular import." ) raise ImproperlyConfigured(msg.format(name=self.urlconf_name)) from e return patterns def resolve_error_handler(self, view_type): callback = getattr(self.urlconf_module, "handler%s" % view_type, None) if not callback: # No handler specified in file; use lazy import, since # django.conf.urls imports this file. from django.conf import urls callback = getattr(urls, "handler%s" % view_type) return get_callable(callback) def reverse(self, lookup_view, *args, **kwargs): return self._reverse_with_prefix(lookup_view, "", *args, **kwargs) def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs): if args and kwargs: raise ValueError("Don't mix *args and **kwargs in call to reverse()!") if not self._populated: self._populate() possibilities = self.reverse_dict.getlist(lookup_view) for possibility, pattern, defaults, converters in possibilities: for result, params in possibility: if args: if len(args) != len(params): continue candidate_subs = dict(zip(params, args)) else: if set(kwargs).symmetric_difference(params).difference(defaults): continue matches = True for k, v in defaults.items(): if k in params: continue if kwargs.get(k, v) != v: matches = False break if not matches: continue candidate_subs = kwargs # Convert the candidate subs to text using Converter.to_url(). text_candidate_subs = {} match = True for k, v in candidate_subs.items(): if k in converters: try: text_candidate_subs[k] = converters[k].to_url(v) except ValueError: match = False break else: text_candidate_subs[k] = str(v) if not match: continue # WSGI provides decoded URLs, without %xx escapes, and the URL # resolver operates on such URLs. First substitute arguments # without quoting to build a decoded URL and look for a match. # Then, if we have a match, redo the substitution with quoted # arguments in order to return a properly encoded URL. candidate_pat = _prefix.replace("%", "%%") + result if re.search( "^%s%s" % (re.escape(_prefix), pattern), candidate_pat % text_candidate_subs, ): # safe characters from `pchar` definition of RFC 3986 url = quote( candidate_pat % text_candidate_subs, safe=RFC3986_SUBDELIMS + "/~:@", ) # Don't allow construction of scheme relative urls. 
return escape_leading_slashes(url) # lookup_view can be URL name or callable, but callables are not # friendly in error messages. m = getattr(lookup_view, "__module__", None) n = getattr(lookup_view, "__name__", None) if m is not None and n is not None: lookup_view_s = "%s.%s" % (m, n) else: lookup_view_s = lookup_view patterns = [pattern for (_, pattern, _, _) in possibilities] if patterns: if args: arg_msg = "arguments '%s'" % (args,) elif kwargs: arg_msg = "keyword arguments '%s'" % kwargs else: arg_msg = "no arguments" msg = "Reverse for '%s' with %s not found. %d pattern(s) tried: %s" % ( lookup_view_s, arg_msg, len(patterns), patterns, ) else: msg = ( "Reverse for '%(view)s' not found. '%(view)s' is not " "a valid view function or pattern name." % {"view": lookup_view_s} ) raise NoReverseMatch(msg)
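# Usage sketch (not part of django/urls/resolvers.py) showing the machinery
# above from the caller's side: path() builds a RoutePattern-backed URLPattern,
# resolve() walks the resolver tree and returns a ResolverMatch, and reverse()
# runs the lookup in the opposite direction. The URLconf is this module itself,
# so the script is self-contained; the view and route names are illustrative.
import django
from django.conf import settings
from django.http import HttpResponse
from django.urls import path, resolve, reverse


def article_detail(request, year, slug):
    return HttpResponse("%s/%s" % (year, slug))


urlpatterns = [
    path("articles/<int:year>/<slug:slug>/", article_detail, name="article-detail"),
]

if not settings.configured:
    settings.configure(ROOT_URLCONF=__name__)
    django.setup()

match = resolve("/articles/2023/django-urls/")
# ResolverMatch exposes the callback and the converted keyword arguments.
assert match.func is article_detail
assert match.kwargs == {"year": 2023, "slug": "django-urls"}
assert (
    reverse("article-detail", kwargs={"year": 2023, "slug": "django-urls"})
    == "/articles/2023/django-urls/"
)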
""" Helper functions for creating Form classes from Django models and database field objects. """ from itertools import chain from django.core.exceptions import ( NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError, ) from django.forms.fields import ChoiceField, Field from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass from django.forms.formsets import BaseFormSet, formset_factory from django.forms.utils import ErrorList from django.forms.widgets import ( HiddenInput, MultipleHiddenInput, RadioSelect, SelectMultiple, ) from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext from django.utils.translation import gettext_lazy as _ __all__ = ( "ModelForm", "BaseModelForm", "model_to_dict", "fields_for_model", "ModelChoiceField", "ModelMultipleChoiceField", "ALL_FIELDS", "BaseModelFormSet", "modelformset_factory", "BaseInlineFormSet", "inlineformset_factory", "modelform_factory", ) ALL_FIELDS = "__all__" def construct_instance(form, instance, fields=None, exclude=None): """ Construct and return a model instance from the bound ``form``'s ``cleaned_data``, but do not save the returned instance to the database. """ from django.db import models opts = instance._meta cleaned_data = form.cleaned_data file_field_list = [] for f in opts.fields: if ( not f.editable or isinstance(f, models.AutoField) or f.name not in cleaned_data ): continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue # Leave defaults for fields that aren't in POST data, except for # checkbox inputs because they don't appear in POST data if not checked. if ( f.has_default() and form[f.name].field.widget.value_omitted_from_data( form.data, form.files, form.add_prefix(f.name) ) and cleaned_data.get(f.name) in form[f.name].field.empty_values ): continue # Defer saving file-type fields until after the other fields, so a # callable upload_to can use the values from other fields. if isinstance(f, models.FileField): file_field_list.append(f) else: f.save_form_data(instance, cleaned_data[f.name]) for f in file_field_list: f.save_form_data(instance, cleaned_data[f.name]) return instance # ModelForms ################################################################# def model_to_dict(instance, fields=None, exclude=None): """ Return a dict containing the data in ``instance`` suitable for passing as a Form's ``initial`` keyword argument. ``fields`` is an optional list of field names. If provided, return only the named. ``exclude`` is an optional list of field names. If provided, exclude the named from the returned dict, even if they are listed in the ``fields`` argument. """ opts = instance._meta data = {} for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many): if not getattr(f, "editable", False): continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue data[f.name] = f.value_from_object(instance) return data def apply_limit_choices_to_to_formfield(formfield): """Apply limit_choices_to to the formfield's queryset if needed.""" from django.db.models import Exists, OuterRef, Q if hasattr(formfield, "queryset") and hasattr(formfield, "get_limit_choices_to"): limit_choices_to = formfield.get_limit_choices_to() if limit_choices_to: complex_filter = limit_choices_to if not isinstance(complex_filter, Q): complex_filter = Q(**limit_choices_to) complex_filter &= Q(pk=OuterRef("pk")) # Use Exists() to avoid potential duplicates. 
formfield.queryset = formfield.queryset.filter( Exists(formfield.queryset.model._base_manager.filter(complex_filter)), ) def fields_for_model( model, fields=None, exclude=None, widgets=None, formfield_callback=None, localized_fields=None, labels=None, help_texts=None, error_messages=None, field_classes=None, *, apply_limit_choices_to=True, ): """ Return a dictionary containing form fields for the given model. ``fields`` is an optional list of field names. If provided, return only the named fields. ``exclude`` is an optional list of field names. If provided, exclude the named fields from the returned fields, even if they are listed in the ``fields`` argument. ``widgets`` is a dictionary of model field names mapped to a widget. ``formfield_callback`` is a callable that takes a model field and returns a form field. ``localized_fields`` is a list of names of fields which should be localized. ``labels`` is a dictionary of model field names mapped to a label. ``help_texts`` is a dictionary of model field names mapped to a help text. ``error_messages`` is a dictionary of model field names mapped to a dictionary of error messages. ``field_classes`` is a dictionary of model field names mapped to a form field class. ``apply_limit_choices_to`` is a boolean indicating if limit_choices_to should be applied to a field's queryset. """ field_dict = {} ignored = [] opts = model._meta # Avoid circular import from django.db.models import Field as ModelField sortable_private_fields = [ f for f in opts.private_fields if isinstance(f, ModelField) ] for f in sorted( chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many) ): if not getattr(f, "editable", False): if ( fields is not None and f.name in fields and (exclude is None or f.name not in exclude) ): raise FieldError( "'%s' cannot be specified for %s model form as it is a " "non-editable field" % (f.name, model.__name__) ) continue if fields is not None and f.name not in fields: continue if exclude and f.name in exclude: continue kwargs = {} if widgets and f.name in widgets: kwargs["widget"] = widgets[f.name] if localized_fields == ALL_FIELDS or ( localized_fields and f.name in localized_fields ): kwargs["localize"] = True if labels and f.name in labels: kwargs["label"] = labels[f.name] if help_texts and f.name in help_texts: kwargs["help_text"] = help_texts[f.name] if error_messages and f.name in error_messages: kwargs["error_messages"] = error_messages[f.name] if field_classes and f.name in field_classes: kwargs["form_class"] = field_classes[f.name] if formfield_callback is None: formfield = f.formfield(**kwargs) elif not callable(formfield_callback): raise TypeError("formfield_callback must be a function or callable") else: formfield = formfield_callback(f, **kwargs) if formfield: if apply_limit_choices_to: apply_limit_choices_to_to_formfield(formfield) field_dict[f.name] = formfield else: ignored.append(f.name) if fields: field_dict = { f: field_dict.get(f) for f in fields if (not exclude or f not in exclude) and f not in ignored } return field_dict class ModelFormOptions: def __init__(self, options=None): self.model = getattr(options, "model", None) self.fields = getattr(options, "fields", None) self.exclude = getattr(options, "exclude", None) self.widgets = getattr(options, "widgets", None) self.localized_fields = getattr(options, "localized_fields", None) self.labels = getattr(options, "labels", None) self.help_texts = getattr(options, "help_texts", None) self.error_messages = getattr(options, "error_messages", None) self.field_classes 
= getattr(options, "field_classes", None) self.formfield_callback = getattr(options, "formfield_callback", None) class ModelFormMetaclass(DeclarativeFieldsMetaclass): def __new__(mcs, name, bases, attrs): new_class = super().__new__(mcs, name, bases, attrs) if bases == (BaseModelForm,): return new_class opts = new_class._meta = ModelFormOptions(getattr(new_class, "Meta", None)) # We check if a string was passed to `fields` or `exclude`, # which is likely to be a mistake where the user typed ('foo') instead # of ('foo',) for opt in ["fields", "exclude", "localized_fields"]: value = getattr(opts, opt) if isinstance(value, str) and value != ALL_FIELDS: msg = ( "%(model)s.Meta.%(opt)s cannot be a string. " "Did you mean to type: ('%(value)s',)?" % { "model": new_class.__name__, "opt": opt, "value": value, } ) raise TypeError(msg) if opts.model: # If a model is defined, extract form fields from it. if opts.fields is None and opts.exclude is None: raise ImproperlyConfigured( "Creating a ModelForm without either the 'fields' attribute " "or the 'exclude' attribute is prohibited; form %s " "needs updating." % name ) if opts.fields == ALL_FIELDS: # Sentinel for fields_for_model to indicate "get the list of # fields from the model" opts.fields = None fields = fields_for_model( opts.model, opts.fields, opts.exclude, opts.widgets, opts.formfield_callback, opts.localized_fields, opts.labels, opts.help_texts, opts.error_messages, opts.field_classes, # limit_choices_to will be applied during ModelForm.__init__(). apply_limit_choices_to=False, ) # make sure opts.fields doesn't specify an invalid field none_model_fields = {k for k, v in fields.items() if not v} missing_fields = none_model_fields.difference(new_class.declared_fields) if missing_fields: message = "Unknown field(s) (%s) specified for %s" message = message % (", ".join(missing_fields), opts.model.__name__) raise FieldError(message) # Override default model fields with any custom declared ones # (plus, include all the other declared fields). fields.update(new_class.declared_fields) else: fields = new_class.declared_fields new_class.base_fields = fields return new_class class BaseModelForm(BaseForm): def __init__( self, data=None, files=None, auto_id="id_%s", prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, instance=None, use_required_attribute=None, renderer=None, ): opts = self._meta if opts.model is None: raise ValueError("ModelForm has no model class specified.") if instance is None: # if we didn't get an instance, instantiate a new one self.instance = opts.model() object_data = {} else: self.instance = instance object_data = model_to_dict(instance, opts.fields, opts.exclude) # if initial was provided, it should override the values from instance if initial is not None: object_data.update(initial) # self._validate_unique will be set to True by BaseModelForm.clean(). # It is False by default so overriding self.clean() and failing to call # super will stop validate_unique from being called. self._validate_unique = False super().__init__( data, files, auto_id, prefix, object_data, error_class, label_suffix, empty_permitted, use_required_attribute=use_required_attribute, renderer=renderer, ) for formfield in self.fields.values(): apply_limit_choices_to_to_formfield(formfield) def _get_validation_exclusions(self): """ For backwards-compatibility, exclude several types of fields from model validation. See tickets #12507, #12521, #12553. 
""" exclude = set() # Build up a list of fields that should be excluded from model field # validation and unique checks. for f in self.instance._meta.fields: field = f.name # Exclude fields that aren't on the form. The developer may be # adding these values to the model after form validation. if field not in self.fields: exclude.add(f.name) # Don't perform model validation on fields that were defined # manually on the form and excluded via the ModelForm's Meta # class. See #12901. elif self._meta.fields and field not in self._meta.fields: exclude.add(f.name) elif self._meta.exclude and field in self._meta.exclude: exclude.add(f.name) # Exclude fields that failed form validation. There's no need for # the model fields to validate them as well. elif field in self._errors: exclude.add(f.name) # Exclude empty fields that are not required by the form, if the # underlying model field is required. This keeps the model field # from raising a required error. Note: don't exclude the field from # validation if the model field allows blanks. If it does, the blank # value may be included in a unique check, so cannot be excluded # from validation. else: form_field = self.fields[field] field_value = self.cleaned_data.get(field) if ( not f.blank and not form_field.required and field_value in form_field.empty_values ): exclude.add(f.name) return exclude def clean(self): self._validate_unique = True return self.cleaned_data def _update_errors(self, errors): # Override any validation error messages defined at the model level # with those defined at the form level. opts = self._meta # Allow the model generated by construct_instance() to raise # ValidationError and have them handled in the same way as others. if hasattr(errors, "error_dict"): error_dict = errors.error_dict else: error_dict = {NON_FIELD_ERRORS: errors} for field, messages in error_dict.items(): if ( field == NON_FIELD_ERRORS and opts.error_messages and NON_FIELD_ERRORS in opts.error_messages ): error_messages = opts.error_messages[NON_FIELD_ERRORS] elif field in self.fields: error_messages = self.fields[field].error_messages else: continue for message in messages: if ( isinstance(message, ValidationError) and message.code in error_messages ): message.message = error_messages[message.code] self.add_error(None, errors) def _post_clean(self): opts = self._meta exclude = self._get_validation_exclusions() # Foreign Keys being used to represent inline relationships # are excluded from basic field value validation. This is for two # reasons: firstly, the value may not be supplied (#12507; the # case of providing new values to the admin); secondly the # object being referred to may not yet fully exist (#12749). # However, these fields *must* be included in uniqueness checks, # so this can't be part of _get_validation_exclusions(). for name, field in self.fields.items(): if isinstance(field, InlineForeignKeyField): exclude.add(name) try: self.instance = construct_instance( self, self.instance, opts.fields, opts.exclude ) except ValidationError as e: self._update_errors(e) try: self.instance.full_clean(exclude=exclude, validate_unique=False) except ValidationError as e: self._update_errors(e) # Validate uniqueness if needed. if self._validate_unique: self.validate_unique() def validate_unique(self): """ Call the instance's validate_unique() method and update the form's validation errors if any were raised. 
""" exclude = self._get_validation_exclusions() try: self.instance.validate_unique(exclude=exclude) except ValidationError as e: self._update_errors(e) def _save_m2m(self): """ Save the many-to-many fields and generic relations for this form. """ cleaned_data = self.cleaned_data exclude = self._meta.exclude fields = self._meta.fields opts = self.instance._meta # Note that for historical reasons we want to include also # private_fields here. (GenericRelation was previously a fake # m2m field). for f in chain(opts.many_to_many, opts.private_fields): if not hasattr(f, "save_form_data"): continue if fields and f.name not in fields: continue if exclude and f.name in exclude: continue if f.name in cleaned_data: f.save_form_data(self.instance, cleaned_data[f.name]) def save(self, commit=True): """ Save this form's self.instance object if commit=True. Otherwise, add a save_m2m() method to the form which can be called after the instance is saved manually at a later time. Return the model instance. """ if self.errors: raise ValueError( "The %s could not be %s because the data didn't validate." % ( self.instance._meta.object_name, "created" if self.instance._state.adding else "changed", ) ) if commit: # If committing, save the instance and the m2m data immediately. self.instance.save() self._save_m2m() else: # If not committing, add a method to the form to allow deferred # saving of m2m data. self.save_m2m = self._save_m2m return self.instance save.alters_data = True class ModelForm(BaseModelForm, metaclass=ModelFormMetaclass): pass def modelform_factory( model, form=ModelForm, fields=None, exclude=None, formfield_callback=None, widgets=None, localized_fields=None, labels=None, help_texts=None, error_messages=None, field_classes=None, ): """ Return a ModelForm containing form fields for the given model. You can optionally pass a `form` argument to use as a starting point for constructing the ModelForm. ``fields`` is an optional list of field names. If provided, include only the named fields in the returned fields. If omitted or '__all__', use all fields. ``exclude`` is an optional list of field names. If provided, exclude the named fields from the returned fields, even if they are listed in the ``fields`` argument. ``widgets`` is a dictionary of model field names mapped to a widget. ``localized_fields`` is a list of names of fields which should be localized. ``formfield_callback`` is a callable that takes a model field and returns a form field. ``labels`` is a dictionary of model field names mapped to a label. ``help_texts`` is a dictionary of model field names mapped to a help text. ``error_messages`` is a dictionary of model field names mapped to a dictionary of error messages. ``field_classes`` is a dictionary of model field names mapped to a form field class. """ # Create the inner Meta class. FIXME: ideally, we should be able to # construct a ModelForm without creating and passing in a temporary # inner class. # Build up a list of attributes that the Meta object will have. 
attrs = {"model": model} if fields is not None: attrs["fields"] = fields if exclude is not None: attrs["exclude"] = exclude if widgets is not None: attrs["widgets"] = widgets if localized_fields is not None: attrs["localized_fields"] = localized_fields if labels is not None: attrs["labels"] = labels if help_texts is not None: attrs["help_texts"] = help_texts if error_messages is not None: attrs["error_messages"] = error_messages if field_classes is not None: attrs["field_classes"] = field_classes # If parent form class already has an inner Meta, the Meta we're # creating needs to inherit from the parent's inner meta. bases = (form.Meta,) if hasattr(form, "Meta") else () Meta = type("Meta", bases, attrs) if formfield_callback: Meta.formfield_callback = staticmethod(formfield_callback) # Give this new form class a reasonable name. class_name = model.__name__ + "Form" # Class attributes for the new form class. form_class_attrs = {"Meta": Meta} if getattr(Meta, "fields", None) is None and getattr(Meta, "exclude", None) is None: raise ImproperlyConfigured( "Calling modelform_factory without defining 'fields' or " "'exclude' explicitly is prohibited." ) # Instantiate type(form) in order to use the same metaclass as form. return type(form)(class_name, (form,), form_class_attrs) # ModelFormSets ############################################################## class BaseModelFormSet(BaseFormSet): """ A ``FormSet`` for editing a queryset and/or adding new objects to it. """ model = None edit_only = False # Set of fields that must be unique among forms of this set. unique_fields = set() def __init__( self, data=None, files=None, auto_id="id_%s", prefix=None, queryset=None, *, initial=None, **kwargs, ): self.queryset = queryset self.initial_extra = initial super().__init__( **{ "data": data, "files": files, "auto_id": auto_id, "prefix": prefix, **kwargs, } ) def initial_form_count(self): """Return the number of forms that are required in this FormSet.""" if not self.is_bound: return len(self.get_queryset()) return super().initial_form_count() def _existing_object(self, pk): if not hasattr(self, "_object_dict"): self._object_dict = {o.pk: o for o in self.get_queryset()} return self._object_dict.get(pk) def _get_to_python(self, field): """ If the field is a related field, fetch the concrete field's (that is, the ultimate pointed-to field's) to_python. """ while field.remote_field is not None: field = field.remote_field.get_related_field() return field.to_python def _construct_form(self, i, **kwargs): pk_required = i < self.initial_form_count() if pk_required: if self.is_bound: pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name) try: pk = self.data[pk_key] except KeyError: # The primary key is missing. The user may have tampered # with POST data. pass else: to_python = self._get_to_python(self.model._meta.pk) try: pk = to_python(pk) except ValidationError: # The primary key exists but is an invalid value. The # user may have tampered with POST data. 
pass else: kwargs["instance"] = self._existing_object(pk) else: kwargs["instance"] = self.get_queryset()[i] elif self.initial_extra: # Set initial values for extra forms try: kwargs["initial"] = self.initial_extra[i - self.initial_form_count()] except IndexError: pass form = super()._construct_form(i, **kwargs) if pk_required: form.fields[self.model._meta.pk.name].required = True return form def get_queryset(self): if not hasattr(self, "_queryset"): if self.queryset is not None: qs = self.queryset else: qs = self.model._default_manager.get_queryset() # If the queryset isn't already ordered we need to add an # artificial ordering here to make sure that all formsets # constructed from this queryset have the same form order. if not qs.ordered: qs = qs.order_by(self.model._meta.pk.name) # Removed queryset limiting here. As per discussion re: #13023 # on django-dev, max_num should not prevent existing # related objects/inlines from being displayed. self._queryset = qs return self._queryset def save_new(self, form, commit=True): """Save and return a new model instance for the given form.""" return form.save(commit=commit) def save_existing(self, form, instance, commit=True): """Save and return an existing model instance for the given form.""" return form.save(commit=commit) def delete_existing(self, obj, commit=True): """Deletes an existing model instance.""" if commit: obj.delete() def save(self, commit=True): """ Save model instances for every form, adding and changing instances as necessary, and return the list of instances. """ if not commit: self.saved_forms = [] def save_m2m(): for form in self.saved_forms: form.save_m2m() self.save_m2m = save_m2m if self.edit_only: return self.save_existing_objects(commit) else: return self.save_existing_objects(commit) + self.save_new_objects(commit) save.alters_data = True def clean(self): self.validate_unique() def validate_unique(self): # Collect unique_checks and date_checks to run from all the forms. all_unique_checks = set() all_date_checks = set() forms_to_delete = self.deleted_forms valid_forms = [ form for form in self.forms if form.is_valid() and form not in forms_to_delete ] for form in valid_forms: exclude = form._get_validation_exclusions() unique_checks, date_checks = form.instance._get_unique_checks( exclude=exclude, include_meta_constraints=True, ) all_unique_checks.update(unique_checks) all_date_checks.update(date_checks) errors = [] # Do each of the unique checks (unique and unique_together) for uclass, unique_check in all_unique_checks: seen_data = set() for form in valid_forms: # Get the data for the set of fields that must be unique among # the forms. row_data = ( field if field in self.unique_fields else form.cleaned_data[field] for field in unique_check if field in form.cleaned_data ) # Reduce Model instances to their primary key values row_data = tuple( d._get_pk_val() if hasattr(d, "_get_pk_val") # Prevent "unhashable type: list" errors later on. else tuple(d) if isinstance(d, list) else d for d in row_data ) if row_data and None not in row_data: # if we've already seen it then we have a uniqueness failure if row_data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_unique_error_message(unique_check)) form._errors[NON_FIELD_ERRORS] = self.error_class( [self.get_form_error()], renderer=self.renderer, ) # Remove the data from the cleaned_data dict since it # was invalid. 
for field in unique_check: if field in form.cleaned_data: del form.cleaned_data[field] # mark the data as seen seen_data.add(row_data) # iterate over each of the date checks now for date_check in all_date_checks: seen_data = set() uclass, lookup, field, unique_for = date_check for form in valid_forms: # see if we have data for both fields if ( form.cleaned_data and form.cleaned_data[field] is not None and form.cleaned_data[unique_for] is not None ): # if it's a date lookup we need to get the data for all the fields if lookup == "date": date = form.cleaned_data[unique_for] date_data = (date.year, date.month, date.day) # otherwise it's just the attribute on the date/datetime # object else: date_data = (getattr(form.cleaned_data[unique_for], lookup),) data = (form.cleaned_data[field],) + date_data # if we've already seen it then we have a uniqueness failure if data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_date_error_message(date_check)) form._errors[NON_FIELD_ERRORS] = self.error_class( [self.get_form_error()], renderer=self.renderer, ) # Remove the data from the cleaned_data dict since it # was invalid. del form.cleaned_data[field] # mark the data as seen seen_data.add(data) if errors: raise ValidationError(errors) def get_unique_error_message(self, unique_check): if len(unique_check) == 1: return gettext("Please correct the duplicate data for %(field)s.") % { "field": unique_check[0], } else: return gettext( "Please correct the duplicate data for %(field)s, which must be unique." ) % { "field": get_text_list(unique_check, _("and")), } def get_date_error_message(self, date_check): return gettext( "Please correct the duplicate data for %(field_name)s " "which must be unique for the %(lookup)s in %(date_field)s." ) % { "field_name": date_check[2], "date_field": date_check[3], "lookup": str(date_check[1]), } def get_form_error(self): return gettext("Please correct the duplicate values below.") def save_existing_objects(self, commit=True): self.changed_objects = [] self.deleted_objects = [] if not self.initial_forms: return [] saved_instances = [] forms_to_delete = self.deleted_forms for form in self.initial_forms: obj = form.instance # If the pk is None, it means either: # 1. The object is an unexpected empty model, created by invalid # POST data such as an object outside the formset's queryset. # 2. The object was already deleted from the database. if obj.pk is None: continue if form in forms_to_delete: self.deleted_objects.append(obj) self.delete_existing(obj, commit=commit) elif form.has_changed(): self.changed_objects.append((obj, form.changed_data)) saved_instances.append(self.save_existing(form, obj, commit=commit)) if not commit: self.saved_forms.append(form) return saved_instances def save_new_objects(self, commit=True): self.new_objects = [] for form in self.extra_forms: if not form.has_changed(): continue # If someone has marked an add form for deletion, don't save the # object. if self.can_delete and self._should_delete_form(form): continue self.new_objects.append(self.save_new(form, commit=commit)) if not commit: self.saved_forms.append(form) return self.new_objects def add_fields(self, form, index): """Add a hidden field for the object's primary key.""" from django.db.models import AutoField, ForeignKey, OneToOneField self._pk_field = pk = self.model._meta.pk # If a pk isn't editable, then it won't be on the form, so we need to # add it here so we can tell which object is which when we get the # data back. 
Generally, pk.editable should be false, but for some # reason, auto_created pk fields and AutoField's editable attribute is # True, so check for that as well. def pk_is_not_editable(pk): return ( (not pk.editable) or (pk.auto_created or isinstance(pk, AutoField)) or ( pk.remote_field and pk.remote_field.parent_link and pk_is_not_editable(pk.remote_field.model._meta.pk) ) ) if pk_is_not_editable(pk) or pk.name not in form.fields: if form.is_bound: # If we're adding the related instance, ignore its primary key # as it could be an auto-generated default which isn't actually # in the database. pk_value = None if form.instance._state.adding else form.instance.pk else: try: if index is not None: pk_value = self.get_queryset()[index].pk else: pk_value = None except IndexError: pk_value = None if isinstance(pk, (ForeignKey, OneToOneField)): qs = pk.remote_field.model._default_manager.get_queryset() else: qs = self.model._default_manager.get_queryset() qs = qs.using(form.instance._state.db) if form._meta.widgets: widget = form._meta.widgets.get(self._pk_field.name, HiddenInput) else: widget = HiddenInput form.fields[self._pk_field.name] = ModelChoiceField( qs, initial=pk_value, required=False, widget=widget ) super().add_fields(form, index) def modelformset_factory( model, form=ModelForm, formfield_callback=None, formset=BaseModelFormSet, extra=1, can_delete=False, can_order=False, max_num=None, fields=None, exclude=None, widgets=None, validate_max=False, localized_fields=None, labels=None, help_texts=None, error_messages=None, min_num=None, validate_min=False, field_classes=None, absolute_max=None, can_delete_extra=True, renderer=None, edit_only=False, ): """Return a FormSet class for the given Django model class.""" meta = getattr(form, "Meta", None) if ( getattr(meta, "fields", fields) is None and getattr(meta, "exclude", exclude) is None ): raise ImproperlyConfigured( "Calling modelformset_factory without defining 'fields' or " "'exclude' explicitly is prohibited." ) form = modelform_factory( model, form=form, fields=fields, exclude=exclude, formfield_callback=formfield_callback, widgets=widgets, localized_fields=localized_fields, labels=labels, help_texts=help_texts, error_messages=error_messages, field_classes=field_classes, ) FormSet = formset_factory( form, formset, extra=extra, min_num=min_num, max_num=max_num, can_order=can_order, can_delete=can_delete, validate_min=validate_min, validate_max=validate_max, absolute_max=absolute_max, can_delete_extra=can_delete_extra, renderer=renderer, ) FormSet.model = model FormSet.edit_only = edit_only return FormSet # InlineFormSets ############################################################# class BaseInlineFormSet(BaseModelFormSet): """A formset for child objects related to a parent.""" def __init__( self, data=None, files=None, instance=None, save_as_new=False, prefix=None, queryset=None, **kwargs, ): if instance is None: self.instance = self.fk.remote_field.model() else: self.instance = instance self.save_as_new = save_as_new if queryset is None: queryset = self.model._default_manager if self.instance.pk is not None: qs = queryset.filter(**{self.fk.name: self.instance}) else: qs = queryset.none() self.unique_fields = {self.fk.name} super().__init__(data, files, prefix=prefix, queryset=qs, **kwargs) # Add the generated field to form._meta.fields if it's defined to make # sure validation isn't skipped on that field. 
if self.form._meta.fields and self.fk.name not in self.form._meta.fields: if isinstance(self.form._meta.fields, tuple): self.form._meta.fields = list(self.form._meta.fields) self.form._meta.fields.append(self.fk.name) def initial_form_count(self): if self.save_as_new: return 0 return super().initial_form_count() def _construct_form(self, i, **kwargs): form = super()._construct_form(i, **kwargs) if self.save_as_new: mutable = getattr(form.data, "_mutable", None) # Allow modifying an immutable QueryDict. if mutable is not None: form.data._mutable = True # Remove the primary key from the form's data, we are only # creating new instances form.data[form.add_prefix(self._pk_field.name)] = None # Remove the foreign key from the form's data form.data[form.add_prefix(self.fk.name)] = None if mutable is not None: form.data._mutable = mutable # Set the fk value here so that the form can do its validation. fk_value = self.instance.pk if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name: fk_value = getattr(self.instance, self.fk.remote_field.field_name) fk_value = getattr(fk_value, "pk", fk_value) setattr(form.instance, self.fk.get_attname(), fk_value) return form @classmethod def get_default_prefix(cls): return cls.fk.remote_field.get_accessor_name(model=cls.model).replace("+", "") def save_new(self, form, commit=True): # Ensure the latest copy of the related instance is present on each # form (it may have been saved after the formset was originally # instantiated). setattr(form.instance, self.fk.name, self.instance) return super().save_new(form, commit=commit) def add_fields(self, form, index): super().add_fields(form, index) if self._pk_field == self.fk: name = self._pk_field.name kwargs = {"pk_field": True} else: # The foreign key field might not be on the form, so we poke at the # Model field to get the label, since we need that for error messages. name = self.fk.name kwargs = { "label": getattr( form.fields.get(name), "label", capfirst(self.fk.verbose_name) ) } # The InlineForeignKeyField assumes that the foreign key relation is # based on the parent model's pk. If this isn't the case, set to_field # to correctly resolve the initial form value. if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name: kwargs["to_field"] = self.fk.remote_field.field_name # If we're adding a new object, ignore a parent's auto-generated key # as it will be regenerated on the save request. if self.instance._state.adding: if kwargs.get("to_field") is not None: to_field = self.instance._meta.get_field(kwargs["to_field"]) else: to_field = self.instance._meta.pk if to_field.has_default(): setattr(self.instance, to_field.attname, None) form.fields[name] = InlineForeignKeyField(self.instance, **kwargs) def get_unique_error_message(self, unique_check): unique_check = [field for field in unique_check if field != self.fk.name] return super().get_unique_error_message(unique_check) def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False): """ Find and return the ForeignKey from model to parent if there is one (return None if can_fail is True and no such field exists). If fk_name is provided, assume it is the name of the ForeignKey field. Unless can_fail is True, raise an exception if there isn't a ForeignKey from model to parent_model. 
""" # avoid circular import from django.db.models import ForeignKey opts = model._meta if fk_name: fks_to_parent = [f for f in opts.fields if f.name == fk_name] if len(fks_to_parent) == 1: fk = fks_to_parent[0] parent_list = parent_model._meta.get_parent_list() if ( not isinstance(fk, ForeignKey) or ( # ForeignKey to proxy models. fk.remote_field.model._meta.proxy and fk.remote_field.model._meta.proxy_for_model not in parent_list ) or ( # ForeignKey to concrete models. not fk.remote_field.model._meta.proxy and fk.remote_field.model != parent_model and fk.remote_field.model not in parent_list ) ): raise ValueError( "fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label) ) elif not fks_to_parent: raise ValueError( "'%s' has no field named '%s'." % (model._meta.label, fk_name) ) else: # Try to discover what the ForeignKey from model to parent_model is parent_list = parent_model._meta.get_parent_list() fks_to_parent = [ f for f in opts.fields if isinstance(f, ForeignKey) and ( f.remote_field.model == parent_model or f.remote_field.model in parent_list or ( f.remote_field.model._meta.proxy and f.remote_field.model._meta.proxy_for_model in parent_list ) ) ] if len(fks_to_parent) == 1: fk = fks_to_parent[0] elif not fks_to_parent: if can_fail: return raise ValueError( "'%s' has no ForeignKey to '%s'." % ( model._meta.label, parent_model._meta.label, ) ) else: raise ValueError( "'%s' has more than one ForeignKey to '%s'. You must specify " "a 'fk_name' attribute." % ( model._meta.label, parent_model._meta.label, ) ) return fk def inlineformset_factory( parent_model, model, form=ModelForm, formset=BaseInlineFormSet, fk_name=None, fields=None, exclude=None, extra=3, can_order=False, can_delete=True, max_num=None, formfield_callback=None, widgets=None, validate_max=False, localized_fields=None, labels=None, help_texts=None, error_messages=None, min_num=None, validate_min=False, field_classes=None, absolute_max=None, can_delete_extra=True, renderer=None, edit_only=False, ): """ Return an ``InlineFormSet`` for the given kwargs. ``fk_name`` must be provided if ``model`` has more than one ``ForeignKey`` to ``parent_model``. """ fk = _get_foreign_key(parent_model, model, fk_name=fk_name) # enforce a max_num=1 when the foreign key to the parent model is unique. if fk.unique: max_num = 1 kwargs = { "form": form, "formfield_callback": formfield_callback, "formset": formset, "extra": extra, "can_delete": can_delete, "can_order": can_order, "fields": fields, "exclude": exclude, "min_num": min_num, "max_num": max_num, "widgets": widgets, "validate_min": validate_min, "validate_max": validate_max, "localized_fields": localized_fields, "labels": labels, "help_texts": help_texts, "error_messages": error_messages, "field_classes": field_classes, "absolute_max": absolute_max, "can_delete_extra": can_delete_extra, "renderer": renderer, "edit_only": edit_only, } FormSet = modelformset_factory(model, **kwargs) FormSet.fk = fk return FormSet # Fields ##################################################################### class InlineForeignKeyField(Field): """ A basic integer field that deals with validating the given value to a given parent instance in an inline. 
""" widget = HiddenInput default_error_messages = { "invalid_choice": _("The inline value did not match the parent instance."), } def __init__(self, parent_instance, *args, pk_field=False, to_field=None, **kwargs): self.parent_instance = parent_instance self.pk_field = pk_field self.to_field = to_field if self.parent_instance is not None: if self.to_field: kwargs["initial"] = getattr(self.parent_instance, self.to_field) else: kwargs["initial"] = self.parent_instance.pk kwargs["required"] = False super().__init__(*args, **kwargs) def clean(self, value): if value in self.empty_values: if self.pk_field: return None # if there is no value act as we did before. return self.parent_instance # ensure the we compare the values as equal types. if self.to_field: orig = getattr(self.parent_instance, self.to_field) else: orig = self.parent_instance.pk if str(value) != str(orig): raise ValidationError( self.error_messages["invalid_choice"], code="invalid_choice" ) return self.parent_instance def has_changed(self, initial, data): return False class ModelChoiceIteratorValue: def __init__(self, value, instance): self.value = value self.instance = instance def __str__(self): return str(self.value) def __hash__(self): return hash(self.value) def __eq__(self, other): if isinstance(other, ModelChoiceIteratorValue): other = other.value return self.value == other class ModelChoiceIterator: def __init__(self, field): self.field = field self.queryset = field.queryset def __iter__(self): if self.field.empty_label is not None: yield ("", self.field.empty_label) queryset = self.queryset # Can't use iterator() when queryset uses prefetch_related() if not queryset._prefetch_related_lookups: queryset = queryset.iterator() for obj in queryset: yield self.choice(obj) def __len__(self): # count() adds a query but uses less memory since the QuerySet results # won't be cached. In most cases, the choices will only be iterated on, # and __len__() won't be called. return self.queryset.count() + (1 if self.field.empty_label is not None else 0) def __bool__(self): return self.field.empty_label is not None or self.queryset.exists() def choice(self, obj): return ( ModelChoiceIteratorValue(self.field.prepare_value(obj), obj), self.field.label_from_instance(obj), ) class ModelChoiceField(ChoiceField): """A ChoiceField whose choices are a model QuerySet.""" # This class is a subclass of ChoiceField for purity, but it doesn't # actually use any of ChoiceField's implementation. default_error_messages = { "invalid_choice": _( "Select a valid choice. That choice is not one of the available choices." ), } iterator = ModelChoiceIterator def __init__( self, queryset, *, empty_label="---------", required=True, widget=None, label=None, initial=None, help_text="", to_field_name=None, limit_choices_to=None, blank=False, **kwargs, ): # Call Field instead of ChoiceField __init__() because we don't need # ChoiceField.__init__(). Field.__init__( self, required=required, widget=widget, label=label, initial=initial, help_text=help_text, **kwargs, ) if (required and initial is not None) or ( isinstance(self.widget, RadioSelect) and not blank ): self.empty_label = None else: self.empty_label = empty_label self.queryset = queryset self.limit_choices_to = limit_choices_to # limit the queryset later. self.to_field_name = to_field_name def get_limit_choices_to(self): """ Return ``limit_choices_to`` for this form field. If it is a callable, invoke it and return the result. 
""" if callable(self.limit_choices_to): return self.limit_choices_to() return self.limit_choices_to def __deepcopy__(self, memo): result = super(ChoiceField, self).__deepcopy__(memo) # Need to force a new ModelChoiceIterator to be created, bug #11183 if self.queryset is not None: result.queryset = self.queryset.all() return result def _get_queryset(self): return self._queryset def _set_queryset(self, queryset): self._queryset = None if queryset is None else queryset.all() self.widget.choices = self.choices queryset = property(_get_queryset, _set_queryset) # this method will be used to create object labels by the QuerySetIterator. # Override it to customize the label. def label_from_instance(self, obj): """ Convert objects into strings and generate the labels for the choices presented by this object. Subclasses can override this method to customize the display of the choices. """ return str(obj) def _get_choices(self): # If self._choices is set, then somebody must have manually set # the property self.choices. In this case, just return self._choices. if hasattr(self, "_choices"): return self._choices # Otherwise, execute the QuerySet in self.queryset to determine the # choices dynamically. Return a fresh ModelChoiceIterator that has not been # consumed. Note that we're instantiating a new ModelChoiceIterator *each* # time _get_choices() is called (and, thus, each time self.choices is # accessed) so that we can ensure the QuerySet has not been consumed. This # construct might look complicated but it allows for lazy evaluation of # the queryset. return self.iterator(self) choices = property(_get_choices, ChoiceField._set_choices) def prepare_value(self, value): if hasattr(value, "_meta"): if self.to_field_name: return value.serializable_value(self.to_field_name) else: return value.pk return super().prepare_value(value) def to_python(self, value): if value in self.empty_values: return None try: key = self.to_field_name or "pk" if isinstance(value, self.queryset.model): value = getattr(value, key) value = self.queryset.get(**{key: value}) except (ValueError, TypeError, self.queryset.model.DoesNotExist): raise ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": value}, ) return value def validate(self, value): return Field.validate(self, value) def has_changed(self, initial, data): if self.disabled: return False initial_value = initial if initial is not None else "" data_value = data if data is not None else "" return str(self.prepare_value(initial_value)) != str(data_value) class ModelMultipleChoiceField(ModelChoiceField): """A MultipleChoiceField whose choices are a model QuerySet.""" widget = SelectMultiple hidden_widget = MultipleHiddenInput default_error_messages = { "invalid_list": _("Enter a list of values."), "invalid_choice": _( "Select a valid choice. %(value)s is not one of the available choices." 
), "invalid_pk_value": _("“%(pk)s” is not a valid value."), } def __init__(self, queryset, **kwargs): super().__init__(queryset, empty_label=None, **kwargs) def to_python(self, value): if not value: return [] return list(self._check_values(value)) def clean(self, value): value = self.prepare_value(value) if self.required and not value: raise ValidationError(self.error_messages["required"], code="required") elif not self.required and not value: return self.queryset.none() if not isinstance(value, (list, tuple)): raise ValidationError( self.error_messages["invalid_list"], code="invalid_list", ) qs = self._check_values(value) # Since this overrides the inherited ModelChoiceField.clean # we run custom validators here self.run_validators(value) return qs def _check_values(self, value): """ Given a list of possible PK values, return a QuerySet of the corresponding objects. Raise a ValidationError if a given value is invalid (not a valid PK, not in the queryset, etc.) """ key = self.to_field_name or "pk" # deduplicate given values to avoid creating many querysets or # requiring the database backend deduplicate efficiently. try: value = frozenset(value) except TypeError: # list of lists isn't hashable, for example raise ValidationError( self.error_messages["invalid_list"], code="invalid_list", ) for pk in value: try: self.queryset.filter(**{key: pk}) except (ValueError, TypeError): raise ValidationError( self.error_messages["invalid_pk_value"], code="invalid_pk_value", params={"pk": pk}, ) qs = self.queryset.filter(**{"%s__in" % key: value}) pks = {str(getattr(o, key)) for o in qs} for val in value: if str(val) not in pks: raise ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": val}, ) return qs def prepare_value(self, value): if ( hasattr(value, "__iter__") and not isinstance(value, str) and not hasattr(value, "_meta") ): prepare_value = super().prepare_value return [prepare_value(v) for v in value] return super().prepare_value(value) def has_changed(self, initial, data): if self.disabled: return False if initial is None: initial = [] if data is None: data = [] if len(initial) != len(data): return True initial_set = {str(value) for value in self.prepare_value(initial)} data_set = {str(value) for value in data} return data_set != initial_set def modelform_defines_fields(form_class): return hasattr(form_class, "_meta") and ( form_class._meta.fields is not None or form_class._meta.exclude is not None )
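# --- Illustrative usage sketch (added example; not part of the module above) ---
# A minimal demonstration of modelform_factory() and inlineformset_factory(),
# assuming hypothetical Author/Book models in "myapp" with the fields used
# below. Wrapped in a helper so nothing runs at import time.
def _example_model_factories_usage(request, author_pk):
    from myapp.models import Author, Book  # hypothetical models

    # 'fields' (or 'exclude') must be given explicitly, otherwise
    # modelform_factory() raises ImproperlyConfigured (see above).
    AuthorForm = modelform_factory(Author, fields=["name", "email"])

    # One formset row per Book related to the Author; fk_name is only needed
    # when Book defines more than one ForeignKey to Author.
    BookFormSet = inlineformset_factory(
        Author, Book, fields=["title"], extra=2, can_delete=True
    )

    author = Author.objects.get(pk=author_pk)
    form = AuthorForm(request.POST or None, instance=author)
    formset = BookFormSet(request.POST or None, instance=author)
    if form.is_valid() and formset.is_valid():
        form.save()
        formset.save()
    return form, formset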
e5494a71fbf718443676a3b256974c685af81d3478c03655c6da1552f6813485
from django.core.exceptions import ValidationError from django.forms import Form from django.forms.fields import BooleanField, IntegerField from django.forms.renderers import get_default_renderer from django.forms.utils import ErrorList, RenderableFormMixin from django.forms.widgets import CheckboxInput, HiddenInput, NumberInput from django.utils.functional import cached_property from django.utils.translation import gettext_lazy as _ from django.utils.translation import ngettext_lazy __all__ = ("BaseFormSet", "formset_factory", "all_valid") # special field names TOTAL_FORM_COUNT = "TOTAL_FORMS" INITIAL_FORM_COUNT = "INITIAL_FORMS" MIN_NUM_FORM_COUNT = "MIN_NUM_FORMS" MAX_NUM_FORM_COUNT = "MAX_NUM_FORMS" ORDERING_FIELD_NAME = "ORDER" DELETION_FIELD_NAME = "DELETE" # default minimum number of forms in a formset DEFAULT_MIN_NUM = 0 # default maximum number of forms in a formset, to prevent memory exhaustion DEFAULT_MAX_NUM = 1000 class ManagementForm(Form): """ Keep track of how many form instances are displayed on the page. If adding new forms via JavaScript, you should increment the count field of this form as well. """ template_name = "django/forms/div.html" # RemovedInDjango50Warning. TOTAL_FORMS = IntegerField(widget=HiddenInput) INITIAL_FORMS = IntegerField(widget=HiddenInput) # MIN_NUM_FORM_COUNT and MAX_NUM_FORM_COUNT are output with the rest of the # management form, but only for the convenience of client-side code. The # POST value of them returned from the client is not checked. MIN_NUM_FORMS = IntegerField(required=False, widget=HiddenInput) MAX_NUM_FORMS = IntegerField(required=False, widget=HiddenInput) def clean(self): cleaned_data = super().clean() # When the management form is invalid, we don't know how many forms # were submitted. cleaned_data.setdefault(TOTAL_FORM_COUNT, 0) cleaned_data.setdefault(INITIAL_FORM_COUNT, 0) return cleaned_data class BaseFormSet(RenderableFormMixin): """ A collection of instances of the same Form class. """ deletion_widget = CheckboxInput ordering_widget = NumberInput default_error_messages = { "missing_management_form": _( "ManagementForm data is missing or has been tampered with. Missing fields: " "%(field_names)s. You may need to file a bug report if the issue persists." 
), "too_many_forms": ngettext_lazy( "Please submit at most %(num)d form.", "Please submit at most %(num)d forms.", "num", ), "too_few_forms": ngettext_lazy( "Please submit at least %(num)d form.", "Please submit at least %(num)d forms.", "num", ), } template_name_div = "django/forms/formsets/div.html" template_name_p = "django/forms/formsets/p.html" template_name_table = "django/forms/formsets/table.html" template_name_ul = "django/forms/formsets/ul.html" def __init__( self, data=None, files=None, auto_id="id_%s", prefix=None, initial=None, error_class=ErrorList, form_kwargs=None, error_messages=None, ): self.is_bound = data is not None or files is not None self.prefix = prefix or self.get_default_prefix() self.auto_id = auto_id self.data = data or {} self.files = files or {} self.initial = initial self.form_kwargs = form_kwargs or {} self.error_class = error_class self._errors = None self._non_form_errors = None messages = {} for cls in reversed(type(self).__mro__): messages.update(getattr(cls, "default_error_messages", {})) if error_messages is not None: messages.update(error_messages) self.error_messages = messages def __iter__(self): """Yield the forms in the order they should be rendered.""" return iter(self.forms) def __getitem__(self, index): """Return the form at the given index, based on the rendering order.""" return self.forms[index] def __len__(self): return len(self.forms) def __bool__(self): """ Return True since all formsets have a management form which is not included in the length. """ return True def __repr__(self): if self._errors is None: is_valid = "Unknown" else: is_valid = ( self.is_bound and not self._non_form_errors and not any(form_errors for form_errors in self._errors) ) return "<%s: bound=%s valid=%s total_forms=%s>" % ( self.__class__.__qualname__, self.is_bound, is_valid, self.total_form_count(), ) @cached_property def management_form(self): """Return the ManagementForm instance for this FormSet.""" if self.is_bound: form = ManagementForm( self.data, auto_id=self.auto_id, prefix=self.prefix, renderer=self.renderer, ) form.full_clean() else: form = ManagementForm( auto_id=self.auto_id, prefix=self.prefix, initial={ TOTAL_FORM_COUNT: self.total_form_count(), INITIAL_FORM_COUNT: self.initial_form_count(), MIN_NUM_FORM_COUNT: self.min_num, MAX_NUM_FORM_COUNT: self.max_num, }, renderer=self.renderer, ) return form def total_form_count(self): """Return the total number of forms in this FormSet.""" if self.is_bound: # return absolute_max if it is lower than the actual total form # count in the data; this is DoS protection to prevent clients # from forcing the server to instantiate arbitrary numbers of # forms return min( self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max ) else: initial_forms = self.initial_form_count() total_forms = max(initial_forms, self.min_num) + self.extra # Allow all existing related objects/inlines to be displayed, # but don't allow extra beyond max_num. if initial_forms > self.max_num >= 0: total_forms = initial_forms elif total_forms > self.max_num >= 0: total_forms = self.max_num return total_forms def initial_form_count(self): """Return the number of forms that are required in this FormSet.""" if self.is_bound: return self.management_form.cleaned_data[INITIAL_FORM_COUNT] else: # Use the length of the initial data if it's there, 0 otherwise. 
initial_forms = len(self.initial) if self.initial else 0 return initial_forms @cached_property def forms(self): """Instantiate forms at first property access.""" # DoS protection is included in total_form_count() return [ self._construct_form(i, **self.get_form_kwargs(i)) for i in range(self.total_form_count()) ] def get_form_kwargs(self, index): """ Return additional keyword arguments for each individual formset form. index will be None if the form being constructed is a new empty form. """ return self.form_kwargs.copy() def _construct_form(self, i, **kwargs): """Instantiate and return the i-th form instance in a formset.""" defaults = { "auto_id": self.auto_id, "prefix": self.add_prefix(i), "error_class": self.error_class, # Don't render the HTML 'required' attribute as it may cause # incorrect validation for extra, optional, and deleted # forms in the formset. "use_required_attribute": False, "renderer": self.renderer, } if self.is_bound: defaults["data"] = self.data defaults["files"] = self.files if self.initial and "initial" not in kwargs: try: defaults["initial"] = self.initial[i] except IndexError: pass # Allow extra forms to be empty, unless they're part of # the minimum forms. if i >= self.initial_form_count() and i >= self.min_num: defaults["empty_permitted"] = True defaults.update(kwargs) form = self.form(**defaults) self.add_fields(form, i) return form @property def initial_forms(self): """Return a list of all the initial forms in this formset.""" return self.forms[: self.initial_form_count()] @property def extra_forms(self): """Return a list of all the extra forms in this formset.""" return self.forms[self.initial_form_count() :] @property def empty_form(self): form_kwargs = { **self.get_form_kwargs(None), "auto_id": self.auto_id, "prefix": self.add_prefix("__prefix__"), "empty_permitted": True, "use_required_attribute": False, "renderer": self.renderer, } form = self.form(**form_kwargs) self.add_fields(form, None) return form @property def cleaned_data(self): """ Return a list of form.cleaned_data dicts for every form in self.forms. """ if not self.is_valid(): raise AttributeError( "'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__ ) return [form.cleaned_data for form in self.forms] @property def deleted_forms(self): """Return a list of forms that have been marked for deletion.""" if not self.is_valid() or not self.can_delete: return [] # construct _deleted_form_indexes which is just a list of form indexes # that have had their deletion widget set to True if not hasattr(self, "_deleted_form_indexes"): self._deleted_form_indexes = [] for i, form in enumerate(self.forms): # if this is an extra form and hasn't changed, don't consider it if i >= self.initial_form_count() and not form.has_changed(): continue if self._should_delete_form(form): self._deleted_form_indexes.append(i) return [self.forms[i] for i in self._deleted_form_indexes] @property def ordered_forms(self): """ Return a list of form in the order specified by the incoming data. Raise an AttributeError if ordering is not allowed. """ if not self.is_valid() or not self.can_order: raise AttributeError( "'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__ ) # Construct _ordering, which is a list of (form_index, order_field_value) # tuples. After constructing this list, we'll sort it by order_field_value # so we have a way to get to the form indexes in the order specified # by the form data. 
if not hasattr(self, "_ordering"): self._ordering = [] for i, form in enumerate(self.forms): # if this is an extra form and hasn't changed, don't consider it if i >= self.initial_form_count() and not form.has_changed(): continue # don't add data marked for deletion to self.ordered_data if self.can_delete and self._should_delete_form(form): continue self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME])) # After we're done populating self._ordering, sort it. # A sort function to order things numerically ascending, but # None should be sorted below anything else. Allowing None as # a comparison value makes it so we can leave ordering fields # blank. def compare_ordering_key(k): if k[1] is None: return (1, 0) # +infinity, larger than any number return (0, k[1]) self._ordering.sort(key=compare_ordering_key) # Return a list of form.cleaned_data dicts in the order specified by # the form data. return [self.forms[i[0]] for i in self._ordering] @classmethod def get_default_prefix(cls): return "form" @classmethod def get_deletion_widget(cls): return cls.deletion_widget @classmethod def get_ordering_widget(cls): return cls.ordering_widget def non_form_errors(self): """ Return an ErrorList of errors that aren't associated with a particular form -- i.e., from formset.clean(). Return an empty ErrorList if there are none. """ if self._non_form_errors is None: self.full_clean() return self._non_form_errors @property def errors(self): """Return a list of form.errors for every form in self.forms.""" if self._errors is None: self.full_clean() return self._errors def total_error_count(self): """Return the number of errors across all forms in the formset.""" return len(self.non_form_errors()) + sum( len(form_errors) for form_errors in self.errors ) def _should_delete_form(self, form): """Return whether or not the form was marked for deletion.""" return form.cleaned_data.get(DELETION_FIELD_NAME, False) def is_valid(self): """Return True if every form in self.forms is valid.""" if not self.is_bound: return False # Accessing errors triggers a full clean the first time only. self.errors # List comprehension ensures is_valid() is called for all forms. # Forms due to be deleted shouldn't cause the formset to be invalid. forms_valid = all( [ form.is_valid() for form in self.forms if not (self.can_delete and self._should_delete_form(form)) ] ) return forms_valid and not self.non_form_errors() def full_clean(self): """ Clean all of self.data and populate self._errors and self._non_form_errors. """ self._errors = [] self._non_form_errors = self.error_class( error_class="nonform", renderer=self.renderer ) empty_forms_count = 0 if not self.is_bound: # Stop further processing. return if not self.management_form.is_valid(): error = ValidationError( self.error_messages["missing_management_form"], params={ "field_names": ", ".join( self.management_form.add_prefix(field_name) for field_name in self.management_form.errors ), }, code="missing_management_form", ) self._non_form_errors.append(error) for i, form in enumerate(self.forms): # Empty forms are unchanged forms beyond those with initial data. if not form.has_changed() and i >= self.initial_form_count(): empty_forms_count += 1 # Accessing errors calls full_clean() if necessary. # _should_delete_form() requires cleaned_data. 
form_errors = form.errors if self.can_delete and self._should_delete_form(form): continue self._errors.append(form_errors) try: if ( self.validate_max and self.total_form_count() - len(self.deleted_forms) > self.max_num ) or self.management_form.cleaned_data[ TOTAL_FORM_COUNT ] > self.absolute_max: raise ValidationError( self.error_messages["too_many_forms"] % {"num": self.max_num}, code="too_many_forms", ) if ( self.validate_min and self.total_form_count() - len(self.deleted_forms) - empty_forms_count < self.min_num ): raise ValidationError( self.error_messages["too_few_forms"] % {"num": self.min_num}, code="too_few_forms", ) # Give self.clean() a chance to do cross-form validation. self.clean() except ValidationError as e: self._non_form_errors = self.error_class( e.error_list, error_class="nonform", renderer=self.renderer, ) def clean(self): """ Hook for doing any extra formset-wide cleaning after Form.clean() has been called on every form. Any ValidationError raised by this method will not be associated with a particular form; it will be accessible via formset.non_form_errors() """ pass def has_changed(self): """Return True if data in any form differs from initial.""" return any(form.has_changed() for form in self) def add_fields(self, form, index): """A hook for adding extra fields on to each form instance.""" initial_form_count = self.initial_form_count() if self.can_order: # Only pre-fill the ordering field for initial forms. if index is not None and index < initial_form_count: form.fields[ORDERING_FIELD_NAME] = IntegerField( label=_("Order"), initial=index + 1, required=False, widget=self.get_ordering_widget(), ) else: form.fields[ORDERING_FIELD_NAME] = IntegerField( label=_("Order"), required=False, widget=self.get_ordering_widget(), ) if self.can_delete and (self.can_delete_extra or index < initial_form_count): form.fields[DELETION_FIELD_NAME] = BooleanField( label=_("Delete"), required=False, widget=self.get_deletion_widget(), ) def add_prefix(self, index): return "%s-%s" % (self.prefix, index) def is_multipart(self): """ Return True if the formset needs to be multipart, i.e. it has FileInput, or False otherwise. """ if self.forms: return self.forms[0].is_multipart() else: return self.empty_form.is_multipart() @property def media(self): # All the forms on a FormSet are the same, so you only need to # interrogate the first form for media. if self.forms: return self.forms[0].media else: return self.empty_form.media @property def template_name(self): return self.renderer.formset_template_name def get_context(self): return {"formset": self} def formset_factory( form, formset=BaseFormSet, extra=1, can_order=False, can_delete=False, max_num=None, validate_max=False, min_num=None, validate_min=False, absolute_max=None, can_delete_extra=True, renderer=None, ): """Return a FormSet for the given form class.""" if min_num is None: min_num = DEFAULT_MIN_NUM if max_num is None: max_num = DEFAULT_MAX_NUM # absolute_max is a hard limit on forms instantiated, to prevent # memory-exhaustion attacks. Default to max_num + DEFAULT_MAX_NUM # (which is 2 * DEFAULT_MAX_NUM if max_num is None in the first place). 
if absolute_max is None: absolute_max = max_num + DEFAULT_MAX_NUM if max_num > absolute_max: raise ValueError("'absolute_max' must be greater or equal to 'max_num'.") attrs = { "form": form, "extra": extra, "can_order": can_order, "can_delete": can_delete, "can_delete_extra": can_delete_extra, "min_num": min_num, "max_num": max_num, "absolute_max": absolute_max, "validate_min": validate_min, "validate_max": validate_max, "renderer": renderer or get_default_renderer(), } return type(form.__name__ + "FormSet", (formset,), attrs) def all_valid(formsets): """Validate every formset and return True if all are valid.""" # List comprehension ensures is_valid() is called for all formsets. return all([formset.is_valid() for formset in formsets])
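# --- Illustrative usage sketch (added example; not part of the module above) ---
# A minimal demonstration of formset_factory() with a plain Form. The
# ArticleForm class and the hand-built POST data are hypothetical; the
# management-form keys mirror TOTAL_FORM_COUNT, INITIAL_FORM_COUNT, and
# friends defined at the top of this module, using the default "form" prefix.
def _example_formset_factory_usage():
    from django import forms

    class ArticleForm(forms.Form):
        title = forms.CharField()

    ArticleFormSet = formset_factory(ArticleForm, extra=2, max_num=10)

    data = {
        # Bound formsets require ManagementForm data.
        "form-TOTAL_FORMS": "2",
        "form-INITIAL_FORMS": "0",
        "form-MIN_NUM_FORMS": "0",
        "form-MAX_NUM_FORMS": "10",
        "form-0-title": "First",
        "form-1-title": "Second",
    }
    formset = ArticleFormSet(data)
    assert formset.is_valid()
    return [form.cleaned_data["title"] for form in formset]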
cf5bf978300817ca7761b108d5ddf8f1172b38f2b4a7533078ccd4180425cce5
""" HTML Widget classes """ import copy import datetime import warnings from collections import defaultdict from itertools import chain from django.forms.utils import to_current_timezone from django.templatetags.static import static from django.utils import formats from django.utils.datastructures import OrderedSet from django.utils.dates import MONTHS from django.utils.formats import get_format from django.utils.html import format_html, html_safe from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import mark_safe from django.utils.topological_sort import CyclicDependencyError, stable_topological_sort from django.utils.translation import gettext_lazy as _ from .renderers import get_default_renderer __all__ = ( "Media", "MediaDefiningClass", "Widget", "TextInput", "NumberInput", "EmailInput", "URLInput", "PasswordInput", "HiddenInput", "MultipleHiddenInput", "FileInput", "ClearableFileInput", "Textarea", "DateInput", "DateTimeInput", "TimeInput", "CheckboxInput", "Select", "NullBooleanSelect", "SelectMultiple", "RadioSelect", "CheckboxSelectMultiple", "MultiWidget", "SplitDateTimeWidget", "SplitHiddenDateTimeWidget", "SelectDateWidget", ) MEDIA_TYPES = ("css", "js") class MediaOrderConflictWarning(RuntimeWarning): pass @html_safe class Media: def __init__(self, media=None, css=None, js=None): if media is not None: css = getattr(media, "css", {}) js = getattr(media, "js", []) else: if css is None: css = {} if js is None: js = [] self._css_lists = [css] self._js_lists = [js] def __repr__(self): return "Media(css=%r, js=%r)" % (self._css, self._js) def __str__(self): return self.render() @property def _css(self): css = defaultdict(list) for css_list in self._css_lists: for medium, sublist in css_list.items(): css[medium].append(sublist) return {medium: self.merge(*lists) for medium, lists in css.items()} @property def _js(self): return self.merge(*self._js_lists) def render(self): return mark_safe( "\n".join( chain.from_iterable( getattr(self, "render_" + name)() for name in MEDIA_TYPES ) ) ) def render_js(self): return [ path.__html__() if hasattr(path, "__html__") else format_html('<script src="{}"></script>', self.absolute_path(path)) for path in self._js ] def render_css(self): # To keep rendering order consistent, we can't just iterate over items(). # We need to sort the keys, and iterate over the sorted list. media = sorted(self._css) return chain.from_iterable( [ path.__html__() if hasattr(path, "__html__") else format_html( '<link href="{}" media="{}" rel="stylesheet">', self.absolute_path(path), medium, ) for path in self._css[medium] ] for medium in media ) def absolute_path(self, path): """ Given a relative or absolute path to a static asset, return an absolute path. An absolute path will be returned unchanged while a relative path will be passed to django.templatetags.static.static(). """ if path.startswith(("http://", "https://", "/")): return path return static(path) def __getitem__(self, name): """Return a Media object that only contains media of the given type.""" if name in MEDIA_TYPES: return Media(**{str(name): getattr(self, "_" + name)}) raise KeyError('Unknown media type "%s"' % name) @staticmethod def merge(*lists): """ Merge lists while trying to keep the relative order of the elements. Warn if the lists have the same elements in a different relative order. For static assets it can be important to have them included in the DOM in a certain order. 
In JavaScript you may not be able to reference a global or in CSS you might want to override a style. """ dependency_graph = defaultdict(set) all_items = OrderedSet() for list_ in filter(None, lists): head = list_[0] # The first items depend on nothing but have to be part of the # dependency graph to be included in the result. dependency_graph.setdefault(head, set()) for item in list_: all_items.add(item) # No self dependencies if head != item: dependency_graph[item].add(head) head = item try: return stable_topological_sort(all_items, dependency_graph) except CyclicDependencyError: warnings.warn( "Detected duplicate Media files in an opposite order: {}".format( ", ".join(repr(list_) for list_ in lists) ), MediaOrderConflictWarning, ) return list(all_items) def __add__(self, other): combined = Media() combined._css_lists = self._css_lists[:] combined._js_lists = self._js_lists[:] for item in other._css_lists: if item and item not in self._css_lists: combined._css_lists.append(item) for item in other._js_lists: if item and item not in self._js_lists: combined._js_lists.append(item) return combined def media_property(cls): def _media(self): # Get the media property of the superclass, if it exists sup_cls = super(cls, self) try: base = sup_cls.media except AttributeError: base = Media() # Get the media definition for this class definition = getattr(cls, "Media", None) if definition: extend = getattr(definition, "extend", True) if extend: if extend is True: m = base else: m = Media() for medium in extend: m = m + base[medium] return m + Media(definition) return Media(definition) return base return property(_media) class MediaDefiningClass(type): """ Metaclass for classes that can have media definitions. """ def __new__(mcs, name, bases, attrs): new_class = super().__new__(mcs, name, bases, attrs) if "media" not in attrs: new_class.media = media_property(new_class) return new_class class Widget(metaclass=MediaDefiningClass): needs_multipart_form = False # Determines does this widget need multipart form is_localized = False is_required = False supports_microseconds = True use_fieldset = False def __init__(self, attrs=None): self.attrs = {} if attrs is None else attrs.copy() def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() memo[id(self)] = obj return obj @property def is_hidden(self): return self.input_type == "hidden" if hasattr(self, "input_type") else False def subwidgets(self, name, value, attrs=None): context = self.get_context(name, value, attrs) yield context["widget"] def format_value(self, value): """ Return a value as it should appear when rendered in a template. 
""" if value == "" or value is None: return None if self.is_localized: return formats.localize_input(value) return str(value) def get_context(self, name, value, attrs): return { "widget": { "name": name, "is_hidden": self.is_hidden, "required": self.is_required, "value": self.format_value(value), "attrs": self.build_attrs(self.attrs, attrs), "template_name": self.template_name, }, } def render(self, name, value, attrs=None, renderer=None): """Render the widget as an HTML string.""" context = self.get_context(name, value, attrs) return self._render(self.template_name, context, renderer) def _render(self, template_name, context, renderer=None): if renderer is None: renderer = get_default_renderer() return mark_safe(renderer.render(template_name, context)) def build_attrs(self, base_attrs, extra_attrs=None): """Build an attribute dictionary.""" return {**base_attrs, **(extra_attrs or {})} def value_from_datadict(self, data, files, name): """ Given a dictionary of data and this widget's name, return the value of this widget or None if it's not provided. """ return data.get(name) def value_omitted_from_data(self, data, files, name): return name not in data def id_for_label(self, id_): """ Return the HTML ID attribute of this Widget for use by a <label>, given the ID of the field. Return an empty string if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. In that case, this method should return an ID value that corresponds to the first ID in the widget's tags. """ return id_ def use_required_attribute(self, initial): return not self.is_hidden class Input(Widget): """ Base class for all <input> widgets. """ input_type = None # Subclasses must define this. template_name = "django/forms/widgets/input.html" def __init__(self, attrs=None): if attrs is not None: attrs = attrs.copy() self.input_type = attrs.pop("type", self.input_type) super().__init__(attrs) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context["widget"]["type"] = self.input_type return context class TextInput(Input): input_type = "text" template_name = "django/forms/widgets/text.html" class NumberInput(Input): input_type = "number" template_name = "django/forms/widgets/number.html" class EmailInput(Input): input_type = "email" template_name = "django/forms/widgets/email.html" class URLInput(Input): input_type = "url" template_name = "django/forms/widgets/url.html" class PasswordInput(Input): input_type = "password" template_name = "django/forms/widgets/password.html" def __init__(self, attrs=None, render_value=False): super().__init__(attrs) self.render_value = render_value def get_context(self, name, value, attrs): if not self.render_value: value = None return super().get_context(name, value, attrs) class HiddenInput(Input): input_type = "hidden" template_name = "django/forms/widgets/hidden.html" class MultipleHiddenInput(HiddenInput): """ Handle <input type="hidden"> for fields that have a list of values. """ template_name = "django/forms/widgets/multiple_hidden.html" def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) final_attrs = context["widget"]["attrs"] id_ = context["widget"]["attrs"].get("id") subwidgets = [] for index, value_ in enumerate(context["widget"]["value"]): widget_attrs = final_attrs.copy() if id_: # An ID attribute was given. Add a numeric index as a suffix # so that the inputs don't all have the same ID attribute. 
widget_attrs["id"] = "%s_%s" % (id_, index) widget = HiddenInput() widget.is_required = self.is_required subwidgets.append(widget.get_context(name, value_, widget_attrs)["widget"]) context["widget"]["subwidgets"] = subwidgets return context def value_from_datadict(self, data, files, name): try: getter = data.getlist except AttributeError: getter = data.get return getter(name) def format_value(self, value): return [] if value is None else value class FileInput(Input): input_type = "file" needs_multipart_form = True template_name = "django/forms/widgets/file.html" def format_value(self, value): """File input never renders a value.""" return def value_from_datadict(self, data, files, name): "File widgets take data from FILES, not POST" return files.get(name) def value_omitted_from_data(self, data, files, name): return name not in files def use_required_attribute(self, initial): return super().use_required_attribute(initial) and not initial FILE_INPUT_CONTRADICTION = object() class ClearableFileInput(FileInput): clear_checkbox_label = _("Clear") initial_text = _("Currently") input_text = _("Change") template_name = "django/forms/widgets/clearable_file_input.html" def clear_checkbox_name(self, name): """ Given the name of the file input, return the name of the clear checkbox input. """ return name + "-clear" def clear_checkbox_id(self, name): """ Given the name of the clear checkbox input, return the HTML id for it. """ return name + "_id" def is_initial(self, value): """ Return whether value is considered to be initial value. """ return bool(value and getattr(value, "url", False)) def format_value(self, value): """ Return the file object if it has a defined url attribute. """ if self.is_initial(value): return value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) checkbox_name = self.clear_checkbox_name(name) checkbox_id = self.clear_checkbox_id(checkbox_name) context["widget"].update( { "checkbox_name": checkbox_name, "checkbox_id": checkbox_id, "is_initial": self.is_initial(value), "input_text": self.input_text, "initial_text": self.initial_text, "clear_checkbox_label": self.clear_checkbox_label, } ) context["widget"]["attrs"].setdefault("disabled", False) return context def value_from_datadict(self, data, files, name): upload = super().value_from_datadict(data, files, name) if not self.is_required and CheckboxInput().value_from_datadict( data, files, self.clear_checkbox_name(name) ): if upload: # If the user contradicts themselves (uploads a new file AND # checks the "clear" checkbox), we return a unique marker # object that FileField will turn into a ValidationError. 
return FILE_INPUT_CONTRADICTION # False signals to clear any existing value, as opposed to just None return False return upload def value_omitted_from_data(self, data, files, name): return ( super().value_omitted_from_data(data, files, name) and self.clear_checkbox_name(name) not in data ) class Textarea(Widget): template_name = "django/forms/widgets/textarea.html" def __init__(self, attrs=None): # Use slightly better defaults than HTML's 20x2 box default_attrs = {"cols": "40", "rows": "10"} if attrs: default_attrs.update(attrs) super().__init__(default_attrs) class DateTimeBaseInput(TextInput): format_key = "" supports_microseconds = False def __init__(self, attrs=None, format=None): super().__init__(attrs) self.format = format or None def format_value(self, value): return formats.localize_input( value, self.format or formats.get_format(self.format_key)[0] ) class DateInput(DateTimeBaseInput): format_key = "DATE_INPUT_FORMATS" template_name = "django/forms/widgets/date.html" class DateTimeInput(DateTimeBaseInput): format_key = "DATETIME_INPUT_FORMATS" template_name = "django/forms/widgets/datetime.html" class TimeInput(DateTimeBaseInput): format_key = "TIME_INPUT_FORMATS" template_name = "django/forms/widgets/time.html" # Defined at module level so that CheckboxInput is picklable (#17976) def boolean_check(v): return not (v is False or v is None or v == "") class CheckboxInput(Input): input_type = "checkbox" template_name = "django/forms/widgets/checkbox.html" def __init__(self, attrs=None, check_test=None): super().__init__(attrs) # check_test is a callable that takes a value and returns True # if the checkbox should be checked for that value. self.check_test = boolean_check if check_test is None else check_test def format_value(self, value): """Only return the 'value' attribute if value isn't empty.""" if value is True or value is False or value is None or value == "": return return str(value) def get_context(self, name, value, attrs): if self.check_test(value): attrs = {**(attrs or {}), "checked": True} return super().get_context(name, value, attrs) def value_from_datadict(self, data, files, name): if name not in data: # A missing value means False because HTML form submission does not # send results for unselected checkboxes. return False value = data.get(name) # Translate true and false strings to boolean values. values = {"true": True, "false": False} if isinstance(value, str): value = values.get(value.lower(), value) return bool(value) def value_omitted_from_data(self, data, files, name): # HTML checkboxes don't appear in POST data if not checked, so it's # never known if the value is actually omitted. return False class ChoiceWidget(Widget): allow_multiple_selected = False input_type = None template_name = None option_template_name = None add_id_index = True checked_attribute = {"checked": True} option_inherits_attrs = True def __init__(self, attrs=None, choices=()): super().__init__(attrs) # choices can be any iterable, but we may need to render this widget # multiple times. Thus, collapse it into a list so it can be consumed # more than once. self.choices = list(choices) def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() obj.choices = copy.copy(self.choices) memo[id(self)] = obj return obj def subwidgets(self, name, value, attrs=None): """ Yield all "subwidgets" of this widget. Used to enable iterating options from a BoundField for choice widgets. 
""" value = self.format_value(value) yield from self.options(name, value, attrs) def options(self, name, value, attrs=None): """Yield a flat list of options for this widget.""" for group in self.optgroups(name, value, attrs): yield from group[1] def optgroups(self, name, value, attrs=None): """Return a list of optgroups for this widget.""" groups = [] has_selected = False for index, (option_value, option_label) in enumerate(self.choices): if option_value is None: option_value = "" subgroup = [] if isinstance(option_label, (list, tuple)): group_name = option_value subindex = 0 choices = option_label else: group_name = None subindex = None choices = [(option_value, option_label)] groups.append((group_name, subgroup, index)) for subvalue, sublabel in choices: selected = (not has_selected or self.allow_multiple_selected) and str( subvalue ) in value has_selected |= selected subgroup.append( self.create_option( name, subvalue, sublabel, selected, index, subindex=subindex, attrs=attrs, ) ) if subindex is not None: subindex += 1 return groups def create_option( self, name, value, label, selected, index, subindex=None, attrs=None ): index = str(index) if subindex is None else "%s_%s" % (index, subindex) option_attrs = ( self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {} ) if selected: option_attrs.update(self.checked_attribute) if "id" in option_attrs: option_attrs["id"] = self.id_for_label(option_attrs["id"], index) return { "name": name, "value": value, "label": label, "selected": selected, "index": index, "attrs": option_attrs, "type": self.input_type, "template_name": self.option_template_name, "wrap_label": True, } def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context["widget"]["optgroups"] = self.optgroups( name, context["widget"]["value"], attrs ) return context def id_for_label(self, id_, index="0"): """ Use an incremented id for each option where the main widget references the zero index. """ if id_ and self.add_id_index: id_ = "%s_%s" % (id_, index) return id_ def value_from_datadict(self, data, files, name): getter = data.get if self.allow_multiple_selected: try: getter = data.getlist except AttributeError: pass return getter(name) def format_value(self, value): """Return selected values as a list.""" if value is None and self.allow_multiple_selected: return [] if not isinstance(value, (tuple, list)): value = [value] return [str(v) if v is not None else "" for v in value] class Select(ChoiceWidget): input_type = "select" template_name = "django/forms/widgets/select.html" option_template_name = "django/forms/widgets/select_option.html" add_id_index = False checked_attribute = {"selected": True} option_inherits_attrs = False def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) if self.allow_multiple_selected: context["widget"]["attrs"]["multiple"] = True return context @staticmethod def _choice_has_empty_value(choice): """Return True if the choice's value is empty string or None.""" value, _ = choice return value is None or value == "" def use_required_attribute(self, initial): """ Don't render 'required' if the first <option> has a value, as that's invalid HTML. """ use_required_attribute = super().use_required_attribute(initial) # 'required' is always okay for <select multiple>. 
if self.allow_multiple_selected: return use_required_attribute first_choice = next(iter(self.choices), None) return ( use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice) ) class NullBooleanSelect(Select): """ A Select Widget intended to be used with NullBooleanField. """ def __init__(self, attrs=None): choices = ( ("unknown", _("Unknown")), ("true", _("Yes")), ("false", _("No")), ) super().__init__(attrs, choices) def format_value(self, value): try: return { True: "true", False: "false", "true": "true", "false": "false", # For backwards compatibility with Django < 2.2. "2": "true", "3": "false", }[value] except KeyError: return "unknown" def value_from_datadict(self, data, files, name): value = data.get(name) return { True: True, "True": True, "False": False, False: False, "true": True, "false": False, # For backwards compatibility with Django < 2.2. "2": True, "3": False, }.get(value) class SelectMultiple(Select): allow_multiple_selected = True def value_from_datadict(self, data, files, name): try: getter = data.getlist except AttributeError: getter = data.get return getter(name) def value_omitted_from_data(self, data, files, name): # An unselected <select multiple> doesn't appear in POST data, so it's # never known if the value is actually omitted. return False class RadioSelect(ChoiceWidget): input_type = "radio" template_name = "django/forms/widgets/radio.html" option_template_name = "django/forms/widgets/radio_option.html" use_fieldset = True def id_for_label(self, id_, index=None): """ Don't include for="field_0" in <label> to improve accessibility when using a screen reader, in addition clicking such a label would toggle the first input. """ if index is None: return "" return super().id_for_label(id_, index) class CheckboxSelectMultiple(RadioSelect): allow_multiple_selected = True input_type = "checkbox" template_name = "django/forms/widgets/checkbox_select.html" option_template_name = "django/forms/widgets/checkbox_option.html" def use_required_attribute(self, initial): # Don't use the 'required' attribute because browser validation would # require all checkboxes to be checked instead of at least one. return False def value_omitted_from_data(self, data, files, name): # HTML checkboxes don't appear in POST data if not checked, so it's # never known if the value is actually omitted. return False class MultiWidget(Widget): """ A widget that is composed of multiple widgets. In addition to the values added by Widget.get_context(), this widget adds a list of subwidgets to the context as widget['subwidgets']. These can be looped over and rendered like normal widgets. You'll probably want to use this class with MultiValueField. """ template_name = "django/forms/widgets/multiwidget.html" use_fieldset = True def __init__(self, widgets, attrs=None): if isinstance(widgets, dict): self.widgets_names = [("_%s" % name) if name else "" for name in widgets] widgets = widgets.values() else: self.widgets_names = ["_%s" % i for i in range(len(widgets))] self.widgets = [w() if isinstance(w, type) else w for w in widgets] super().__init__(attrs) @property def is_hidden(self): return all(w.is_hidden for w in self.widgets) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) if self.is_localized: for widget in self.widgets: widget.is_localized = self.is_localized # value is a list/tuple of values, each corresponding to a widget # in self.widgets. 
if not isinstance(value, (list, tuple)): value = self.decompress(value) final_attrs = context["widget"]["attrs"] input_type = final_attrs.pop("type", None) id_ = final_attrs.get("id") subwidgets = [] for i, (widget_name, widget) in enumerate( zip(self.widgets_names, self.widgets) ): if input_type is not None: widget.input_type = input_type widget_name = name + widget_name try: widget_value = value[i] except IndexError: widget_value = None if id_: widget_attrs = final_attrs.copy() widget_attrs["id"] = "%s_%s" % (id_, i) else: widget_attrs = final_attrs subwidgets.append( widget.get_context(widget_name, widget_value, widget_attrs)["widget"] ) context["widget"]["subwidgets"] = subwidgets return context def id_for_label(self, id_): return "" def value_from_datadict(self, data, files, name): return [ widget.value_from_datadict(data, files, name + widget_name) for widget_name, widget in zip(self.widgets_names, self.widgets) ] def value_omitted_from_data(self, data, files, name): return all( widget.value_omitted_from_data(data, files, name + widget_name) for widget_name, widget in zip(self.widgets_names, self.widgets) ) def decompress(self, value): """ Return a list of decompressed values for the given compressed value. The given value can be assumed to be valid, but not necessarily non-empty. """ raise NotImplementedError("Subclasses must implement this method.") def _get_media(self): """ Media for a multiwidget is the combination of all media of the subwidgets. """ media = Media() for w in self.widgets: media = media + w.media return media media = property(_get_media) def __deepcopy__(self, memo): obj = super().__deepcopy__(memo) obj.widgets = copy.deepcopy(self.widgets) return obj @property def needs_multipart_form(self): return any(w.needs_multipart_form for w in self.widgets) class SplitDateTimeWidget(MultiWidget): """ A widget that splits datetime input into two <input type="text"> boxes. """ supports_microseconds = False template_name = "django/forms/widgets/splitdatetime.html" def __init__( self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None, ): widgets = ( DateInput( attrs=attrs if date_attrs is None else date_attrs, format=date_format, ), TimeInput( attrs=attrs if time_attrs is None else time_attrs, format=time_format, ), ) super().__init__(widgets) def decompress(self, value): if value: value = to_current_timezone(value) return [value.date(), value.time()] return [None, None] class SplitHiddenDateTimeWidget(SplitDateTimeWidget): """ A widget that splits datetime input into two <input type="hidden"> inputs. """ template_name = "django/forms/widgets/splithiddendatetime.html" def __init__( self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None, ): super().__init__(attrs, date_format, time_format, date_attrs, time_attrs) for widget in self.widgets: widget.input_type = "hidden" class SelectDateWidget(Widget): """ A widget that splits date input into three <select> boxes. This also serves as an example of a Widget that has more than one HTML element and hence implements value_from_datadict. """ none_value = ("", "---") month_field = "%s_month" day_field = "%s_day" year_field = "%s_year" template_name = "django/forms/widgets/select_date.html" input_type = "select" select_widget = Select date_re = _lazy_re_compile(r"(\d{4}|0)-(\d\d?)-(\d\d?)$") use_fieldset = True def __init__(self, attrs=None, years=None, months=None, empty_label=None): self.attrs = attrs or {} # Optional list or tuple of years to use in the "year" select box. 
if years: self.years = years else: this_year = datetime.date.today().year self.years = range(this_year, this_year + 10) # Optional dict of months to use in the "month" select box. if months: self.months = months else: self.months = MONTHS # Optional string, list, or tuple to use as empty_label. if isinstance(empty_label, (list, tuple)): if not len(empty_label) == 3: raise ValueError("empty_label list/tuple must have 3 elements.") self.year_none_value = ("", empty_label[0]) self.month_none_value = ("", empty_label[1]) self.day_none_value = ("", empty_label[2]) else: if empty_label is not None: self.none_value = ("", empty_label) self.year_none_value = self.none_value self.month_none_value = self.none_value self.day_none_value = self.none_value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) date_context = {} year_choices = [(i, str(i)) for i in self.years] if not self.is_required: year_choices.insert(0, self.year_none_value) year_name = self.year_field % name date_context["year"] = self.select_widget( attrs, choices=year_choices ).get_context( name=year_name, value=context["widget"]["value"]["year"], attrs={**context["widget"]["attrs"], "id": "id_%s" % year_name}, ) month_choices = list(self.months.items()) if not self.is_required: month_choices.insert(0, self.month_none_value) month_name = self.month_field % name date_context["month"] = self.select_widget( attrs, choices=month_choices ).get_context( name=month_name, value=context["widget"]["value"]["month"], attrs={**context["widget"]["attrs"], "id": "id_%s" % month_name}, ) day_choices = [(i, i) for i in range(1, 32)] if not self.is_required: day_choices.insert(0, self.day_none_value) day_name = self.day_field % name date_context["day"] = self.select_widget( attrs, choices=day_choices, ).get_context( name=day_name, value=context["widget"]["value"]["day"], attrs={**context["widget"]["attrs"], "id": "id_%s" % day_name}, ) subwidgets = [] for field in self._parse_date_fmt(): subwidgets.append(date_context[field]["widget"]) context["widget"]["subwidgets"] = subwidgets return context def format_value(self, value): """ Return a dict containing the year, month, and day of the current value. Use dict instead of a datetime to allow invalid dates such as February 31 to display correctly. """ year, month, day = None, None, None if isinstance(value, (datetime.date, datetime.datetime)): year, month, day = value.year, value.month, value.day elif isinstance(value, str): match = self.date_re.match(value) if match: # Convert any zeros in the date to empty strings to match the # empty option value. 
year, month, day = [int(val) or "" for val in match.groups()] else: input_format = get_format("DATE_INPUT_FORMATS")[0] try: d = datetime.datetime.strptime(value, input_format) except ValueError: pass else: year, month, day = d.year, d.month, d.day return {"year": year, "month": month, "day": day} @staticmethod def _parse_date_fmt(): fmt = get_format("DATE_FORMAT") escaped = False for char in fmt: if escaped: escaped = False elif char == "\\": escaped = True elif char in "Yy": yield "year" elif char in "bEFMmNn": yield "month" elif char in "dj": yield "day" def id_for_label(self, id_): for first_select in self._parse_date_fmt(): return "%s_%s" % (id_, first_select) return "%s_month" % id_ def value_from_datadict(self, data, files, name): y = data.get(self.year_field % name) m = data.get(self.month_field % name) d = data.get(self.day_field % name) if y == m == d == "": return None if y is not None and m is not None and d is not None: input_format = get_format("DATE_INPUT_FORMATS")[0] input_format = formats.sanitize_strftime_format(input_format) try: date_value = datetime.date(int(y), int(m), int(d)) except ValueError: # Return pseudo-ISO dates with zeros for any unselected values, # e.g. '2017-0-23'. return "%s-%s-%s" % (y or 0, m or 0, d or 0) return date_value.strftime(input_format) return data.get(name) def value_omitted_from_data(self, data, files, name): return not any( ("{}_{}".format(name, interval) in data) for interval in ("year", "month", "day") )
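# Illustrative sketch, not part of Django: the widgets above are combined by
# subclassing MultiWidget and implementing decompress(), which maps a single
# "compressed" value back onto the subwidgets. _ExamplePhoneWidget and its
# "country"/"number" split are hypothetical names used only for illustration.
class _ExamplePhoneWidget(MultiWidget):
    def __init__(self, attrs=None):
        # Keyed widgets produce suffixed input names: <name>_country, <name>_number.
        super().__init__({"country": TextInput(), "number": TextInput()}, attrs)

    def decompress(self, value):
        # A stored value such as "+1 5550100" is split into one value per
        # subwidget; an empty value decompresses to a list of Nones.
        if value:
            country, _sep, number = value.partition(" ")
            return [country, number]
        return [None, None]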
6c35c441465262354b11528da43fd3b9c672c4dc9afd790855d761eadd40dec4
import collections.abc import inspect import warnings from math import ceil from django.utils.functional import cached_property from django.utils.inspect import method_has_no_args from django.utils.translation import gettext_lazy as _ class UnorderedObjectListWarning(RuntimeWarning): pass class InvalidPage(Exception): pass class PageNotAnInteger(InvalidPage): pass class EmptyPage(InvalidPage): pass class Paginator: # Translators: String used to replace omitted page numbers in elided page # range generated by paginators, e.g. [1, 2, '…', 5, 6, 7, '…', 9, 10]. ELLIPSIS = _("…") def __init__(self, object_list, per_page, orphans=0, allow_empty_first_page=True): self.object_list = object_list self._check_object_list_is_ordered() self.per_page = int(per_page) self.orphans = int(orphans) self.allow_empty_first_page = allow_empty_first_page def __iter__(self): for page_number in self.page_range: yield self.page(page_number) def validate_number(self, number): """Validate the given 1-based page number.""" try: if isinstance(number, float) and not number.is_integer(): raise ValueError number = int(number) except (TypeError, ValueError): raise PageNotAnInteger(_("That page number is not an integer")) if number < 1: raise EmptyPage(_("That page number is less than 1")) if number > self.num_pages: raise EmptyPage(_("That page contains no results")) return number def get_page(self, number): """ Return a valid page, even if the page argument isn't a number or isn't in range. """ try: number = self.validate_number(number) except PageNotAnInteger: number = 1 except EmptyPage: number = self.num_pages return self.page(number) def page(self, number): """Return a Page object for the given 1-based page number.""" number = self.validate_number(number) bottom = (number - 1) * self.per_page top = bottom + self.per_page if top + self.orphans >= self.count: top = self.count return self._get_page(self.object_list[bottom:top], number, self) def _get_page(self, *args, **kwargs): """ Return an instance of a single page. This hook can be used by subclasses to use an alternative to the standard :cls:`Page` object. """ return Page(*args, **kwargs) @cached_property def count(self): """Return the total number of objects, across all pages.""" c = getattr(self.object_list, "count", None) if callable(c) and not inspect.isbuiltin(c) and method_has_no_args(c): return c() return len(self.object_list) @cached_property def num_pages(self): """Return the total number of pages.""" if self.count == 0 and not self.allow_empty_first_page: return 0 hits = max(1, self.count - self.orphans) return ceil(hits / self.per_page) @property def page_range(self): """ Return a 1-based range of pages for iterating through within a template for loop. """ return range(1, self.num_pages + 1) def _check_object_list_is_ordered(self): """ Warn if self.object_list is unordered (typically a QuerySet). """ ordered = getattr(self.object_list, "ordered", None) if ordered is not None and not ordered: obj_list_repr = ( "{} {}".format( self.object_list.model, self.object_list.__class__.__name__ ) if hasattr(self.object_list, "model") else "{!r}".format(self.object_list) ) warnings.warn( "Pagination may yield inconsistent results with an unordered " "object_list: {}.".format(obj_list_repr), UnorderedObjectListWarning, stacklevel=3, ) def get_elided_page_range(self, number=1, *, on_each_side=3, on_ends=2): """ Return a 1-based range of pages with some values elided. 
If the page range is larger than a given size, the whole range is not provided and a compact form is returned instead, e.g. for a paginator with 50 pages, if page 43 were the current page, the output, with the default arguments, would be: 1, 2, …, 40, 41, 42, 43, 44, 45, 46, …, 49, 50. """ number = self.validate_number(number) if self.num_pages <= (on_each_side + on_ends) * 2: yield from self.page_range return if number > (1 + on_each_side + on_ends) + 1: yield from range(1, on_ends + 1) yield self.ELLIPSIS yield from range(number - on_each_side, number + 1) else: yield from range(1, number + 1) if number < (self.num_pages - on_each_side - on_ends) - 1: yield from range(number + 1, number + on_each_side + 1) yield self.ELLIPSIS yield from range(self.num_pages - on_ends + 1, self.num_pages + 1) else: yield from range(number + 1, self.num_pages + 1) class Page(collections.abc.Sequence): def __init__(self, object_list, number, paginator): self.object_list = object_list self.number = number self.paginator = paginator def __repr__(self): return "<Page %s of %s>" % (self.number, self.paginator.num_pages) def __len__(self): return len(self.object_list) def __getitem__(self, index): if not isinstance(index, (int, slice)): raise TypeError( "Page indices must be integers or slices, not %s." % type(index).__name__ ) # The object_list is converted to a list so that if it was a QuerySet # it won't be a database hit per __getitem__. if not isinstance(self.object_list, list): self.object_list = list(self.object_list) return self.object_list[index] def has_next(self): return self.number < self.paginator.num_pages def has_previous(self): return self.number > 1 def has_other_pages(self): return self.has_previous() or self.has_next() def next_page_number(self): return self.paginator.validate_number(self.number + 1) def previous_page_number(self): return self.paginator.validate_number(self.number - 1) def start_index(self): """ Return the 1-based index of the first object on this page, relative to total objects in the paginator. """ # Special case, return zero if no items. if self.paginator.count == 0: return 0 return (self.paginator.per_page * (self.number - 1)) + 1 def end_index(self): """ Return the 1-based index of the last object on this page, relative to total objects found (hits). """ # Special case for the last page because there can be orphans. if self.number == self.paginator.num_pages: return self.paginator.count return self.number * self.paginator.per_page
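# Illustrative usage sketch, not part of Django's paginator module: paginating
# a plain sequence. get_page() clamps invalid input to a valid page number, and
# get_elided_page_range() collapses a long page range around the current page
# using Paginator.ELLIPSIS.
def _example_pagination():
    paginator = Paginator(range(1, 101), per_page=5)  # 100 objects -> 20 pages
    page = paginator.get_page("not a number")  # PageNotAnInteger -> falls back to page 1
    assert page.number == 1 and (page.start_index(), page.end_index()) == (1, 5)
    # With the defaults (on_each_side=3, on_ends=2) and page 10 as the current
    # page, this yields 1, 2, …, 7, 8, 9, 10, 11, 12, 13, …, 19, 20.
    return list(paginator.get_elided_page_range(10))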
0db24cb7cbcab8b2654442b5f6696b7e0690d90550ad67016d07a26955ec0690
import ipaddress import math import re from pathlib import Path from urllib.parse import urlsplit, urlunsplit from django.core.exceptions import ValidationError from django.utils.deconstruct import deconstructible from django.utils.encoding import punycode from django.utils.ipv6 import is_valid_ipv6_address from django.utils.regex_helper import _lazy_re_compile from django.utils.translation import gettext_lazy as _ from django.utils.translation import ngettext_lazy # These values, if given to validate(), will trigger the self.required check. EMPTY_VALUES = (None, "", [], (), {}) @deconstructible class RegexValidator: regex = "" message = _("Enter a valid value.") code = "invalid" inverse_match = False flags = 0 def __init__( self, regex=None, message=None, code=None, inverse_match=None, flags=None ): if regex is not None: self.regex = regex if message is not None: self.message = message if code is not None: self.code = code if inverse_match is not None: self.inverse_match = inverse_match if flags is not None: self.flags = flags if self.flags and not isinstance(self.regex, str): raise TypeError( "If the flags are set, regex must be a regular expression string." ) self.regex = _lazy_re_compile(self.regex, self.flags) def __call__(self, value): """ Validate that the input contains (or does *not* contain, if inverse_match is True) a match for the regular expression. """ regex_matches = self.regex.search(str(value)) invalid_input = regex_matches if self.inverse_match else not regex_matches if invalid_input: raise ValidationError(self.message, code=self.code, params={"value": value}) def __eq__(self, other): return ( isinstance(other, RegexValidator) and self.regex.pattern == other.regex.pattern and self.regex.flags == other.regex.flags and (self.message == other.message) and (self.code == other.code) and (self.inverse_match == other.inverse_match) ) @deconstructible class URLValidator(RegexValidator): ul = "\u00a1-\uffff" # Unicode letters range (must not be a raw string). # IP patterns ipv4_re = ( r"(?:0|25[0-5]|2[0-4][0-9]|1[0-9]?[0-9]?|[1-9][0-9]?)" r"(?:\.(?:0|25[0-5]|2[0-4][0-9]|1[0-9]?[0-9]?|[1-9][0-9]?)){3}" ) ipv6_re = r"\[[0-9a-f:.]+\]" # (simple regex, validated later) # Host patterns hostname_re = ( r"[a-z" + ul + r"0-9](?:[a-z" + ul + r"0-9-]{0,61}[a-z" + ul + r"0-9])?" ) # Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1 domain_re = r"(?:\.(?!-)[a-z" + ul + r"0-9-]{1,63}(?<!-))*" tld_re = ( r"\." # dot r"(?!-)" # can't start with a dash r"(?:[a-z" + ul + "-]{2,63}" # domain label r"|xn--[a-z0-9]{1,59})" # or punycode label r"(?<!-)" # can't end with a dash r"\.?" # may have a trailing dot ) host_re = "(" + hostname_re + domain_re + tld_re + "|localhost)" regex = _lazy_re_compile( r"^(?:[a-z0-9.+-]*)://" # scheme is validated separately r"(?:[^\s:@/]+(?::[^\s:@/]*)?@)?" # user:pass authentication r"(?:" + ipv4_re + "|" + ipv6_re + "|" + host_re + ")" r"(?::[0-9]{1,5})?" # port r"(?:[/?#][^\s]*)?" # resource path r"\Z", re.IGNORECASE, ) message = _("Enter a valid URL.") schemes = ["http", "https", "ftp", "ftps"] unsafe_chars = frozenset("\t\r\n") def __init__(self, schemes=None, **kwargs): super().__init__(**kwargs) if schemes is not None: self.schemes = schemes def __call__(self, value): if not isinstance(value, str): raise ValidationError(self.message, code=self.code, params={"value": value}) if self.unsafe_chars.intersection(value): raise ValidationError(self.message, code=self.code, params={"value": value}) # Check if the scheme is valid. 
scheme = value.split("://")[0].lower() if scheme not in self.schemes: raise ValidationError(self.message, code=self.code, params={"value": value}) # Then check full URL try: splitted_url = urlsplit(value) except ValueError: raise ValidationError(self.message, code=self.code, params={"value": value}) try: super().__call__(value) except ValidationError as e: # Trivial case failed. Try for possible IDN domain if value: scheme, netloc, path, query, fragment = splitted_url try: netloc = punycode(netloc) # IDN -> ACE except UnicodeError: # invalid domain part raise e url = urlunsplit((scheme, netloc, path, query, fragment)) super().__call__(url) else: raise else: # Now verify IPv6 in the netloc part host_match = re.search(r"^\[(.+)\](?::[0-9]{1,5})?$", splitted_url.netloc) if host_match: potential_ip = host_match[1] try: validate_ipv6_address(potential_ip) except ValidationError: raise ValidationError( self.message, code=self.code, params={"value": value} ) # The maximum length of a full host name is 253 characters per RFC 1034 # section 3.1. It's defined to be 255 bytes or less, but this includes # one byte for the length of the name and one byte for the trailing dot # that's used to indicate absolute names in DNS. if splitted_url.hostname is None or len(splitted_url.hostname) > 253: raise ValidationError(self.message, code=self.code, params={"value": value}) integer_validator = RegexValidator( _lazy_re_compile(r"^-?\d+\Z"), message=_("Enter a valid integer."), code="invalid", ) def validate_integer(value): return integer_validator(value) @deconstructible class EmailValidator: message = _("Enter a valid email address.") code = "invalid" user_regex = _lazy_re_compile( # dot-atom r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # quoted-string r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])' r'*"\Z)', re.IGNORECASE, ) domain_regex = _lazy_re_compile( # max length for domain name labels is 63 characters per RFC 1034 r"((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z", re.IGNORECASE, ) literal_regex = _lazy_re_compile( # literal form, ipv4 or ipv6 address (SMTP 4.1.3) r"\[([A-F0-9:.]+)\]\Z", re.IGNORECASE, ) domain_allowlist = ["localhost"] def __init__(self, message=None, code=None, allowlist=None): if message is not None: self.message = message if code is not None: self.code = code if allowlist is not None: self.domain_allowlist = allowlist def __call__(self, value): if not value or "@" not in value: raise ValidationError(self.message, code=self.code, params={"value": value}) user_part, domain_part = value.rsplit("@", 1) if not self.user_regex.match(user_part): raise ValidationError(self.message, code=self.code, params={"value": value}) if domain_part not in self.domain_allowlist and not self.validate_domain_part( domain_part ): # Try for possible IDN domain-part try: domain_part = punycode(domain_part) except UnicodeError: pass else: if self.validate_domain_part(domain_part): return raise ValidationError(self.message, code=self.code, params={"value": value}) def validate_domain_part(self, domain_part): if self.domain_regex.match(domain_part): return True literal_match = self.literal_regex.match(domain_part) if literal_match: ip_address = literal_match[1] try: validate_ipv46_address(ip_address) return True except ValidationError: pass return False def __eq__(self, other): return ( isinstance(other, EmailValidator) and (self.domain_allowlist == other.domain_allowlist) and (self.message == other.message) and (self.code == 
other.code) ) validate_email = EmailValidator() slug_re = _lazy_re_compile(r"^[-a-zA-Z0-9_]+\Z") validate_slug = RegexValidator( slug_re, # Translators: "letters" means latin letters: a-z and A-Z. _("Enter a valid “slug” consisting of letters, numbers, underscores or hyphens."), "invalid", ) slug_unicode_re = _lazy_re_compile(r"^[-\w]+\Z") validate_unicode_slug = RegexValidator( slug_unicode_re, _( "Enter a valid “slug” consisting of Unicode letters, numbers, underscores, or " "hyphens." ), "invalid", ) def validate_ipv4_address(value): try: ipaddress.IPv4Address(value) except ValueError: raise ValidationError( _("Enter a valid IPv4 address."), code="invalid", params={"value": value} ) else: # Leading zeros are forbidden to avoid ambiguity with the octal # notation. This restriction is included in Python 3.9.5+. # TODO: Remove when dropping support for PY39. if any(octet != "0" and octet[0] == "0" for octet in value.split(".")): raise ValidationError( _("Enter a valid IPv4 address."), code="invalid", params={"value": value}, ) def validate_ipv6_address(value): if not is_valid_ipv6_address(value): raise ValidationError( _("Enter a valid IPv6 address."), code="invalid", params={"value": value} ) def validate_ipv46_address(value): try: validate_ipv4_address(value) except ValidationError: try: validate_ipv6_address(value) except ValidationError: raise ValidationError( _("Enter a valid IPv4 or IPv6 address."), code="invalid", params={"value": value}, ) ip_address_validator_map = { "both": ([validate_ipv46_address], _("Enter a valid IPv4 or IPv6 address.")), "ipv4": ([validate_ipv4_address], _("Enter a valid IPv4 address.")), "ipv6": ([validate_ipv6_address], _("Enter a valid IPv6 address.")), } def ip_address_validators(protocol, unpack_ipv4): """ Depending on the given parameters, return the appropriate validators for the GenericIPAddressField. """ if protocol != "both" and unpack_ipv4: raise ValueError( "You can only use `unpack_ipv4` if `protocol` is set to 'both'" ) try: return ip_address_validator_map[protocol.lower()] except KeyError: raise ValueError( "The protocol '%s' is unknown. Supported: %s" % (protocol, list(ip_address_validator_map)) ) def int_list_validator(sep=",", message=None, code="invalid", allow_negative=False): regexp = _lazy_re_compile( r"^%(neg)s\d+(?:%(sep)s%(neg)s\d+)*\Z" % { "neg": "(-)?" 
if allow_negative else "", "sep": re.escape(sep), } ) return RegexValidator(regexp, message=message, code=code) validate_comma_separated_integer_list = int_list_validator( message=_("Enter only digits separated by commas."), ) @deconstructible class BaseValidator: message = _("Ensure this value is %(limit_value)s (it is %(show_value)s).") code = "limit_value" def __init__(self, limit_value, message=None): self.limit_value = limit_value if message: self.message = message def __call__(self, value): cleaned = self.clean(value) limit_value = ( self.limit_value() if callable(self.limit_value) else self.limit_value ) params = {"limit_value": limit_value, "show_value": cleaned, "value": value} if self.compare(cleaned, limit_value): raise ValidationError(self.message, code=self.code, params=params) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return ( self.limit_value == other.limit_value and self.message == other.message and self.code == other.code ) def compare(self, a, b): return a is not b def clean(self, x): return x @deconstructible class MaxValueValidator(BaseValidator): message = _("Ensure this value is less than or equal to %(limit_value)s.") code = "max_value" def compare(self, a, b): return a > b @deconstructible class MinValueValidator(BaseValidator): message = _("Ensure this value is greater than or equal to %(limit_value)s.") code = "min_value" def compare(self, a, b): return a < b @deconstructible class StepValueValidator(BaseValidator): message = _("Ensure this value is a multiple of step size %(limit_value)s.") code = "step_size" def compare(self, a, b): return not math.isclose(math.remainder(a, b), 0, abs_tol=1e-9) @deconstructible class MinLengthValidator(BaseValidator): message = ngettext_lazy( "Ensure this value has at least %(limit_value)d character (it has " "%(show_value)d).", "Ensure this value has at least %(limit_value)d characters (it has " "%(show_value)d).", "limit_value", ) code = "min_length" def compare(self, a, b): return a < b def clean(self, x): return len(x) @deconstructible class MaxLengthValidator(BaseValidator): message = ngettext_lazy( "Ensure this value has at most %(limit_value)d character (it has " "%(show_value)d).", "Ensure this value has at most %(limit_value)d characters (it has " "%(show_value)d).", "limit_value", ) code = "max_length" def compare(self, a, b): return a > b def clean(self, x): return len(x) @deconstructible class DecimalValidator: """ Validate that the input does not exceed the maximum number of digits expected, otherwise raise ValidationError. 
""" messages = { "invalid": _("Enter a number."), "max_digits": ngettext_lazy( "Ensure that there are no more than %(max)s digit in total.", "Ensure that there are no more than %(max)s digits in total.", "max", ), "max_decimal_places": ngettext_lazy( "Ensure that there are no more than %(max)s decimal place.", "Ensure that there are no more than %(max)s decimal places.", "max", ), "max_whole_digits": ngettext_lazy( "Ensure that there are no more than %(max)s digit before the decimal " "point.", "Ensure that there are no more than %(max)s digits before the decimal " "point.", "max", ), } def __init__(self, max_digits, decimal_places): self.max_digits = max_digits self.decimal_places = decimal_places def __call__(self, value): digit_tuple, exponent = value.as_tuple()[1:] if exponent in {"F", "n", "N"}: raise ValidationError( self.messages["invalid"], code="invalid", params={"value": value} ) if exponent >= 0: digits = len(digit_tuple) if digit_tuple != (0,): # A positive exponent adds that many trailing zeros. digits += exponent decimals = 0 else: # If the absolute value of the negative exponent is larger than the # number of digits, then it's the same as the number of digits, # because it'll consume all of the digits in digit_tuple and then # add abs(exponent) - len(digit_tuple) leading zeros after the # decimal point. if abs(exponent) > len(digit_tuple): digits = decimals = abs(exponent) else: digits = len(digit_tuple) decimals = abs(exponent) whole_digits = digits - decimals if self.max_digits is not None and digits > self.max_digits: raise ValidationError( self.messages["max_digits"], code="max_digits", params={"max": self.max_digits, "value": value}, ) if self.decimal_places is not None and decimals > self.decimal_places: raise ValidationError( self.messages["max_decimal_places"], code="max_decimal_places", params={"max": self.decimal_places, "value": value}, ) if ( self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places) ): raise ValidationError( self.messages["max_whole_digits"], code="max_whole_digits", params={"max": (self.max_digits - self.decimal_places), "value": value}, ) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.max_digits == other.max_digits and self.decimal_places == other.decimal_places ) @deconstructible class FileExtensionValidator: message = _( "File extension “%(extension)s” is not allowed. " "Allowed extensions are: %(allowed_extensions)s." 
) code = "invalid_extension" def __init__(self, allowed_extensions=None, message=None, code=None): if allowed_extensions is not None: allowed_extensions = [ allowed_extension.lower() for allowed_extension in allowed_extensions ] self.allowed_extensions = allowed_extensions if message is not None: self.message = message if code is not None: self.code = code def __call__(self, value): extension = Path(value.name).suffix[1:].lower() if ( self.allowed_extensions is not None and extension not in self.allowed_extensions ): raise ValidationError( self.message, code=self.code, params={ "extension": extension, "allowed_extensions": ", ".join(self.allowed_extensions), "value": value, }, ) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.allowed_extensions == other.allowed_extensions and self.message == other.message and self.code == other.code ) def get_available_image_extensions(): try: from PIL import Image except ImportError: return [] else: Image.init() return [ext.lower()[1:] for ext in Image.EXTENSION] def validate_image_file_extension(value): return FileExtensionValidator(allowed_extensions=get_available_image_extensions())( value ) @deconstructible class ProhibitNullCharactersValidator: """Validate that the string doesn't contain the null character.""" message = _("Null characters are not allowed.") code = "null_characters_not_allowed" def __init__(self, message=None, code=None): if message is not None: self.message = message if code is not None: self.code = code def __call__(self, value): if "\x00" in str(value): raise ValidationError(self.message, code=self.code, params={"value": value}) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.message == other.message and self.code == other.code )
9f5af5744c489c73f3274fccdbc31fa34941703cfc6c162348fb309f5f6dafee
import datetime import io import json import mimetypes import os import re import sys import time from email.header import Header from http.client import responses from urllib.parse import quote, urlparse from django.conf import settings from django.core import signals, signing from django.core.exceptions import DisallowedRedirect from django.core.serializers.json import DjangoJSONEncoder from django.http.cookie import SimpleCookie from django.utils import timezone from django.utils.datastructures import CaseInsensitiveMapping from django.utils.encoding import iri_to_uri from django.utils.http import http_date from django.utils.regex_helper import _lazy_re_compile _charset_from_content_type_re = _lazy_re_compile( r";\s*charset=(?P<charset>[^\s;]+)", re.I ) class ResponseHeaders(CaseInsensitiveMapping): def __init__(self, data): """ Populate the initial data using __setitem__ to ensure values are correctly encoded. """ self._store = {} if data: for header, value in self._unpack_items(data): self[header] = value def _convert_to_charset(self, value, charset, mime_encode=False): """ Convert headers key/value to ascii/latin-1 native strings. `charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and `value` can't be represented in the given charset, apply MIME-encoding. """ try: if isinstance(value, str): # Ensure string is valid in given charset value.encode(charset) elif isinstance(value, bytes): # Convert bytestring using given charset value = value.decode(charset) else: value = str(value) # Ensure string is valid in given charset. value.encode(charset) if "\n" in value or "\r" in value: raise BadHeaderError( f"Header values can't contain newlines (got {value!r})" ) except UnicodeError as e: # Encoding to a string of the specified charset failed, but we # don't know what type that value was, or if it contains newlines, # which we may need to check for before sending it to be # encoded for multiple character sets. if (isinstance(value, bytes) and (b"\n" in value or b"\r" in value)) or ( isinstance(value, str) and ("\n" in value or "\r" in value) ): raise BadHeaderError( f"Header values can't contain newlines (got {value!r})" ) from e if mime_encode: value = Header(value, "utf-8", maxlinelen=sys.maxsize).encode() else: e.reason += ", HTTP response headers must be in %s format" % charset raise return value def __delitem__(self, key): self.pop(key) def __setitem__(self, key, value): key = self._convert_to_charset(key, "ascii") value = self._convert_to_charset(value, "latin-1", mime_encode=True) self._store[key.lower()] = (key, value) def pop(self, key, default=None): return self._store.pop(key.lower(), default) def setdefault(self, key, value): if key not in self: self[key] = value class BadHeaderError(ValueError): pass class HttpResponseBase: """ An HTTP response base class with dictionary-accessed headers. This class doesn't handle content. It should not be used directly. Use the HttpResponse and StreamingHttpResponse subclasses instead. """ status_code = 200 def __init__( self, content_type=None, status=None, reason=None, charset=None, headers=None ): self.headers = ResponseHeaders(headers) self._charset = charset if "Content-Type" not in self.headers: if content_type is None: content_type = f"text/html; charset={self.charset}" self.headers["Content-Type"] = content_type elif content_type: raise ValueError( "'headers' must not contain 'Content-Type' when the " "'content_type' parameter is provided." ) self._resource_closers = [] # This parameter is set by the handler. 
It's necessary to preserve the # historical behavior of request_finished. self._handler_class = None self.cookies = SimpleCookie() self.closed = False if status is not None: try: self.status_code = int(status) except (ValueError, TypeError): raise TypeError("HTTP status code must be an integer.") if not 100 <= self.status_code <= 599: raise ValueError("HTTP status code must be an integer from 100 to 599.") self._reason_phrase = reason @property def reason_phrase(self): if self._reason_phrase is not None: return self._reason_phrase # Leave self._reason_phrase unset in order to use the default # reason phrase for status code. return responses.get(self.status_code, "Unknown Status Code") @reason_phrase.setter def reason_phrase(self, value): self._reason_phrase = value @property def charset(self): if self._charset is not None: return self._charset # The Content-Type header may not yet be set, because the charset is # being inserted *into* it. if content_type := self.headers.get("Content-Type"): if matched := _charset_from_content_type_re.search(content_type): # Extract the charset and strip its double quotes. # Note that having parsed it from the Content-Type, we don't # store it back into the _charset for later intentionally, to # allow for the Content-Type to be switched again later. return matched["charset"].replace('"', "") return settings.DEFAULT_CHARSET @charset.setter def charset(self, value): self._charset = value def serialize_headers(self): """HTTP headers as a bytestring.""" return b"\r\n".join( [ key.encode("ascii") + b": " + value.encode("latin-1") for key, value in self.headers.items() ] ) __bytes__ = serialize_headers @property def _content_type_for_repr(self): return ( ', "%s"' % self.headers["Content-Type"] if "Content-Type" in self.headers else "" ) def __setitem__(self, header, value): self.headers[header] = value def __delitem__(self, header): del self.headers[header] def __getitem__(self, header): return self.headers[header] def has_header(self, header): """Case-insensitive check for a header.""" return header in self.headers __contains__ = has_header def items(self): return self.headers.items() def get(self, header, alternate=None): return self.headers.get(header, alternate) def set_cookie( self, key, value="", max_age=None, expires=None, path="/", domain=None, secure=False, httponly=False, samesite=None, ): """ Set a cookie. ``expires`` can be: - a string in the correct format, - a naive ``datetime.datetime`` object in UTC, - an aware ``datetime.datetime`` object in any time zone. If it is a ``datetime.datetime`` object then calculate ``max_age``. ``max_age`` can be: - int/float specifying seconds, - ``datetime.timedelta`` object. """ self.cookies[key] = value if expires is not None: if isinstance(expires, datetime.datetime): if timezone.is_naive(expires): expires = timezone.make_aware(expires, datetime.timezone.utc) delta = expires - datetime.datetime.now(tz=datetime.timezone.utc) # Add one second so the date matches exactly (a fraction of # time gets lost between converting to a timedelta and # then the date string). delta = delta + datetime.timedelta(seconds=1) # Just set max_age - the max_age logic will set expires. 
expires = None if max_age is not None: raise ValueError("'expires' and 'max_age' can't be used together.") max_age = max(0, delta.days * 86400 + delta.seconds) else: self.cookies[key]["expires"] = expires else: self.cookies[key]["expires"] = "" if max_age is not None: if isinstance(max_age, datetime.timedelta): max_age = max_age.total_seconds() self.cookies[key]["max-age"] = int(max_age) # IE requires expires, so set it if hasn't been already. if not expires: self.cookies[key]["expires"] = http_date(time.time() + max_age) if path is not None: self.cookies[key]["path"] = path if domain is not None: self.cookies[key]["domain"] = domain if secure: self.cookies[key]["secure"] = True if httponly: self.cookies[key]["httponly"] = True if samesite: if samesite.lower() not in ("lax", "none", "strict"): raise ValueError('samesite must be "lax", "none", or "strict".') self.cookies[key]["samesite"] = samesite def setdefault(self, key, value): """Set a header unless it has already been set.""" self.headers.setdefault(key, value) def set_signed_cookie(self, key, value, salt="", **kwargs): value = signing.get_cookie_signer(salt=key + salt).sign(value) return self.set_cookie(key, value, **kwargs) def delete_cookie(self, key, path="/", domain=None, samesite=None): # Browsers can ignore the Set-Cookie header if the cookie doesn't use # the secure flag and: # - the cookie name starts with "__Host-" or "__Secure-", or # - the samesite is "none". secure = key.startswith(("__Secure-", "__Host-")) or ( samesite and samesite.lower() == "none" ) self.set_cookie( key, max_age=0, path=path, domain=domain, secure=secure, expires="Thu, 01 Jan 1970 00:00:00 GMT", samesite=samesite, ) # Common methods used by subclasses def make_bytes(self, value): """Turn a value into a bytestring encoded in the output charset.""" # Per PEP 3333, this response body must be bytes. To avoid returning # an instance of a subclass, this function returns `bytes(value)`. # This doesn't make a copy when `value` already contains bytes. # Handle string types -- we can't rely on force_bytes here because: # - Python attempts str conversion first # - when self._charset != 'utf-8' it re-encodes the content if isinstance(value, (bytes, memoryview)): return bytes(value) if isinstance(value, str): return bytes(value.encode(self.charset)) # Handle non-string types. return str(value).encode(self.charset) # These methods partially implement the file-like object interface. # See https://docs.python.org/library/io.html#io.IOBase # The WSGI server must call this method upon completion of the request. # See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html def close(self): for closer in self._resource_closers: try: closer() except Exception: pass # Free resources that were still referenced. self._resource_closers.clear() self.closed = True signals.request_finished.send(sender=self._handler_class) def write(self, content): raise OSError("This %s instance is not writable" % self.__class__.__name__) def flush(self): pass def tell(self): raise OSError( "This %s instance cannot tell its position" % self.__class__.__name__ ) # These methods partially implement a stream-like object interface. # See https://docs.python.org/library/io.html#io.IOBase def readable(self): return False def seekable(self): return False def writable(self): return False def writelines(self, lines): raise OSError("This %s instance is not writable" % self.__class__.__name__) class HttpResponse(HttpResponseBase): """ An HTTP response class with a string as content. 
This content can be read, appended to, or replaced. """ streaming = False non_picklable_attrs = frozenset( [ "resolver_match", # Non-picklable attributes added by test clients. "client", "context", "json", "templates", ] ) def __init__(self, content=b"", *args, **kwargs): super().__init__(*args, **kwargs) # Content is a bytestring. See the `content` property methods. self.content = content def __getstate__(self): obj_dict = self.__dict__.copy() for attr in self.non_picklable_attrs: if attr in obj_dict: del obj_dict[attr] return obj_dict def __repr__(self): return "<%(cls)s status_code=%(status_code)d%(content_type)s>" % { "cls": self.__class__.__name__, "status_code": self.status_code, "content_type": self._content_type_for_repr, } def serialize(self): """Full HTTP message, including headers, as a bytestring.""" return self.serialize_headers() + b"\r\n\r\n" + self.content __bytes__ = serialize @property def content(self): return b"".join(self._container) @content.setter def content(self, value): # Consume iterators upon assignment to allow repeated iteration. if hasattr(value, "__iter__") and not isinstance( value, (bytes, memoryview, str) ): content = b"".join(self.make_bytes(chunk) for chunk in value) if hasattr(value, "close"): try: value.close() except Exception: pass else: content = self.make_bytes(value) # Create a list of properly encoded bytestrings to support write(). self._container = [content] def __iter__(self): return iter(self._container) def write(self, content): self._container.append(self.make_bytes(content)) def tell(self): return len(self.content) def getvalue(self): return self.content def writable(self): return True def writelines(self, lines): for line in lines: self.write(line) class StreamingHttpResponse(HttpResponseBase): """ A streaming HTTP response class with an iterator as content. This should only be iterated once, when the response is streamed to the client. However, it can be appended to or replaced with a new iterator that wraps the original content (or yields entirely new content). """ streaming = True def __init__(self, streaming_content=(), *args, **kwargs): super().__init__(*args, **kwargs) # `streaming_content` should be an iterable of bytestrings. # See the `streaming_content` property methods. self.streaming_content = streaming_content def __repr__(self): return "<%(cls)s status_code=%(status_code)d%(content_type)s>" % { "cls": self.__class__.__qualname__, "status_code": self.status_code, "content_type": self._content_type_for_repr, } @property def content(self): raise AttributeError( "This %s instance has no `content` attribute. Use " "`streaming_content` instead." % self.__class__.__name__ ) @property def streaming_content(self): return map(self.make_bytes, self._iterator) @streaming_content.setter def streaming_content(self, value): self._set_streaming_content(value) def _set_streaming_content(self, value): # Ensure we can never iterate on "value" more than once. self._iterator = iter(value) if hasattr(value, "close"): self._resource_closers.append(value.close) def __iter__(self): return self.streaming_content def getvalue(self): return b"".join(self.streaming_content) class FileResponse(StreamingHttpResponse): """ A streaming HTTP response class optimized for files. 
""" block_size = 4096 def __init__(self, *args, as_attachment=False, filename="", **kwargs): self.as_attachment = as_attachment self.filename = filename self._no_explicit_content_type = ( "content_type" not in kwargs or kwargs["content_type"] is None ) super().__init__(*args, **kwargs) def _set_streaming_content(self, value): if not hasattr(value, "read"): self.file_to_stream = None return super()._set_streaming_content(value) self.file_to_stream = filelike = value if hasattr(filelike, "close"): self._resource_closers.append(filelike.close) value = iter(lambda: filelike.read(self.block_size), b"") self.set_headers(filelike) super()._set_streaming_content(value) def set_headers(self, filelike): """ Set some common response headers (Content-Length, Content-Type, and Content-Disposition) based on the `filelike` response content. """ filename = getattr(filelike, "name", "") filename = filename if isinstance(filename, str) else "" seekable = hasattr(filelike, "seek") and ( not hasattr(filelike, "seekable") or filelike.seekable() ) if hasattr(filelike, "tell"): if seekable: initial_position = filelike.tell() filelike.seek(0, io.SEEK_END) self.headers["Content-Length"] = filelike.tell() - initial_position filelike.seek(initial_position) elif hasattr(filelike, "getbuffer"): self.headers["Content-Length"] = ( filelike.getbuffer().nbytes - filelike.tell() ) elif os.path.exists(filename): self.headers["Content-Length"] = ( os.path.getsize(filename) - filelike.tell() ) elif seekable: self.headers["Content-Length"] = sum( iter(lambda: len(filelike.read(self.block_size)), 0) ) filelike.seek(-int(self.headers["Content-Length"]), io.SEEK_END) filename = os.path.basename(self.filename or filename) if self._no_explicit_content_type: if filename: content_type, encoding = mimetypes.guess_type(filename) # Encoding isn't set to prevent browsers from automatically # uncompressing files. 
content_type = { "bzip2": "application/x-bzip", "gzip": "application/gzip", "xz": "application/x-xz", }.get(encoding, content_type) self.headers["Content-Type"] = ( content_type or "application/octet-stream" ) else: self.headers["Content-Type"] = "application/octet-stream" if filename: disposition = "attachment" if self.as_attachment else "inline" try: filename.encode("ascii") file_expr = 'filename="{}"'.format( filename.replace("\\", "\\\\").replace('"', r"\"") ) except UnicodeEncodeError: file_expr = "filename*=utf-8''{}".format(quote(filename)) self.headers["Content-Disposition"] = "{}; {}".format( disposition, file_expr ) elif self.as_attachment: self.headers["Content-Disposition"] = "attachment" class HttpResponseRedirectBase(HttpResponse): allowed_schemes = ["http", "https", "ftp"] def __init__(self, redirect_to, *args, **kwargs): super().__init__(*args, **kwargs) self["Location"] = iri_to_uri(redirect_to) parsed = urlparse(str(redirect_to)) if parsed.scheme and parsed.scheme not in self.allowed_schemes: raise DisallowedRedirect( "Unsafe redirect to URL with protocol '%s'" % parsed.scheme ) url = property(lambda self: self["Location"]) def __repr__(self): return ( '<%(cls)s status_code=%(status_code)d%(content_type)s, url="%(url)s">' % { "cls": self.__class__.__name__, "status_code": self.status_code, "content_type": self._content_type_for_repr, "url": self.url, } ) class HttpResponseRedirect(HttpResponseRedirectBase): status_code = 302 class HttpResponsePermanentRedirect(HttpResponseRedirectBase): status_code = 301 class HttpResponseNotModified(HttpResponse): status_code = 304 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) del self["content-type"] @HttpResponse.content.setter def content(self, value): if value: raise AttributeError( "You cannot set content to a 304 (Not Modified) response" ) self._container = [] class HttpResponseBadRequest(HttpResponse): status_code = 400 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods, *args, **kwargs): super().__init__(*args, **kwargs) self["Allow"] = ", ".join(permitted_methods) def __repr__(self): return "<%(cls)s [%(methods)s] status_code=%(status_code)d%(content_type)s>" % { "cls": self.__class__.__name__, "status_code": self.status_code, "content_type": self._content_type_for_repr, "methods": self["Allow"], } class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): status_code = 500 class Http404(Exception): pass class JsonResponse(HttpResponse): """ An HTTP response class that consumes data to be serialized to JSON. :param data: Data to be dumped into json. By default only ``dict`` objects are allowed to be passed due to a security flaw before ECMAScript 5. See the ``safe`` parameter for more information. :param encoder: Should be a json encoder class. Defaults to ``django.core.serializers.json.DjangoJSONEncoder``. :param safe: Controls if only ``dict`` objects may be serialized. Defaults to ``True``. :param json_dumps_params: A dictionary of kwargs passed to json.dumps(). """ def __init__( self, data, encoder=DjangoJSONEncoder, safe=True, json_dumps_params=None, **kwargs, ): if safe and not isinstance(data, dict): raise TypeError( "In order to allow non-dict objects to be serialized set the " "safe parameter to False." 
) if json_dumps_params is None: json_dumps_params = {} kwargs.setdefault("content_type", "application/json") data = json.dumps(data, cls=encoder, **json_dumps_params) super().__init__(content=data, **kwargs)
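# --- Editor's usage sketch (illustrative only; not part of this module). ---
# A minimal pair of views exercising JsonResponse and FileResponse; the view
# names and the "report.pdf" path are hypothetical, and a configured Django
# settings module is assumed.
from django.http import FileResponse, JsonResponse


def api_status(request):
    # By default only dict payloads are allowed; pass safe=False to serialize
    # a top-level list instead.
    return JsonResponse({"status": "ok", "items": [1, 2, 3]})


def download_report(request):
    # FileResponse streams the file in block_size chunks and fills in
    # Content-Length, Content-Type, and Content-Disposition via set_headers().
    return FileResponse(open("report.pdf", "rb"), as_attachment=True)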
837fe1d643fdc8fdc1b00f27f60d1d0e8e990dcf134ee87a06278c7da4371b86
import codecs import copy from io import BytesIO from itertools import chain from urllib.parse import parse_qsl, quote, urlencode, urljoin, urlsplit from django.conf import settings from django.core import signing from django.core.exceptions import ( DisallowedHost, ImproperlyConfigured, RequestDataTooBig, TooManyFieldsSent, ) from django.core.files import uploadhandler from django.http.multipartparser import MultiPartParser, MultiPartParserError from django.utils.datastructures import ( CaseInsensitiveMapping, ImmutableList, MultiValueDict, ) from django.utils.encoding import escape_uri_path, iri_to_uri from django.utils.functional import cached_property from django.utils.http import is_same_domain, parse_header_parameters from django.utils.regex_helper import _lazy_re_compile RAISE_ERROR = object() host_validation_re = _lazy_re_compile( r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:[0-9]+)?$" ) class UnreadablePostError(OSError): pass class RawPostDataException(Exception): """ You cannot access raw_post_data from a request that has multipart/* POST data if it has been accessed via POST, FILES, etc.. """ pass class HttpRequest: """A basic HTTP request.""" # The encoding used in GET/POST dicts. None means use default setting. _encoding = None _upload_handlers = [] non_picklable_attrs = frozenset(["resolver_match", "_stream"]) def __init__(self): # WARNING: The `WSGIRequest` subclass doesn't call `super`. # Any variable assignment made here should also happen in # `WSGIRequest.__init__()`. self.GET = QueryDict(mutable=True) self.POST = QueryDict(mutable=True) self.COOKIES = {} self.META = {} self.FILES = MultiValueDict() self.path = "" self.path_info = "" self.method = None self.resolver_match = None self.content_type = None self.content_params = None def __repr__(self): if self.method is None or not self.get_full_path(): return "<%s>" % self.__class__.__name__ return "<%s: %s %r>" % ( self.__class__.__name__, self.method, self.get_full_path(), ) def __getstate__(self): obj_dict = self.__dict__.copy() for attr in self.non_picklable_attrs: if attr in obj_dict: del obj_dict[attr] return obj_dict def __deepcopy__(self, memo): obj = copy.copy(self) for attr in self.non_picklable_attrs: if hasattr(self, attr): setattr(obj, attr, copy.deepcopy(getattr(self, attr), memo)) memo[id(self)] = obj return obj @cached_property def headers(self): return HttpHeaders(self.META) @cached_property def accepted_types(self): """Return a list of MediaType instances.""" return parse_accept_header(self.headers.get("Accept", "*/*")) def accepts(self, media_type): return any( accepted_type.match(media_type) for accepted_type in self.accepted_types ) def _set_content_type_params(self, meta): """Set content_type, content_params, and encoding.""" self.content_type, self.content_params = parse_header_parameters( meta.get("CONTENT_TYPE", "") ) if "charset" in self.content_params: try: codecs.lookup(self.content_params["charset"]) except LookupError: pass else: self.encoding = self.content_params["charset"] def _get_raw_host(self): """ Return the HTTP host using the environment or request headers. Skip allowed hosts protection, so may return an insecure host. """ # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ("HTTP_X_FORWARDED_HOST" in self.META): host = self.META["HTTP_X_FORWARDED_HOST"] elif "HTTP_HOST" in self.META: host = self.META["HTTP_HOST"] else: # Reconstruct the host using the algorithm from PEP 333. 
host = self.META["SERVER_NAME"] server_port = self.get_port() if server_port != ("443" if self.is_secure() else "80"): host = "%s:%s" % (host, server_port) return host def get_host(self): """Return the HTTP host using the environment or request headers.""" host = self._get_raw_host() # Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True. allowed_hosts = settings.ALLOWED_HOSTS if settings.DEBUG and not allowed_hosts: allowed_hosts = [".localhost", "127.0.0.1", "[::1]"] domain, port = split_domain_port(host) if domain and validate_host(domain, allowed_hosts): return host else: msg = "Invalid HTTP_HOST header: %r." % host if domain: msg += " You may need to add %r to ALLOWED_HOSTS." % domain else: msg += ( " The domain name provided is not valid according to RFC 1034/1035." ) raise DisallowedHost(msg) def get_port(self): """Return the port number for the request as a string.""" if settings.USE_X_FORWARDED_PORT and "HTTP_X_FORWARDED_PORT" in self.META: port = self.META["HTTP_X_FORWARDED_PORT"] else: port = self.META["SERVER_PORT"] return str(port) def get_full_path(self, force_append_slash=False): return self._get_full_path(self.path, force_append_slash) def get_full_path_info(self, force_append_slash=False): return self._get_full_path(self.path_info, force_append_slash) def _get_full_path(self, path, force_append_slash): # RFC 3986 requires query string arguments to be in the ASCII range. # Rather than crash if this doesn't happen, we encode defensively. return "%s%s%s" % ( escape_uri_path(path), "/" if force_append_slash and not path.endswith("/") else "", ("?" + iri_to_uri(self.META.get("QUERY_STRING", ""))) if self.META.get("QUERY_STRING", "") else "", ) def get_signed_cookie(self, key, default=RAISE_ERROR, salt="", max_age=None): """ Attempt to return a signed cookie. If the signature fails or the cookie has expired, raise an exception, unless the `default` argument is provided, in which case return that value. """ try: cookie_value = self.COOKIES[key] except KeyError: if default is not RAISE_ERROR: return default else: raise try: value = signing.get_cookie_signer(salt=key + salt).unsign( cookie_value, max_age=max_age ) except signing.BadSignature: if default is not RAISE_ERROR: return default else: raise return value def build_absolute_uri(self, location=None): """ Build an absolute URI from the location and the variables available in this request. If no ``location`` is specified, build the absolute URI using request.get_full_path(). If the location is absolute, convert it to an RFC 3987 compliant URI and return it. If location is relative or is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base URL constructed from the request variables. """ if location is None: # Make it an absolute url (but schemeless and domainless) for the # edge case that the path starts with '//'. location = "//%s" % self.get_full_path() else: # Coerce lazy locations. location = str(location) bits = urlsplit(location) if not (bits.scheme and bits.netloc): # Handle the simple, most common case. If the location is absolute # and a scheme or host (netloc) isn't provided, skip an expensive # urljoin() as long as no path segments are '.' or '..'. if ( bits.path.startswith("/") and not bits.scheme and not bits.netloc and "/./" not in bits.path and "/../" not in bits.path ): # If location starts with '//' but has no netloc, reuse the # schema and netloc from the current request. Strip the double # slashes and continue as if it wasn't specified. 
if location.startswith("//"): location = location[2:] location = self._current_scheme_host + location else: # Join the constructed URL with the provided location, which # allows the provided location to apply query strings to the # base path. location = urljoin(self._current_scheme_host + self.path, location) return iri_to_uri(location) @cached_property def _current_scheme_host(self): return "{}://{}".format(self.scheme, self.get_host()) def _get_scheme(self): """ Hook for subclasses like WSGIRequest to implement. Return 'http' by default. """ return "http" @property def scheme(self): if settings.SECURE_PROXY_SSL_HEADER: try: header, secure_value = settings.SECURE_PROXY_SSL_HEADER except ValueError: raise ImproperlyConfigured( "The SECURE_PROXY_SSL_HEADER setting must be a tuple containing " "two values." ) header_value = self.META.get(header) if header_value is not None: header_value, *_ = header_value.split(",", 1) return "https" if header_value.strip() == secure_value else "http" return self._get_scheme() def is_secure(self): return self.scheme == "https" @property def encoding(self): return self._encoding @encoding.setter def encoding(self, val): """ Set the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, remove and recreate it on the next access (so that it is decoded correctly). """ self._encoding = val if hasattr(self, "GET"): del self.GET if hasattr(self, "_post"): del self._post def _initialize_handlers(self): self._upload_handlers = [ uploadhandler.load_handler(handler, self) for handler in settings.FILE_UPLOAD_HANDLERS ] @property def upload_handlers(self): if not self._upload_handlers: # If there are no upload handlers defined, initialize them from settings. self._initialize_handlers() return self._upload_handlers @upload_handlers.setter def upload_handlers(self, upload_handlers): if hasattr(self, "_files"): raise AttributeError( "You cannot set the upload handlers after the upload has been " "processed." ) self._upload_handlers = upload_handlers def parse_file_upload(self, META, post_data): """Return a tuple of (POST QueryDict, FILES MultiValueDict).""" self.upload_handlers = ImmutableList( self.upload_handlers, warning=( "You cannot alter upload handlers after the upload has been " "processed." ), ) parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse() @property def body(self): if not hasattr(self, "_body"): if self._read_started: raise RawPostDataException( "You cannot access body after reading from request's data stream" ) # Limit the maximum request data size that will be handled in-memory. if ( settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and int(self.META.get("CONTENT_LENGTH") or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE ): raise RequestDataTooBig( "Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE." 
) try: self._body = self.read() except OSError as e: raise UnreadablePostError(*e.args) from e finally: self._stream.close() self._stream = BytesIO(self._body) return self._body def _mark_post_parse_error(self): self._post = QueryDict() self._files = MultiValueDict() def _load_post_and_files(self): """Populate self._post and self._files if the content-type is a form type""" if self.method != "POST": self._post, self._files = ( QueryDict(encoding=self._encoding), MultiValueDict(), ) return if self._read_started and not hasattr(self, "_body"): self._mark_post_parse_error() return if self.content_type == "multipart/form-data": if hasattr(self, "_body"): # Use already read data data = BytesIO(self._body) else: data = self try: self._post, self._files = self.parse_file_upload(self.META, data) except MultiPartParserError: # An error occurred while parsing POST data. Since when # formatting the error the request handler might access # self.POST, set self._post and self._file to prevent # attempts to parse POST data again. self._mark_post_parse_error() raise elif self.content_type == "application/x-www-form-urlencoded": self._post, self._files = ( QueryDict(self.body, encoding=self._encoding), MultiValueDict(), ) else: self._post, self._files = ( QueryDict(encoding=self._encoding), MultiValueDict(), ) def close(self): if hasattr(self, "_files"): for f in chain.from_iterable(list_[1] for list_ in self._files.lists()): f.close() # File-like and iterator interface. # # Expects self._stream to be set to an appropriate source of bytes by # a corresponding request subclass (e.g. WSGIRequest). # Also when request data has already been read by request.POST or # request.body, self._stream points to a BytesIO instance # containing that data. def read(self, *args, **kwargs): self._read_started = True try: return self._stream.read(*args, **kwargs) except OSError as e: raise UnreadablePostError(*e.args) from e def readline(self, *args, **kwargs): self._read_started = True try: return self._stream.readline(*args, **kwargs) except OSError as e: raise UnreadablePostError(*e.args) from e def __iter__(self): return iter(self.readline, b"") def readlines(self): return list(self) class HttpHeaders(CaseInsensitiveMapping): HTTP_PREFIX = "HTTP_" # PEP 333 gives two headers which aren't prepended with HTTP_. UNPREFIXED_HEADERS = {"CONTENT_TYPE", "CONTENT_LENGTH"} def __init__(self, environ): headers = {} for header, value in environ.items(): name = self.parse_header_name(header) if name: headers[name] = value super().__init__(headers) def __getitem__(self, key): """Allow header lookup using underscores in place of hyphens.""" return super().__getitem__(key.replace("_", "-")) @classmethod def parse_header_name(cls, header): if header.startswith(cls.HTTP_PREFIX): header = header[len(cls.HTTP_PREFIX) :] elif header not in cls.UNPREFIXED_HEADERS: return None return header.replace("_", "-").title() class QueryDict(MultiValueDict): """ A specialized MultiValueDict which represents a query string. A QueryDict can be used to represent GET or POST data. It subclasses MultiValueDict since keys in such data can be repeated, for instance in the data from a form with a <select multiple> field. By default QueryDicts are immutable, though the copy() method will always return a mutable copy. Both keys and values set on this class are converted from the given encoding (DEFAULT_CHARSET by default) to str. 
""" # These are both reset in __init__, but is specified here at the class # level so that unpickling will have valid values _mutable = True _encoding = None def __init__(self, query_string=None, mutable=False, encoding=None): super().__init__() self.encoding = encoding or settings.DEFAULT_CHARSET query_string = query_string or "" parse_qsl_kwargs = { "keep_blank_values": True, "encoding": self.encoding, "max_num_fields": settings.DATA_UPLOAD_MAX_NUMBER_FIELDS, } if isinstance(query_string, bytes): # query_string normally contains URL-encoded data, a subset of ASCII. try: query_string = query_string.decode(self.encoding) except UnicodeDecodeError: # ... but some user agents are misbehaving :-( query_string = query_string.decode("iso-8859-1") try: for key, value in parse_qsl(query_string, **parse_qsl_kwargs): self.appendlist(key, value) except ValueError as e: # ValueError can also be raised if the strict_parsing argument to # parse_qsl() is True. As that is not used by Django, assume that # the exception was raised by exceeding the value of max_num_fields # instead of fragile checks of exception message strings. raise TooManyFieldsSent( "The number of GET/POST parameters exceeded " "settings.DATA_UPLOAD_MAX_NUMBER_FIELDS." ) from e self._mutable = mutable @classmethod def fromkeys(cls, iterable, value="", mutable=False, encoding=None): """ Return a new QueryDict with keys (may be repeated) from an iterable and values from value. """ q = cls("", mutable=True, encoding=encoding) for key in iterable: q.appendlist(key, value) if not mutable: q._mutable = False return q @property def encoding(self): if self._encoding is None: self._encoding = settings.DEFAULT_CHARSET return self._encoding @encoding.setter def encoding(self, value): self._encoding = value def _assert_mutable(self): if not self._mutable: raise AttributeError("This QueryDict instance is immutable") def __setitem__(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super().__setitem__(key, value) def __delitem__(self, key): self._assert_mutable() super().__delitem__(key) def __copy__(self): result = self.__class__("", mutable=True, encoding=self.encoding) for key, value in self.lists(): result.setlist(key, value) return result def __deepcopy__(self, memo): result = self.__class__("", mutable=True, encoding=self.encoding) memo[id(self)] = result for key, value in self.lists(): result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def setlist(self, key, list_): self._assert_mutable() key = bytes_to_text(key, self.encoding) list_ = [bytes_to_text(elt, self.encoding) for elt in list_] super().setlist(key, list_) def setlistdefault(self, key, default_list=None): self._assert_mutable() return super().setlistdefault(key, default_list) def appendlist(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super().appendlist(key, value) def pop(self, key, *args): self._assert_mutable() return super().pop(key, *args) def popitem(self): self._assert_mutable() return super().popitem() def clear(self): self._assert_mutable() super().clear() def setdefault(self, key, default=None): self._assert_mutable() key = bytes_to_text(key, self.encoding) default = bytes_to_text(default, self.encoding) return super().setdefault(key, default) def copy(self): """Return a mutable copy of this object.""" return self.__deepcopy__({}) def urlencode(self, safe=None): """ Return an encoded 
string of all query string arguments. `safe` specifies characters which don't require quoting, for example:: >>> q = QueryDict(mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/' """ output = [] if safe: safe = safe.encode(self.encoding) def encode(k, v): return "%s=%s" % ((quote(k, safe), quote(v, safe))) else: def encode(k, v): return urlencode({k: v}) for k, list_ in self.lists(): output.extend( encode(k.encode(self.encoding), str(v).encode(self.encoding)) for v in list_ ) return "&".join(output) class MediaType: def __init__(self, media_type_raw_line): full_type, self.params = parse_header_parameters( media_type_raw_line if media_type_raw_line else "" ) self.main_type, _, self.sub_type = full_type.partition("/") def __str__(self): params_str = "".join("; %s=%s" % (k, v) for k, v in self.params.items()) return "%s%s%s" % ( self.main_type, ("/%s" % self.sub_type) if self.sub_type else "", params_str, ) def __repr__(self): return "<%s: %s>" % (self.__class__.__qualname__, self) @property def is_all_types(self): return self.main_type == "*" and self.sub_type == "*" def match(self, other): if self.is_all_types: return True other = MediaType(other) if self.main_type == other.main_type and self.sub_type in {"*", other.sub_type}: return True return False # It's neither necessary nor appropriate to use # django.utils.encoding.force_str() for parsing URLs and form inputs. Thus, # this slightly more restricted function, used by QueryDict. def bytes_to_text(s, encoding): """ Convert bytes objects to strings, using the given encoding. Illegally encoded input characters are replaced with Unicode "unknown" codepoint (\ufffd). Return any non-bytes objects without change. """ if isinstance(s, bytes): return str(s, encoding, "replace") else: return s def split_domain_port(host): """ Return a (domain, port) tuple from a given host. Returned domain is lowercased. If the host is invalid, the domain will be empty. """ host = host.lower() if not host_validation_re.match(host): return "", "" if host[-1] == "]": # It's an IPv6 address without a port. return host, "" bits = host.rsplit(":", 1) domain, port = bits if len(bits) == 2 else (bits[0], "") # Remove a trailing dot (if present) from the domain. domain = domain[:-1] if domain.endswith(".") else domain return domain, port def validate_host(host, allowed_hosts): """ Validate the given host for this site. Check that the host looks valid and matches a host or host pattern in the given list of ``allowed_hosts``. Any pattern beginning with a period matches a domain and all its subdomains (e.g. ``.example.com`` matches ``example.com`` and any subdomain), ``*`` matches anything, and anything else must match exactly. Note: This function assumes that the given host is lowercased and has already had the port, if any, stripped off. Return ``True`` for a valid host, ``False`` otherwise. """ return any( pattern == "*" or is_same_domain(host, pattern) for pattern in allowed_hosts ) def parse_accept_header(header): return [MediaType(token) for token in header.split(",") if token.strip()]
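# --- Editor's usage sketch (illustrative only; assumes a configured Django
# settings module so DEFAULT_CHARSET and related settings resolve). ---
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host

q = QueryDict("a=1&a=2&b=hello")
q.getlist("a")   # ['1', '2'] -- repeated keys are preserved
q["a"]           # '2'        -- plain lookup returns the last value
q.urlencode()    # 'a=1&a=2&b=hello'

# split_domain_port() and validate_host() back HttpRequest.get_host():
domain, port = split_domain_port("www.example.com:8000")  # ('www.example.com', '8000')
validate_host(domain, [".example.com"])  # True -- matches the domain and its subdomains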
2300e6f099c36ea4c63cdc51d66ed77585d2bacb7b31d0033e1904fd0774df6f
import asyncio import logging from django.core.exceptions import ImproperlyConfigured from django.http import ( HttpResponse, HttpResponseGone, HttpResponseNotAllowed, HttpResponsePermanentRedirect, HttpResponseRedirect, ) from django.template.response import TemplateResponse from django.urls import reverse from django.utils.decorators import classonlymethod from django.utils.functional import classproperty logger = logging.getLogger("django.request") class ContextMixin: """ A default context mixin that passes the keyword arguments received by get_context_data() as the template context. """ extra_context = None def get_context_data(self, **kwargs): kwargs.setdefault("view", self) if self.extra_context is not None: kwargs.update(self.extra_context) return kwargs class View: """ Intentionally simple parent class for all views. Only implements dispatch-by-method and simple sanity checking. """ http_method_names = [ "get", "post", "put", "patch", "delete", "head", "options", "trace", ] def __init__(self, **kwargs): """ Constructor. Called in the URLconf; can contain helpful extra keyword arguments, and other things. """ # Go through keyword arguments, and either save their values to our # instance, or raise an error. for key, value in kwargs.items(): setattr(self, key, value) @classproperty def view_is_async(cls): handlers = [ getattr(cls, method) for method in cls.http_method_names if (method != "options" and hasattr(cls, method)) ] if not handlers: return False is_async = asyncio.iscoroutinefunction(handlers[0]) if not all(asyncio.iscoroutinefunction(h) == is_async for h in handlers[1:]): raise ImproperlyConfigured( f"{cls.__qualname__} HTTP handlers must either be all sync or all " "async." ) return is_async @classonlymethod def as_view(cls, **initkwargs): """Main entry point for a request-response process.""" for key in initkwargs: if key in cls.http_method_names: raise TypeError( "The method name %s is not accepted as a keyword argument " "to %s()." % (key, cls.__name__) ) if not hasattr(cls, key): raise TypeError( "%s() received an invalid keyword %r. as_view " "only accepts arguments that are already " "attributes of the class." % (cls.__name__, key) ) def view(request, *args, **kwargs): self = cls(**initkwargs) self.setup(request, *args, **kwargs) if not hasattr(self, "request"): raise AttributeError( "%s instance has no 'request' attribute. Did you override " "setup() and forget to call super()?" % cls.__name__ ) return self.dispatch(request, *args, **kwargs) view.view_class = cls view.view_initkwargs = initkwargs # __name__ and __qualname__ are intentionally left unchanged as # view_class should be used to robustly determine the name of the view # instead. view.__doc__ = cls.__doc__ view.__module__ = cls.__module__ view.__annotations__ = cls.dispatch.__annotations__ # Copy possible attributes set by decorators, e.g. @csrf_exempt, from # the dispatch method. view.__dict__.update(cls.dispatch.__dict__) # Mark the callback if the view class is async. if cls.view_is_async: view._is_coroutine = asyncio.coroutines._is_coroutine return view def setup(self, request, *args, **kwargs): """Initialize attributes shared by all view methods.""" if hasattr(self, "get") and not hasattr(self, "head"): self.head = self.get self.request = request self.args = args self.kwargs = kwargs def dispatch(self, request, *args, **kwargs): # Try to dispatch to the right method; if a method doesn't exist, # defer to the error handler. 
Also defer to the error handler if the # request method isn't on the approved list. if request.method.lower() in self.http_method_names: handler = getattr( self, request.method.lower(), self.http_method_not_allowed ) else: handler = self.http_method_not_allowed return handler(request, *args, **kwargs) def http_method_not_allowed(self, request, *args, **kwargs): logger.warning( "Method Not Allowed (%s): %s", request.method, request.path, extra={"status_code": 405, "request": request}, ) response = HttpResponseNotAllowed(self._allowed_methods()) if self.view_is_async: async def func(): return response return func() else: return response def options(self, request, *args, **kwargs): """Handle responding to requests for the OPTIONS HTTP verb.""" response = HttpResponse() response.headers["Allow"] = ", ".join(self._allowed_methods()) response.headers["Content-Length"] = "0" if self.view_is_async: async def func(): return response return func() else: return response def _allowed_methods(self): return [m.upper() for m in self.http_method_names if hasattr(self, m)] class TemplateResponseMixin: """A mixin that can be used to render a template.""" template_name = None template_engine = None response_class = TemplateResponse content_type = None def render_to_response(self, context, **response_kwargs): """ Return a response, using the `response_class` for this view, with a template rendered with the given context. Pass response_kwargs to the constructor of the response class. """ response_kwargs.setdefault("content_type", self.content_type) return self.response_class( request=self.request, template=self.get_template_names(), context=context, using=self.template_engine, **response_kwargs, ) def get_template_names(self): """ Return a list of template names to be used for the request. Must return a list. May not be called if render_to_response() is overridden. """ if self.template_name is None: raise ImproperlyConfigured( "TemplateResponseMixin requires either a definition of " "'template_name' or an implementation of 'get_template_names()'" ) else: return [self.template_name] class TemplateView(TemplateResponseMixin, ContextMixin, View): """ Render a template. Pass keyword arguments from the URLconf to the context. """ def get(self, request, *args, **kwargs): context = self.get_context_data(**kwargs) return self.render_to_response(context) class RedirectView(View): """Provide a redirect on any GET request.""" permanent = False url = None pattern_name = None query_string = False def get_redirect_url(self, *args, **kwargs): """ Return the URL redirect to. Keyword arguments from the URL pattern match generating the redirect request are provided as kwargs to this method. 
""" if self.url: url = self.url % kwargs elif self.pattern_name: url = reverse(self.pattern_name, args=args, kwargs=kwargs) else: return None args = self.request.META.get("QUERY_STRING", "") if args and self.query_string: url = "%s?%s" % (url, args) return url def get(self, request, *args, **kwargs): url = self.get_redirect_url(*args, **kwargs) if url: if self.permanent: return HttpResponsePermanentRedirect(url) else: return HttpResponseRedirect(url) else: logger.warning( "Gone: %s", request.path, extra={"status_code": 410, "request": request} ) return HttpResponseGone() def head(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def post(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def options(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def delete(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def put(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) def patch(self, request, *args, **kwargs): return self.get(request, *args, **kwargs)
c08726c11c838a132c1b652eb21f541da2e3d389acc1b94220e8792f661c0d05
""" LANG_INFO is a dictionary structure to provide meta information about languages. About name_local: capitalize it as if your language name was appearing inside a sentence in your language. The 'fallback' key can be used to specify a special fallback logic which doesn't follow the traditional 'fr-ca' -> 'fr' fallback logic. """ LANG_INFO = { "af": { "bidi": False, "code": "af", "name": "Afrikaans", "name_local": "Afrikaans", }, "ar": { "bidi": True, "code": "ar", "name": "Arabic", "name_local": "العربيّة", }, "ar-dz": { "bidi": True, "code": "ar-dz", "name": "Algerian Arabic", "name_local": "العربية الجزائرية", }, "ast": { "bidi": False, "code": "ast", "name": "Asturian", "name_local": "asturianu", }, "az": { "bidi": True, "code": "az", "name": "Azerbaijani", "name_local": "Azərbaycanca", }, "be": { "bidi": False, "code": "be", "name": "Belarusian", "name_local": "беларуская", }, "bg": { "bidi": False, "code": "bg", "name": "Bulgarian", "name_local": "български", }, "bn": { "bidi": False, "code": "bn", "name": "Bengali", "name_local": "বাংলা", }, "br": { "bidi": False, "code": "br", "name": "Breton", "name_local": "brezhoneg", }, "bs": { "bidi": False, "code": "bs", "name": "Bosnian", "name_local": "bosanski", }, "ca": { "bidi": False, "code": "ca", "name": "Catalan", "name_local": "català", }, "ckb": { "bidi": True, "code": "ckb", "name": "Central Kurdish (Sorani)", "name_local": "کوردی", }, "cs": { "bidi": False, "code": "cs", "name": "Czech", "name_local": "česky", }, "cy": { "bidi": False, "code": "cy", "name": "Welsh", "name_local": "Cymraeg", }, "da": { "bidi": False, "code": "da", "name": "Danish", "name_local": "dansk", }, "de": { "bidi": False, "code": "de", "name": "German", "name_local": "Deutsch", }, "dsb": { "bidi": False, "code": "dsb", "name": "Lower Sorbian", "name_local": "dolnoserbski", }, "el": { "bidi": False, "code": "el", "name": "Greek", "name_local": "Ελληνικά", }, "en": { "bidi": False, "code": "en", "name": "English", "name_local": "English", }, "en-au": { "bidi": False, "code": "en-au", "name": "Australian English", "name_local": "Australian English", }, "en-gb": { "bidi": False, "code": "en-gb", "name": "British English", "name_local": "British English", }, "eo": { "bidi": False, "code": "eo", "name": "Esperanto", "name_local": "Esperanto", }, "es": { "bidi": False, "code": "es", "name": "Spanish", "name_local": "español", }, "es-ar": { "bidi": False, "code": "es-ar", "name": "Argentinian Spanish", "name_local": "español de Argentina", }, "es-co": { "bidi": False, "code": "es-co", "name": "Colombian Spanish", "name_local": "español de Colombia", }, "es-mx": { "bidi": False, "code": "es-mx", "name": "Mexican Spanish", "name_local": "español de Mexico", }, "es-ni": { "bidi": False, "code": "es-ni", "name": "Nicaraguan Spanish", "name_local": "español de Nicaragua", }, "es-ve": { "bidi": False, "code": "es-ve", "name": "Venezuelan Spanish", "name_local": "español de Venezuela", }, "et": { "bidi": False, "code": "et", "name": "Estonian", "name_local": "eesti", }, "eu": { "bidi": False, "code": "eu", "name": "Basque", "name_local": "Basque", }, "fa": { "bidi": True, "code": "fa", "name": "Persian", "name_local": "فارسی", }, "fi": { "bidi": False, "code": "fi", "name": "Finnish", "name_local": "suomi", }, "fr": { "bidi": False, "code": "fr", "name": "French", "name_local": "français", }, "fy": { "bidi": False, "code": "fy", "name": "Frisian", "name_local": "frysk", }, "ga": { "bidi": False, "code": "ga", "name": "Irish", "name_local": "Gaeilge", }, "gd": { "bidi": 
False, "code": "gd", "name": "Scottish Gaelic", "name_local": "Gàidhlig", }, "gl": { "bidi": False, "code": "gl", "name": "Galician", "name_local": "galego", }, "he": { "bidi": True, "code": "he", "name": "Hebrew", "name_local": "עברית", }, "hi": { "bidi": False, "code": "hi", "name": "Hindi", "name_local": "हिंदी", }, "hr": { "bidi": False, "code": "hr", "name": "Croatian", "name_local": "Hrvatski", }, "hsb": { "bidi": False, "code": "hsb", "name": "Upper Sorbian", "name_local": "hornjoserbsce", }, "hu": { "bidi": False, "code": "hu", "name": "Hungarian", "name_local": "Magyar", }, "hy": { "bidi": False, "code": "hy", "name": "Armenian", "name_local": "հայերեն", }, "ia": { "bidi": False, "code": "ia", "name": "Interlingua", "name_local": "Interlingua", }, "io": { "bidi": False, "code": "io", "name": "Ido", "name_local": "ido", }, "id": { "bidi": False, "code": "id", "name": "Indonesian", "name_local": "Bahasa Indonesia", }, "ig": { "bidi": False, "code": "ig", "name": "Igbo", "name_local": "Asụsụ Ìgbò", }, "is": { "bidi": False, "code": "is", "name": "Icelandic", "name_local": "Íslenska", }, "it": { "bidi": False, "code": "it", "name": "Italian", "name_local": "italiano", }, "ja": { "bidi": False, "code": "ja", "name": "Japanese", "name_local": "日本語", }, "ka": { "bidi": False, "code": "ka", "name": "Georgian", "name_local": "ქართული", }, "kab": { "bidi": False, "code": "kab", "name": "Kabyle", "name_local": "taqbaylit", }, "kk": { "bidi": False, "code": "kk", "name": "Kazakh", "name_local": "Қазақ", }, "km": { "bidi": False, "code": "km", "name": "Khmer", "name_local": "Khmer", }, "kn": { "bidi": False, "code": "kn", "name": "Kannada", "name_local": "Kannada", }, "ko": { "bidi": False, "code": "ko", "name": "Korean", "name_local": "한국어", }, "ky": { "bidi": False, "code": "ky", "name": "Kyrgyz", "name_local": "Кыргызча", }, "lb": { "bidi": False, "code": "lb", "name": "Luxembourgish", "name_local": "Lëtzebuergesch", }, "lt": { "bidi": False, "code": "lt", "name": "Lithuanian", "name_local": "Lietuviškai", }, "lv": { "bidi": False, "code": "lv", "name": "Latvian", "name_local": "latviešu", }, "mk": { "bidi": False, "code": "mk", "name": "Macedonian", "name_local": "Македонски", }, "ml": { "bidi": False, "code": "ml", "name": "Malayalam", "name_local": "മലയാളം", }, "mn": { "bidi": False, "code": "mn", "name": "Mongolian", "name_local": "Mongolian", }, "mr": { "bidi": False, "code": "mr", "name": "Marathi", "name_local": "मराठी", }, "ms": { "bidi": False, "code": "ms", "name": "Malay", "name_local": "Bahasa Melayu", }, "my": { "bidi": False, "code": "my", "name": "Burmese", "name_local": "မြန်မာဘာသာ", }, "nb": { "bidi": False, "code": "nb", "name": "Norwegian Bokmal", "name_local": "norsk (bokmål)", }, "ne": { "bidi": False, "code": "ne", "name": "Nepali", "name_local": "नेपाली", }, "nl": { "bidi": False, "code": "nl", "name": "Dutch", "name_local": "Nederlands", }, "nn": { "bidi": False, "code": "nn", "name": "Norwegian Nynorsk", "name_local": "norsk (nynorsk)", }, "no": { "bidi": False, "code": "no", "name": "Norwegian", "name_local": "norsk", }, "os": { "bidi": False, "code": "os", "name": "Ossetic", "name_local": "Ирон", }, "pa": { "bidi": False, "code": "pa", "name": "Punjabi", "name_local": "Punjabi", }, "pl": { "bidi": False, "code": "pl", "name": "Polish", "name_local": "polski", }, "pt": { "bidi": False, "code": "pt", "name": "Portuguese", "name_local": "Português", }, "pt-br": { "bidi": False, "code": "pt-br", "name": "Brazilian Portuguese", "name_local": "Português Brasileiro", }, 
"ro": { "bidi": False, "code": "ro", "name": "Romanian", "name_local": "Română", }, "ru": { "bidi": False, "code": "ru", "name": "Russian", "name_local": "Русский", }, "sk": { "bidi": False, "code": "sk", "name": "Slovak", "name_local": "Slovensky", }, "sl": { "bidi": False, "code": "sl", "name": "Slovenian", "name_local": "Slovenščina", }, "sq": { "bidi": False, "code": "sq", "name": "Albanian", "name_local": "shqip", }, "sr": { "bidi": False, "code": "sr", "name": "Serbian", "name_local": "српски", }, "sr-latn": { "bidi": False, "code": "sr-latn", "name": "Serbian Latin", "name_local": "srpski (latinica)", }, "sv": { "bidi": False, "code": "sv", "name": "Swedish", "name_local": "svenska", }, "sw": { "bidi": False, "code": "sw", "name": "Swahili", "name_local": "Kiswahili", }, "ta": { "bidi": False, "code": "ta", "name": "Tamil", "name_local": "தமிழ்", }, "te": { "bidi": False, "code": "te", "name": "Telugu", "name_local": "తెలుగు", }, "tg": { "bidi": False, "code": "tg", "name": "Tajik", "name_local": "тоҷикӣ", }, "th": { "bidi": False, "code": "th", "name": "Thai", "name_local": "ภาษาไทย", }, "tk": { "bidi": False, "code": "tk", "name": "Turkmen", "name_local": "Türkmençe", }, "tr": { "bidi": False, "code": "tr", "name": "Turkish", "name_local": "Türkçe", }, "tt": { "bidi": False, "code": "tt", "name": "Tatar", "name_local": "Татарча", }, "udm": { "bidi": False, "code": "udm", "name": "Udmurt", "name_local": "Удмурт", }, "uk": { "bidi": False, "code": "uk", "name": "Ukrainian", "name_local": "Українська", }, "ur": { "bidi": True, "code": "ur", "name": "Urdu", "name_local": "اردو", }, "uz": { "bidi": False, "code": "uz", "name": "Uzbek", "name_local": "oʻzbek tili", }, "vi": { "bidi": False, "code": "vi", "name": "Vietnamese", "name_local": "Tiếng Việt", }, "zh-cn": { "fallback": ["zh-hans"], }, "zh-hans": { "bidi": False, "code": "zh-hans", "name": "Simplified Chinese", "name_local": "简体中文", }, "zh-hant": { "bidi": False, "code": "zh-hant", "name": "Traditional Chinese", "name_local": "繁體中文", }, "zh-hk": { "fallback": ["zh-hant"], }, "zh-mo": { "fallback": ["zh-hant"], }, "zh-my": { "fallback": ["zh-hans"], }, "zh-sg": { "fallback": ["zh-hans"], }, "zh-tw": { "fallback": ["zh-hant"], }, }
11b9900be772425f04a9540e546c32d516b9f8fd67f89dd41782b9e69dd5cce3
# This file is distributed under the same license as the Django package. # # The *_FORMAT strings use the Django date format syntax, # see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = "j F Y" TIME_FORMAT = "G:i" DATETIME_FORMAT = "j F Y، کاتژمێر G:i" YEAR_MONTH_FORMAT = "F Y" MONTH_DAY_FORMAT = "j F" SHORT_DATE_FORMAT = "Y/n/j" SHORT_DATETIME_FORMAT = "Y/n/j،‏ G:i" FIRST_DAY_OF_WEEK = 6 # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see https://docs.python.org/library/datetime.html#strftime-strptime-behavior # DATE_INPUT_FORMATS = # TIME_INPUT_FORMATS = # DATETIME_INPUT_FORMATS = DECIMAL_SEPARATOR = "." THOUSAND_SEPARATOR = "," # NUMBER_GROUPING =
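# --- Editor's usage sketch (illustrative only; assumes a configured project
# with localization enabled and the 'ckb' locale active). ---
import datetime
from django.utils import formats, translation

with translation.override("ckb"):
    # Rendered with the "j F Y" DATE_FORMAT defined above.
    formats.date_format(datetime.date(2023, 7, 14), "DATE_FORMAT")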
ffce1b4c0b57d5cdb6cfeacc49c9f9084adfed641e2a57a622e365660363a299
"""Translation helper functions.""" import functools import gettext as gettext_module import os import re import sys import warnings from asgiref.local import Local from django.apps import apps from django.conf import settings from django.conf.locale import LANG_INFO from django.core.exceptions import AppRegistryNotReady from django.core.signals import setting_changed from django.dispatch import receiver from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import SafeData, mark_safe from . import to_language, to_locale # Translations are cached in a dictionary for every language. # The active translations are stored by threadid to make them thread local. _translations = {} _active = Local() # The default translation is based on the settings file. _default = None # magic gettext number to separate context from message CONTEXT_SEPARATOR = "\x04" # Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9 # and RFC 3066, section 2.1 accept_language_re = _lazy_re_compile( r""" # "en", "en-au", "x-y-z", "es-419", "*" ([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # Optional "q=1.00", "q=0.8" (?:\s*;\s*q=(0(?:\.[0-9]{,3})?|1(?:\.0{,3})?))? # Multiple accepts per header. (?:\s*,\s*|$) """, re.VERBOSE, ) language_code_re = _lazy_re_compile( r"^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$", re.IGNORECASE ) language_code_prefix_re = _lazy_re_compile(r"^/(\w+([@-]\w+){0,2})(/|$)") @receiver(setting_changed) def reset_cache(*, setting, **kwargs): """ Reset global state when LANGUAGES setting has been changed, as some languages should no longer be accepted. """ if setting in ("LANGUAGES", "LANGUAGE_CODE"): check_for_language.cache_clear() get_languages.cache_clear() get_supported_language_variant.cache_clear() class TranslationCatalog: """ Simulate a dict for DjangoTranslation._catalog so as multiple catalogs with different plural equations are kept separate. """ def __init__(self, trans=None): self._catalogs = [trans._catalog.copy()] if trans else [{}] self._plurals = [trans.plural] if trans else [lambda n: int(n != 1)] def __getitem__(self, key): for cat in self._catalogs: try: return cat[key] except KeyError: pass raise KeyError(key) def __setitem__(self, key, value): self._catalogs[0][key] = value def __contains__(self, key): return any(key in cat for cat in self._catalogs) def items(self): for cat in self._catalogs: yield from cat.items() def keys(self): for cat in self._catalogs: yield from cat.keys() def update(self, trans): # Merge if plural function is the same, else prepend. for cat, plural in zip(self._catalogs, self._plurals): if trans.plural.__code__ == plural.__code__: cat.update(trans._catalog) break else: self._catalogs.insert(0, trans._catalog.copy()) self._plurals.insert(0, trans.plural) def get(self, key, default=None): missing = object() for cat in self._catalogs: result = cat.get(key, missing) if result is not missing: return result return default def plural(self, msgid, num): for cat, plural in zip(self._catalogs, self._plurals): tmsg = cat.get((msgid, plural(num))) if tmsg is not None: return tmsg raise KeyError class DjangoTranslation(gettext_module.GNUTranslations): """ Set up the GNUTranslations context with regard to output charset. This translation object will be constructed out of multiple GNUTranslations objects by merging their catalogs. It will construct an object for the requested language and add a fallback to the default language, if it's different from the requested language. 
""" domain = "django" def __init__(self, language, domain=None, localedirs=None): """Create a GNUTranslations() using many locale directories""" gettext_module.GNUTranslations.__init__(self) if domain is not None: self.domain = domain self.__language = language self.__to_language = to_language(language) self.__locale = to_locale(language) self._catalog = None # If a language doesn't have a catalog, use the Germanic default for # pluralization: anything except one is pluralized. self.plural = lambda n: int(n != 1) if self.domain == "django": if localedirs is not None: # A module-level cache is used for caching 'django' translations warnings.warn( "localedirs is ignored when domain is 'django'.", RuntimeWarning ) localedirs = None self._init_translation_catalog() if localedirs: for localedir in localedirs: translation = self._new_gnu_trans(localedir) self.merge(translation) else: self._add_installed_apps_translations() self._add_local_translations() if ( self.__language == settings.LANGUAGE_CODE and self.domain == "django" and self._catalog is None ): # default lang should have at least one translation file available. raise OSError( "No translation files found for default language %s." % settings.LANGUAGE_CODE ) self._add_fallback(localedirs) if self._catalog is None: # No catalogs found for this language, set an empty catalog. self._catalog = TranslationCatalog() def __repr__(self): return "<DjangoTranslation lang:%s>" % self.__language def _new_gnu_trans(self, localedir, use_null_fallback=True): """ Return a mergeable gettext.GNUTranslations instance. A convenience wrapper. By default gettext uses 'fallback=False'. Using param `use_null_fallback` to avoid confusion with any other references to 'fallback'. """ return gettext_module.translation( domain=self.domain, localedir=localedir, languages=[self.__locale], fallback=use_null_fallback, ) def _init_translation_catalog(self): """Create a base catalog using global django translations.""" settingsfile = sys.modules[settings.__module__].__file__ localedir = os.path.join(os.path.dirname(settingsfile), "locale") translation = self._new_gnu_trans(localedir) self.merge(translation) def _add_installed_apps_translations(self): """Merge translations from each installed app.""" try: app_configs = reversed(apps.get_app_configs()) except AppRegistryNotReady: raise AppRegistryNotReady( "The translation infrastructure cannot be initialized before the " "apps registry is ready. Check that you don't make non-lazy " "gettext calls at import time." 
) for app_config in app_configs: localedir = os.path.join(app_config.path, "locale") if os.path.exists(localedir): translation = self._new_gnu_trans(localedir) self.merge(translation) def _add_local_translations(self): """Merge translations defined in LOCALE_PATHS.""" for localedir in reversed(settings.LOCALE_PATHS): translation = self._new_gnu_trans(localedir) self.merge(translation) def _add_fallback(self, localedirs=None): """Set the GNUTranslations() fallback with the default language.""" # Don't set a fallback for the default language or any English variant # (as it's empty, so it'll ALWAYS fall back to the default language) if self.__language == settings.LANGUAGE_CODE or self.__language.startswith( "en" ): return if self.domain == "django": # Get from cache default_translation = translation(settings.LANGUAGE_CODE) else: default_translation = DjangoTranslation( settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs ) self.add_fallback(default_translation) def merge(self, other): """Merge another translation into this catalog.""" if not getattr(other, "_catalog", None): return # NullTranslations() has no _catalog if self._catalog is None: # Take plural and _info from first catalog found (generally Django's). self.plural = other.plural self._info = other._info.copy() self._catalog = TranslationCatalog(other) else: self._catalog.update(other) if other._fallback: self.add_fallback(other._fallback) def language(self): """Return the translation language.""" return self.__language def to_language(self): """Return the translation language name.""" return self.__to_language def ngettext(self, msgid1, msgid2, n): try: tmsg = self._catalog.plural(msgid1, n) except KeyError: if self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) if n == 1: tmsg = msgid1 else: tmsg = msgid2 return tmsg def translation(language): """ Return a translation object in the default 'django' domain. """ global _translations if language not in _translations: _translations[language] = DjangoTranslation(language) return _translations[language] def activate(language): """ Fetch the translation object for a given language and install it as the current translation object for the current thread. """ if not language: return _active.value = translation(language) def deactivate(): """ Uninstall the active translation object so that further _() calls resolve to the default translation object. """ if hasattr(_active, "value"): del _active.value def deactivate_all(): """ Make the active translation object a NullTranslations() instance. This is useful when we want delayed translations to appear as the original string for some reason. """ _active.value = gettext_module.NullTranslations() _active.value.to_language = lambda *args: None def get_language(): """Return the currently selected language.""" t = getattr(_active, "value", None) if t is not None: try: return t.to_language() except AttributeError: pass # If we don't have a real translation object, assume it's the default language. return settings.LANGUAGE_CODE def get_language_bidi(): """ Return selected language's BiDi layout. * False = left-to-right layout * True = right-to-left layout """ lang = get_language() if lang is None: return False else: base_lang = get_language().split("-")[0] return base_lang in settings.LANGUAGES_BIDI def catalog(): """ Return the current active catalog for further processing. This can be used if you need to modify the catalog or want to access the whole message catalog instead of just translating one string. 
""" global _default t = getattr(_active, "value", None) if t is not None: return t if _default is None: _default = translation(settings.LANGUAGE_CODE) return _default def gettext(message): """ Translate the 'message' string. It uses the current thread to find the translation object to use. If no current translation is activated, the message will be run through the default translation object. """ global _default eol_message = message.replace("\r\n", "\n").replace("\r", "\n") if eol_message: _default = _default or translation(settings.LANGUAGE_CODE) translation_object = getattr(_active, "value", _default) result = translation_object.gettext(eol_message) else: # Return an empty value of the corresponding type if an empty message # is given, instead of metadata, which is the default gettext behavior. result = type(message)("") if isinstance(message, SafeData): return mark_safe(result) return result def pgettext(context, message): msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message) result = gettext(msg_with_ctxt) if CONTEXT_SEPARATOR in result: # Translation not found result = message elif isinstance(message, SafeData): result = mark_safe(result) return result def gettext_noop(message): """ Mark strings for translation but don't translate them now. This can be used to store strings in global variables that should stay in the base language (because they might be used externally) and will be translated later. """ return message def do_ntranslate(singular, plural, number, translation_function): global _default t = getattr(_active, "value", None) if t is not None: return getattr(t, translation_function)(singular, plural, number) if _default is None: _default = translation(settings.LANGUAGE_CODE) return getattr(_default, translation_function)(singular, plural, number) def ngettext(singular, plural, number): """ Return a string of the translation of either the singular or plural, based on the number. """ return do_ntranslate(singular, plural, number, "ngettext") def npgettext(context, singular, plural, number): msgs_with_ctxt = ( "%s%s%s" % (context, CONTEXT_SEPARATOR, singular), "%s%s%s" % (context, CONTEXT_SEPARATOR, plural), number, ) result = ngettext(*msgs_with_ctxt) if CONTEXT_SEPARATOR in result: # Translation not found result = ngettext(singular, plural, number) return result def all_locale_paths(): """ Return a list of paths to user-provides languages files. """ globalpath = os.path.join( os.path.dirname(sys.modules[settings.__module__].__file__), "locale" ) app_paths = [] for app_config in apps.get_app_configs(): locale_path = os.path.join(app_config.path, "locale") if os.path.exists(locale_path): app_paths.append(locale_path) return [globalpath, *settings.LOCALE_PATHS, *app_paths] @functools.lru_cache(maxsize=1000) def check_for_language(lang_code): """ Check whether there is a global language file for the given language code. This is used to decide whether a user-provided language is available. lru_cache should have a maxsize to prevent from memory exhaustion attacks, as the provided language codes are taken from the HTTP request. See also <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>. 
""" # First, a quick check to make sure lang_code is well-formed (#21458) if lang_code is None or not language_code_re.search(lang_code): return False return any( gettext_module.find("django", path, [to_locale(lang_code)]) is not None for path in all_locale_paths() ) @functools.lru_cache def get_languages(): """ Cache of settings.LANGUAGES in a dictionary for easy lookups by key. Convert keys to lowercase as they should be treated as case-insensitive. """ return {key.lower(): value for key, value in dict(settings.LANGUAGES).items()} @functools.lru_cache(maxsize=1000) def get_supported_language_variant(lang_code, strict=False): """ Return the language code that's listed in supported languages, possibly selecting a more generic variant. Raise LookupError if nothing is found. If `strict` is False (the default), look for a country-specific variant when neither the language code nor its generic variant is found. lru_cache should have a maxsize to prevent from memory exhaustion attacks, as the provided language codes are taken from the HTTP request. See also <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>. """ if lang_code: # If 'zh-hant-tw' is not supported, try special fallback or subsequent # language codes i.e. 'zh-hant' and 'zh'. possible_lang_codes = [lang_code] try: possible_lang_codes.extend(LANG_INFO[lang_code]["fallback"]) except KeyError: pass i = None while (i := lang_code.rfind("-", 0, i)) > -1: possible_lang_codes.append(lang_code[:i]) generic_lang_code = possible_lang_codes[-1] supported_lang_codes = get_languages() for code in possible_lang_codes: if code.lower() in supported_lang_codes and check_for_language(code): return code if not strict: # if fr-fr is not supported, try fr-ca. for supported_code in supported_lang_codes: if supported_code.startswith(generic_lang_code + "-"): return supported_code raise LookupError(lang_code) def get_language_from_path(path, strict=False): """ Return the language code if there's a valid language code found in `path`. If `strict` is False (the default), look for a country-specific variant when neither the language code nor its generic variant is found. """ regex_match = language_code_prefix_re.match(path) if not regex_match: return None lang_code = regex_match[1] try: return get_supported_language_variant(lang_code, strict=strict) except LookupError: return None def get_language_from_request(request, check_path=False): """ Analyze the request to find what language the user wants the system to show. Only languages listed in settings.LANGUAGES are taken into account. If the user requests a sublanguage where we have a main language, we send out the main language. If check_path is True, the URL path prefix will be checked for a language code, otherwise this is skipped for backwards compatibility. 
""" if check_path: lang_code = get_language_from_path(request.path_info) if lang_code is not None: return lang_code lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME) if ( lang_code is not None and lang_code in get_languages() and check_for_language(lang_code) ): return lang_code try: return get_supported_language_variant(lang_code) except LookupError: pass accept = request.META.get("HTTP_ACCEPT_LANGUAGE", "") for accept_lang, unused in parse_accept_lang_header(accept): if accept_lang == "*": break if not language_code_re.search(accept_lang): continue try: return get_supported_language_variant(accept_lang) except LookupError: continue return None @functools.lru_cache(maxsize=1000) def parse_accept_lang_header(lang_string): """ Parse the lang_string, which is the body of an HTTP Accept-Language header, and return a tuple of (lang, q-value), ordered by 'q' values. Return an empty tuple if there are any format errors in lang_string. """ result = [] pieces = accept_language_re.split(lang_string.lower()) if pieces[-1]: return () for i in range(0, len(pieces) - 1, 3): first, lang, priority = pieces[i : i + 3] if first: return () if priority: priority = float(priority) else: priority = 1.0 result.append((lang, priority)) result.sort(key=lambda k: k[1], reverse=True) return tuple(result)
7b4db01b8d20e09bb1c1883b13872c67404b8c872f4d6867f8806880c0a8245d
# These are versions of the functions in django.utils.translation.trans_real # that don't actually do anything. This is purely for performance, so that # settings.USE_I18N = False can use this module rather than trans_real.py. from django.conf import settings def gettext(message): return message gettext_noop = gettext_lazy = _ = gettext def ngettext(singular, plural, number): if number == 1: return singular return plural ngettext_lazy = ngettext def pgettext(context, message): return gettext(message) def npgettext(context, singular, plural, number): return ngettext(singular, plural, number) def activate(x): return None def deactivate(): return None deactivate_all = deactivate def get_language(): return settings.LANGUAGE_CODE def get_language_bidi(): return settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI def check_for_language(x): return True def get_language_from_request(request, check_path=False): return None def get_language_from_path(request): return None def get_supported_language_variant(lang_code, strict=False): if lang_code and lang_code.lower() == settings.LANGUAGE_CODE.lower(): return lang_code else: raise LookupError(lang_code)
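# --- Editor's usage sketch (illustrative only). ---
# With settings.USE_I18N = False, django.utils.translation delegates to these
# no-op implementations, so translation calls simply echo their input.
from django.utils.translation import trans_null

trans_null.gettext("Hello")              # 'Hello'
trans_null.ngettext("item", "items", 1)  # 'item'
trans_null.ngettext("item", "items", 3)  # 'items'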
3203264985eca38d1e95c53620b5b4be41fdb826d9650f86f54ae38d5ebc942e
import functools import re from collections import defaultdict from itertools import chain from django.conf import settings from django.db import models from django.db.migrations import operations from django.db.migrations.migration import Migration from django.db.migrations.operations.models import AlterModelOptions from django.db.migrations.optimizer import MigrationOptimizer from django.db.migrations.questioner import MigrationQuestioner from django.db.migrations.utils import ( COMPILED_REGEX_TYPE, RegexObject, resolve_relation, ) from django.utils.topological_sort import stable_topological_sort class MigrationAutodetector: """ Take a pair of ProjectStates and compare them to see what the first would need doing to make it match the second (the second usually being the project's current state). Note that this naturally operates on entire projects at a time, as it's likely that changes interact (for example, you can't add a ForeignKey without having a migration to add the table it depends on first). A user interface may offer single-app usage if it wishes, with the caveat that it may not always be possible. """ def __init__(self, from_state, to_state, questioner=None): self.from_state = from_state self.to_state = to_state self.questioner = questioner or MigrationQuestioner() self.existing_apps = {app for app, model in from_state.models} def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None): """ Main entry point to produce a list of applicable changes. Take a graph to base names on and an optional set of apps to try and restrict to (restriction is not guaranteed) """ changes = self._detect_changes(convert_apps, graph) changes = self.arrange_for_graph(changes, graph, migration_name) if trim_to_apps: changes = self._trim_to_apps(changes, trim_to_apps) return changes def deep_deconstruct(self, obj): """ Recursive deconstruction for a field and its arguments. Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly. """ if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple(self.deep_deconstruct(value) for value in obj) elif isinstance(obj, dict): return {key: self.deep_deconstruct(value) for key, value in obj.items()} elif isinstance(obj, functools.partial): return ( obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords), ) elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): # If this is a type that implements 'deconstruct' as an instance method, # avoid treating this as being deconstructible itself - see #22951 return obj elif hasattr(obj, "deconstruct"): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): # we have a field which also returns a name deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return ( path, [self.deep_deconstruct(value) for value in args], {key: self.deep_deconstruct(value) for key, value in kwargs.items()}, ) else: return obj def only_relation_agnostic_fields(self, fields): """ Return a definition of the fields that ignores field names and what related fields actually relate to. Used for detecting renames (as the related fields change during renames). 
""" fields_def = [] for name, field in sorted(fields.items()): deconstruction = self.deep_deconstruct(field) if field.remote_field and field.remote_field.model: deconstruction[2].pop("to", None) fields_def.append(deconstruction) return fields_def def _detect_changes(self, convert_apps=None, graph=None): """ Return a dict of migration plans which will achieve the change from from_state to to_state. The dict has app labels as keys and a list of migrations as values. The resulting migrations aren't specially named, but the names do matter for dependencies inside the set. convert_apps is the list of apps to convert to use migrations (i.e. to make initial migrations for, in the usual case) graph is an optional argument that, if provided, can help improve dependency generation and avoid potential circular dependencies. """ # The first phase is generating all the operations for each app # and gathering them into a big per-app list. # Then go through that list, order it, and split into migrations to # resolve dependencies caused by M2Ms and FKs. self.generated_operations = {} self.altered_indexes = {} self.altered_constraints = {} self.renamed_fields = {} # Prepare some old/new state and model lists, separating # proxy models and ignoring unmigrated apps. self.old_model_keys = set() self.old_proxy_keys = set() self.old_unmanaged_keys = set() self.new_model_keys = set() self.new_proxy_keys = set() self.new_unmanaged_keys = set() for (app_label, model_name), model_state in self.from_state.models.items(): if not model_state.options.get("managed", True): self.old_unmanaged_keys.add((app_label, model_name)) elif app_label not in self.from_state.real_apps: if model_state.options.get("proxy"): self.old_proxy_keys.add((app_label, model_name)) else: self.old_model_keys.add((app_label, model_name)) for (app_label, model_name), model_state in self.to_state.models.items(): if not model_state.options.get("managed", True): self.new_unmanaged_keys.add((app_label, model_name)) elif app_label not in self.from_state.real_apps or ( convert_apps and app_label in convert_apps ): if model_state.options.get("proxy"): self.new_proxy_keys.add((app_label, model_name)) else: self.new_model_keys.add((app_label, model_name)) self.from_state.resolve_fields_and_relations() self.to_state.resolve_fields_and_relations() # Renames have to come first self.generate_renamed_models() # Prepare lists of fields and generate through model map self._prepare_field_lists() self._generate_through_model_map() # Generate non-rename model operations self.generate_deleted_models() self.generate_created_models() self.generate_deleted_proxies() self.generate_created_proxies() self.generate_altered_options() self.generate_altered_managers() # Create the renamed fields and store them in self.renamed_fields. # They are used by create_altered_indexes(), generate_altered_fields(), # generate_removed_altered_index/unique_together(), and # generate_altered_index/unique_together(). self.create_renamed_fields() # Create the altered indexes and store them in self.altered_indexes. # This avoids the same computation in generate_removed_indexes() # and generate_added_indexes(). self.create_altered_indexes() self.create_altered_constraints() # Generate index removal operations before field is removed self.generate_removed_constraints() self.generate_removed_indexes() # Generate field renaming operations. self.generate_renamed_fields() self.generate_renamed_indexes() # Generate removal of foo together. 
self.generate_removed_altered_unique_together() self.generate_removed_altered_index_together() # RemovedInDjango51Warning. # Generate field operations. self.generate_removed_fields() self.generate_added_fields() self.generate_altered_fields() self.generate_altered_order_with_respect_to() self.generate_altered_unique_together() self.generate_altered_index_together() # RemovedInDjango51Warning. self.generate_added_indexes() self.generate_added_constraints() self.generate_altered_db_table() self._sort_migrations() self._build_migration_list(graph) self._optimize_migrations() return self.migrations def _prepare_field_lists(self): """ Prepare field lists and a list of the fields that used through models in the old state so dependencies can be made from the through model deletion to the field that uses it. """ self.kept_model_keys = self.old_model_keys & self.new_model_keys self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys self.through_users = {} self.old_field_keys = { (app_label, model_name, field_name) for app_label, model_name in self.kept_model_keys for field_name in self.from_state.models[ app_label, self.renamed_models.get((app_label, model_name), model_name) ].fields } self.new_field_keys = { (app_label, model_name, field_name) for app_label, model_name in self.kept_model_keys for field_name in self.to_state.models[app_label, model_name].fields } def _generate_through_model_map(self): """Through model map generation.""" for app_label, model_name in sorted(self.old_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] for field_name, field in old_model_state.fields.items(): if hasattr(field, "remote_field") and getattr( field.remote_field, "through", None ): through_key = resolve_relation( field.remote_field.through, app_label, model_name ) self.through_users[through_key] = ( app_label, old_model_name, field_name, ) @staticmethod def _resolve_dependency(dependency): """ Return the resolved dependency and a boolean denoting whether or not it was swappable. """ if dependency[0] != "__setting__": return dependency, False resolved_app_label, resolved_object_name = getattr( settings, dependency[1] ).split(".") return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True def _build_migration_list(self, graph=None): """ Chop the lists of operations up into migrations with dependencies on each other. Do this by going through an app's list of operations until one is found that has an outgoing dependency that isn't in another app's migration yet (hasn't been chopped off its list). Then chop off the operations before it into a migration and move onto the next app. If the loops completes without doing anything, there's a circular dependency (which _should_ be impossible as the operations are all split at this point so they can't depend and be depended on). """ self.migrations = {} num_ops = sum(len(x) for x in self.generated_operations.values()) chop_mode = False while num_ops: # On every iteration, we step through all the apps and see if there # is a completed set of operations. # If we find that a subset of the operations are complete we can # try to chop it off from the rest and continue, but we only # do this if we've already been through the list once before # without any chopping and nothing has changed. 
for app_label in sorted(self.generated_operations): chopped = [] dependencies = set() for operation in list(self.generated_operations[app_label]): deps_satisfied = True operation_dependencies = set() for dep in operation._auto_deps: # Temporarily resolve the swappable dependency to # prevent circular references. While keeping the # dependency checks on the resolved model, add the # swappable dependencies. original_dep = dep dep, is_swappable_dep = self._resolve_dependency(dep) if dep[0] != app_label: # External app dependency. See if it's not yet # satisfied. for other_operation in self.generated_operations.get( dep[0], [] ): if self.check_dependency(other_operation, dep): deps_satisfied = False break if not deps_satisfied: break else: if is_swappable_dep: operation_dependencies.add( (original_dep[0], original_dep[1]) ) elif dep[0] in self.migrations: operation_dependencies.add( (dep[0], self.migrations[dep[0]][-1].name) ) else: # If we can't find the other app, we add a # first/last dependency, but only if we've # already been through once and checked # everything. if chop_mode: # If the app already exists, we add a # dependency on the last migration, as # we don't know which migration # contains the target field. If it's # not yet migrated or has no # migrations, we use __first__. if graph and graph.leaf_nodes(dep[0]): operation_dependencies.add( graph.leaf_nodes(dep[0])[0] ) else: operation_dependencies.add( (dep[0], "__first__") ) else: deps_satisfied = False if deps_satisfied: chopped.append(operation) dependencies.update(operation_dependencies) del self.generated_operations[app_label][0] else: break # Make a migration! Well, only if there's stuff to put in it if dependencies or chopped: if not self.generated_operations[app_label] or chop_mode: subclass = type( "Migration", (Migration,), {"operations": [], "dependencies": []}, ) instance = subclass( "auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label, ) instance.dependencies = list(dependencies) instance.operations = chopped instance.initial = app_label not in self.existing_apps self.migrations.setdefault(app_label, []).append(instance) chop_mode = False else: self.generated_operations[app_label] = ( chopped + self.generated_operations[app_label] ) new_num_ops = sum(len(x) for x in self.generated_operations.values()) if new_num_ops == num_ops: if not chop_mode: chop_mode = True else: raise ValueError( "Cannot resolve operation dependencies: %r" % self.generated_operations ) num_ops = new_num_ops def _sort_migrations(self): """ Reorder to make things possible. Reordering may be needed so FKs work nicely inside the same app. """ for app_label, ops in sorted(self.generated_operations.items()): # construct a dependency graph for intra-app dependencies dependency_graph = {op: set() for op in ops} for op in ops: for dep in op._auto_deps: # Resolve intra-app dependencies to handle circular # references involving a swappable model. 
dep = self._resolve_dependency(dep)[0] if dep[0] == app_label: for op2 in ops: if self.check_dependency(op2, dep): dependency_graph[op].add(op2) # we use a stable sort for deterministic tests & general behavior self.generated_operations[app_label] = stable_topological_sort( ops, dependency_graph ) def _optimize_migrations(self): # Add in internal dependencies among the migrations for app_label, migrations in self.migrations.items(): for m1, m2 in zip(migrations, migrations[1:]): m2.dependencies.append((app_label, m1.name)) # De-dupe dependencies for migrations in self.migrations.values(): for migration in migrations: migration.dependencies = list(set(migration.dependencies)) # Optimize migrations for app_label, migrations in self.migrations.items(): for migration in migrations: migration.operations = MigrationOptimizer().optimize( migration.operations, app_label ) def check_dependency(self, operation, dependency): """ Return True if the given operation depends on the given dependency, False otherwise. """ # Created model if dependency[2] is None and dependency[3] is True: return ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() ) # Created field elif dependency[2] is not None and dependency[3] is True: return ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() and any(dependency[2] == x for x, y in operation.fields) ) or ( isinstance(operation, operations.AddField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # Removed field elif dependency[2] is not None and dependency[3] is False: return ( isinstance(operation, operations.RemoveField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # Removed model elif dependency[2] is None and dependency[3] is False: return ( isinstance(operation, operations.DeleteModel) and operation.name_lower == dependency[1].lower() ) # Field being altered elif dependency[2] is not None and dependency[3] == "alter": return ( isinstance(operation, operations.AlterField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # order_with_respect_to being unset for a field elif dependency[2] is not None and dependency[3] == "order_wrt_unset": return ( isinstance(operation, operations.AlterOrderWithRespectTo) and operation.name_lower == dependency[1].lower() and (operation.order_with_respect_to or "").lower() != dependency[2].lower() ) # Field is removed and part of an index/unique_together elif dependency[2] is not None and dependency[3] == "foo_together_change": return ( isinstance( operation, (operations.AlterUniqueTogether, operations.AlterIndexTogether), ) and operation.name_lower == dependency[1].lower() ) # Unknown dependency. Raise an error. else: raise ValueError("Can't handle dependency %r" % (dependency,)) def add_operation(self, app_label, operation, dependencies=None, beginning=False): # Dependencies are # (app_label, model_name, field_name, create/delete as True/False) operation._auto_deps = dependencies or [] if beginning: self.generated_operations.setdefault(app_label, []).insert(0, operation) else: self.generated_operations.setdefault(app_label, []).append(operation) def swappable_first_key(self, item): """ Place potential swappable models first in lists of created models (only real way to solve #22783). 
""" try: model_state = self.to_state.models[item] base_names = { base if isinstance(base, str) else base.__name__ for base in model_state.bases } string_version = "%s.%s" % (item[0], item[1]) if ( model_state.options.get("swappable") or "AbstractUser" in base_names or "AbstractBaseUser" in base_names or settings.AUTH_USER_MODEL.lower() == string_version.lower() ): return ("___" + item[0], "___" + item[1]) except LookupError: pass return item def generate_renamed_models(self): """ Find any renamed models, generate the operations for them, and remove the old entry from the model lists. Must be run before other model-level generation. """ self.renamed_models = {} self.renamed_models_rel = {} added_models = self.new_model_keys - self.old_model_keys for app_label, model_name in sorted(added_models): model_state = self.to_state.models[app_label, model_name] model_fields_def = self.only_relation_agnostic_fields(model_state.fields) removed_models = self.old_model_keys - self.new_model_keys for rem_app_label, rem_model_name in removed_models: if rem_app_label == app_label: rem_model_state = self.from_state.models[ rem_app_label, rem_model_name ] rem_model_fields_def = self.only_relation_agnostic_fields( rem_model_state.fields ) if model_fields_def == rem_model_fields_def: if self.questioner.ask_rename_model( rem_model_state, model_state ): dependencies = [] fields = list(model_state.fields.values()) + [ field.remote_field for relations in self.to_state.relations[ app_label, model_name ].values() for field in relations.values() ] for field in fields: if field.is_relation: dependencies.extend( self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, ) ) self.add_operation( app_label, operations.RenameModel( old_name=rem_model_state.name, new_name=model_state.name, ), dependencies=dependencies, ) self.renamed_models[app_label, model_name] = rem_model_name renamed_models_rel_key = "%s.%s" % ( rem_model_state.app_label, rem_model_state.name_lower, ) self.renamed_models_rel[ renamed_models_rel_key ] = "%s.%s" % ( model_state.app_label, model_state.name_lower, ) self.old_model_keys.remove((rem_app_label, rem_model_name)) self.old_model_keys.add((app_label, model_name)) break def generate_created_models(self): """ Find all new models (both managed and unmanaged) and make create operations for them as well as separate operations to create any foreign key or M2M relationships (these are optimized later, if possible). Defer any model options that refer to collections of fields that might be deferred (e.g. unique_together, index_together). """ old_keys = self.old_model_keys | self.old_unmanaged_keys added_models = self.new_model_keys - old_keys added_unmanaged_models = self.new_unmanaged_keys - old_keys all_added_models = chain( sorted(added_models, key=self.swappable_first_key, reverse=True), sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True), ) for app_label, model_name in all_added_models: model_state = self.to_state.models[app_label, model_name] # Gather related fields related_fields = {} primary_key_rel = None for field_name, field in model_state.fields.items(): if field.remote_field: if field.remote_field.model: if field.primary_key: primary_key_rel = field.remote_field.model elif not field.remote_field.parent_link: related_fields[field_name] = field if getattr(field.remote_field, "through", None): related_fields[field_name] = field # Are there indexes/unique|index_together to defer? 
indexes = model_state.options.pop("indexes") constraints = model_state.options.pop("constraints") unique_together = model_state.options.pop("unique_together", None) # RemovedInDjango51Warning. index_together = model_state.options.pop("index_together", None) order_with_respect_to = model_state.options.pop( "order_with_respect_to", None ) # Depend on the deletion of any possible proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, str) and "." in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Depend on the removal of base fields if the new model has # a field with the same name. old_base_model_state = self.from_state.models.get( (base_app_label, base_name) ) new_base_model_state = self.to_state.models.get( (base_app_label, base_name) ) if old_base_model_state and new_base_model_state: removed_base_fields = ( set(old_base_model_state.fields) .difference( new_base_model_state.fields, ) .intersection(model_state.fields) ) for removed_base_field in removed_base_fields: dependencies.append( (base_app_label, base_name, removed_base_field, False) ) # Depend on the other end of the primary key if it's a relation if primary_key_rel: dependencies.append( resolve_relation( primary_key_rel, app_label, model_name, ) + (None, True) ) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[ d for d in model_state.fields.items() if d[0] not in related_fields ], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), dependencies=dependencies, beginning=True, ) # Don't add operations which modify the database for unmanaged models if not model_state.options.get("managed", True): continue # Generate operations for each related field for name, field in sorted(related_fields.items()): dependencies = self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, ) # Depend on our own model being created dependencies.append((app_label, model_name, None, True)) # Make operation self.add_operation( app_label, operations.AddField( model_name=model_name, name=name, field=field, ), dependencies=list(set(dependencies)), ) # Generate other opns if order_with_respect_to: self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=order_with_respect_to, ), dependencies=[ (app_label, model_name, order_with_respect_to, True), (app_label, model_name, None, True), ], ) related_dependencies = [ (app_label, model_name, name, True) for name in sorted(related_fields) ] related_dependencies.append((app_label, model_name, None, True)) for index in indexes: self.add_operation( app_label, operations.AddIndex( model_name=model_name, index=index, ), dependencies=related_dependencies, ) for constraint in constraints: self.add_operation( app_label, operations.AddConstraint( model_name=model_name, constraint=constraint, ), dependencies=related_dependencies, ) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=unique_together, ), dependencies=related_dependencies, ) # RemovedInDjango51Warning. if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=index_together, ), dependencies=related_dependencies, ) # Fix relationships if the model changed from a proxy model to a # concrete model. 
relations = self.to_state.relations if (app_label, model_name) in self.old_proxy_keys: for related_model_key, related_fields in relations[ app_label, model_name ].items(): related_model_state = self.to_state.models[related_model_key] for related_field_name, related_field in related_fields.items(): self.add_operation( related_model_state.app_label, operations.AlterField( model_name=related_model_state.name, name=related_field_name, field=related_field, ), dependencies=[(app_label, model_name, None, True)], ) def generate_created_proxies(self): """ Make CreateModel statements for proxy models. Use the same statements as that way there's less code duplication, but for proxy models it's safe to skip all the pointless field stuff and chuck out an operation. """ added = self.new_proxy_keys - self.old_proxy_keys for app_label, model_name in sorted(added): model_state = self.to_state.models[app_label, model_name] assert model_state.options.get("proxy") # Depend on the deletion of any possible non-proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, str) and "." in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), # Depend on the deletion of any possible non-proxy version of us dependencies=dependencies, ) def generate_deleted_models(self): """ Find all deleted models (managed and unmanaged) and make delete operations for them as well as separate operations to delete any foreign key or M2M relationships (these are optimized later, if possible). Also bring forward removal of any model options that refer to collections of fields - the inverse of generate_created_models(). """ new_keys = self.new_model_keys | self.new_unmanaged_keys deleted_models = self.old_model_keys - new_keys deleted_unmanaged_models = self.old_unmanaged_keys - new_keys all_deleted_models = chain( sorted(deleted_models), sorted(deleted_unmanaged_models) ) for app_label, model_name in all_deleted_models: model_state = self.from_state.models[app_label, model_name] # Gather related fields related_fields = {} for field_name, field in model_state.fields.items(): if field.remote_field: if field.remote_field.model: related_fields[field_name] = field if getattr(field.remote_field, "through", None): related_fields[field_name] = field # Generate option removal first unique_together = model_state.options.pop("unique_together", None) # RemovedInDjango51Warning. index_together = model_state.options.pop("index_together", None) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=None, ), ) # RemovedInDjango51Warning. if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=None, ), ) # Then remove each related field for name in sorted(related_fields): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=name, ), ) # Finally, remove the model. # This depends on both the removal/alteration of all incoming fields # and the removal of all its own related fields, and if it's # a through model the field that references it. 
dependencies = [] relations = self.from_state.relations for ( related_object_app_label, object_name, ), relation_related_fields in relations[app_label, model_name].items(): for field_name, field in relation_related_fields.items(): dependencies.append( (related_object_app_label, object_name, field_name, False), ) if not field.many_to_many: dependencies.append( ( related_object_app_label, object_name, field_name, "alter", ), ) for name in sorted(related_fields): dependencies.append((app_label, model_name, name, False)) # We're referenced in another field's through= through_user = self.through_users.get((app_label, model_state.name_lower)) if through_user: dependencies.append( (through_user[0], through_user[1], through_user[2], False) ) # Finally, make the operation, deduping any dependencies self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), dependencies=list(set(dependencies)), ) def generate_deleted_proxies(self): """Make DeleteModel options for proxy models.""" deleted = self.old_proxy_keys - self.new_proxy_keys for app_label, model_name in sorted(deleted): model_state = self.from_state.models[app_label, model_name] assert model_state.options.get("proxy") self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), ) def create_renamed_fields(self): """Work out renamed fields.""" self.renamed_operations = [] old_field_keys = self.old_field_keys.copy() for app_label, model_name, field_name in sorted( self.new_field_keys - old_field_keys ): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] field = new_model_state.get_field(field_name) # Scan to see if this is actually a rename! field_dec = self.deep_deconstruct(field) for rem_app_label, rem_model_name, rem_field_name in sorted( old_field_keys - self.new_field_keys ): if rem_app_label == app_label and rem_model_name == model_name: old_field = old_model_state.get_field(rem_field_name) old_field_dec = self.deep_deconstruct(old_field) if ( field.remote_field and field.remote_field.model and "to" in old_field_dec[2] ): old_rel_to = old_field_dec[2]["to"] if old_rel_to in self.renamed_models_rel: old_field_dec[2]["to"] = self.renamed_models_rel[old_rel_to] old_field.set_attributes_from_name(rem_field_name) old_db_column = old_field.get_attname_column()[1] if old_field_dec == field_dec or ( # Was the field renamed and db_column equal to the # old field's column added? old_field_dec[0:2] == field_dec[0:2] and dict(old_field_dec[2], db_column=old_db_column) == field_dec[2] ): if self.questioner.ask_rename( model_name, rem_field_name, field_name, field ): self.renamed_operations.append( ( rem_app_label, rem_model_name, old_field.db_column, rem_field_name, app_label, model_name, field, field_name, ) ) old_field_keys.remove( (rem_app_label, rem_model_name, rem_field_name) ) old_field_keys.add((app_label, model_name, field_name)) self.renamed_fields[ app_label, model_name, field_name ] = rem_field_name break def generate_renamed_fields(self): """Generate RenameField operations.""" for ( rem_app_label, rem_model_name, rem_db_column, rem_field_name, app_label, model_name, field, field_name, ) in self.renamed_operations: # A db_column mismatch requires a prior noop AlterField for the # subsequent RenameField to be a noop on attempts at preserving the # old name. 
if rem_db_column != field.db_column: altered_field = field.clone() altered_field.name = rem_field_name self.add_operation( app_label, operations.AlterField( model_name=model_name, name=rem_field_name, field=altered_field, ), ) self.add_operation( app_label, operations.RenameField( model_name=model_name, old_name=rem_field_name, new_name=field_name, ), ) self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name)) self.old_field_keys.add((app_label, model_name, field_name)) def generate_added_fields(self): """Make AddField operations.""" for app_label, model_name, field_name in sorted( self.new_field_keys - self.old_field_keys ): self._generate_added_field(app_label, model_name, field_name) def _generate_added_field(self, app_label, model_name, field_name): field = self.to_state.models[app_label, model_name].get_field(field_name) # Adding a field always depends at least on its removal. dependencies = [(app_label, model_name, field_name, False)] # Fields that are foreignkeys/m2ms depend on stuff. if field.remote_field and field.remote_field.model: dependencies.extend( self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, ) ) # You can't just add NOT NULL fields with no default or fields # which don't allow empty strings as default. time_fields = (models.DateField, models.DateTimeField, models.TimeField) preserve_default = ( field.null or field.has_default() or field.many_to_many or (field.blank and field.empty_strings_allowed) or (isinstance(field, time_fields) and field.auto_now) ) if not preserve_default: field = field.clone() if isinstance(field, time_fields) and field.auto_now_add: field.default = self.questioner.ask_auto_now_add_addition( field_name, model_name ) else: field.default = self.questioner.ask_not_null_addition( field_name, model_name ) if ( field.unique and field.default is not models.NOT_PROVIDED and callable(field.default) ): self.questioner.ask_unique_callable_default_addition(field_name, model_name) self.add_operation( app_label, operations.AddField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ), dependencies=dependencies, ) def generate_removed_fields(self): """Make RemoveField operations.""" for app_label, model_name, field_name in sorted( self.old_field_keys - self.new_field_keys ): self._generate_removed_field(app_label, model_name, field_name) def _generate_removed_field(self, app_label, model_name, field_name): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=field_name, ), # We might need to depend on the removal of an # order_with_respect_to or index/unique_together operation; # this is safely ignored if there isn't one dependencies=[ (app_label, model_name, field_name, "order_wrt_unset"), (app_label, model_name, field_name, "foo_together_change"), ], ) def generate_altered_fields(self): """ Make AlterField operations, or possibly RemovedField/AddField if alter isn't possible. """ for app_label, model_name, field_name in sorted( self.old_field_keys & self.new_field_keys ): # Did the field change? 
old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_field_name = self.renamed_fields.get( (app_label, model_name, field_name), field_name ) old_field = self.from_state.models[app_label, old_model_name].get_field( old_field_name ) new_field = self.to_state.models[app_label, model_name].get_field( field_name ) dependencies = [] # Implement any model renames on relations; these are handled by RenameModel # so we need to exclude them from the comparison if hasattr(new_field, "remote_field") and getattr( new_field.remote_field, "model", None ): rename_key = resolve_relation( new_field.remote_field.model, app_label, model_name ) if rename_key in self.renamed_models: new_field.remote_field.model = old_field.remote_field.model # Handle ForeignKey which can only have a single to_field. remote_field_name = getattr(new_field.remote_field, "field_name", None) if remote_field_name: to_field_rename_key = rename_key + (remote_field_name,) if to_field_rename_key in self.renamed_fields: # Repoint both model and field name because to_field # inclusion in ForeignKey.deconstruct() is based on # both. new_field.remote_field.model = old_field.remote_field.model new_field.remote_field.field_name = ( old_field.remote_field.field_name ) # Handle ForeignObjects which can have multiple from_fields/to_fields. from_fields = getattr(new_field, "from_fields", None) if from_fields: from_rename_key = (app_label, model_name) new_field.from_fields = tuple( [ self.renamed_fields.get( from_rename_key + (from_field,), from_field ) for from_field in from_fields ] ) new_field.to_fields = tuple( [ self.renamed_fields.get(rename_key + (to_field,), to_field) for to_field in new_field.to_fields ] ) dependencies.extend( self._get_dependencies_for_foreign_key( app_label, model_name, new_field, self.to_state, ) ) if hasattr(new_field, "remote_field") and getattr( new_field.remote_field, "through", None ): rename_key = resolve_relation( new_field.remote_field.through, app_label, model_name ) if rename_key in self.renamed_models: new_field.remote_field.through = old_field.remote_field.through old_field_dec = self.deep_deconstruct(old_field) new_field_dec = self.deep_deconstruct(new_field) # If the field was confirmed to be renamed it means that only # db_column was allowed to change which generate_renamed_fields() # already accounts for by adding an AlterField operation. 
if old_field_dec != new_field_dec and old_field_name == field_name: both_m2m = old_field.many_to_many and new_field.many_to_many neither_m2m = not old_field.many_to_many and not new_field.many_to_many if both_m2m or neither_m2m: # Either both fields are m2m or neither is preserve_default = True if ( old_field.null and not new_field.null and not new_field.has_default() and not new_field.many_to_many ): field = new_field.clone() new_default = self.questioner.ask_not_null_alteration( field_name, model_name ) if new_default is not models.NOT_PROVIDED: field.default = new_default preserve_default = False else: field = new_field self.add_operation( app_label, operations.AlterField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ), dependencies=dependencies, ) else: # We cannot alter between m2m and concrete fields self._generate_removed_field(app_label, model_name, field_name) self._generate_added_field(app_label, model_name, field_name) def create_altered_indexes(self): option_name = operations.AddIndex.option_name self.renamed_index_together_values = defaultdict(list) for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_indexes = old_model_state.options[option_name] new_indexes = new_model_state.options[option_name] added_indexes = [idx for idx in new_indexes if idx not in old_indexes] removed_indexes = [idx for idx in old_indexes if idx not in new_indexes] renamed_indexes = [] # Find renamed indexes. remove_from_added = [] remove_from_removed = [] for new_index in added_indexes: new_index_dec = new_index.deconstruct() new_index_name = new_index_dec[2].pop("name") for old_index in removed_indexes: old_index_dec = old_index.deconstruct() old_index_name = old_index_dec[2].pop("name") # Indexes are the same except for the names. if ( new_index_dec == old_index_dec and new_index_name != old_index_name ): renamed_indexes.append((old_index_name, new_index_name, None)) remove_from_added.append(new_index) remove_from_removed.append(old_index) # Find index_together changed to indexes. for ( old_value, new_value, index_together_app_label, index_together_model_name, dependencies, ) in self._get_altered_foo_together_operations( operations.AlterIndexTogether.option_name ): if ( app_label != index_together_app_label or model_name != index_together_model_name ): continue removed_values = old_value.difference(new_value) for removed_index_together in removed_values: renamed_index_together_indexes = [] for new_index in added_indexes: _, args, kwargs = new_index.deconstruct() # Ensure only 'fields' are defined in the Index. if ( not args and new_index.fields == list(removed_index_together) and set(kwargs) == {"name", "fields"} ): renamed_index_together_indexes.append(new_index) if len(renamed_index_together_indexes) == 1: renamed_index = renamed_index_together_indexes[0] remove_from_added.append(renamed_index) renamed_indexes.append( (None, renamed_index.name, removed_index_together) ) self.renamed_index_together_values[ index_together_app_label, index_together_model_name ].append(removed_index_together) # Remove renamed indexes from the lists of added and removed # indexes. 
added_indexes = [ idx for idx in added_indexes if idx not in remove_from_added ] removed_indexes = [ idx for idx in removed_indexes if idx not in remove_from_removed ] self.altered_indexes.update( { (app_label, model_name): { "added_indexes": added_indexes, "removed_indexes": removed_indexes, "renamed_indexes": renamed_indexes, } } ) def generate_added_indexes(self): for (app_label, model_name), alt_indexes in self.altered_indexes.items(): for index in alt_indexes["added_indexes"]: self.add_operation( app_label, operations.AddIndex( model_name=model_name, index=index, ), ) def generate_removed_indexes(self): for (app_label, model_name), alt_indexes in self.altered_indexes.items(): for index in alt_indexes["removed_indexes"]: self.add_operation( app_label, operations.RemoveIndex( model_name=model_name, name=index.name, ), ) def generate_renamed_indexes(self): for (app_label, model_name), alt_indexes in self.altered_indexes.items(): for old_index_name, new_index_name, old_fields in alt_indexes[ "renamed_indexes" ]: self.add_operation( app_label, operations.RenameIndex( model_name=model_name, new_name=new_index_name, old_name=old_index_name, old_fields=old_fields, ), ) def create_altered_constraints(self): option_name = operations.AddConstraint.option_name for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_constraints = old_model_state.options[option_name] new_constraints = new_model_state.options[option_name] add_constraints = [c for c in new_constraints if c not in old_constraints] rem_constraints = [c for c in old_constraints if c not in new_constraints] self.altered_constraints.update( { (app_label, model_name): { "added_constraints": add_constraints, "removed_constraints": rem_constraints, } } ) def generate_added_constraints(self): for ( app_label, model_name, ), alt_constraints in self.altered_constraints.items(): for constraint in alt_constraints["added_constraints"]: self.add_operation( app_label, operations.AddConstraint( model_name=model_name, constraint=constraint, ), ) def generate_removed_constraints(self): for ( app_label, model_name, ), alt_constraints in self.altered_constraints.items(): for constraint in alt_constraints["removed_constraints"]: self.add_operation( app_label, operations.RemoveConstraint( model_name=model_name, name=constraint.name, ), ) @staticmethod def _get_dependencies_for_foreign_key(app_label, model_name, field, project_state): remote_field_model = None if hasattr(field.remote_field, "model"): remote_field_model = field.remote_field.model else: relations = project_state.relations[app_label, model_name] for (remote_app_label, remote_model_name), fields in relations.items(): if any( field == related_field.remote_field for related_field in fields.values() ): remote_field_model = f"{remote_app_label}.{remote_model_name}" break # Account for FKs to swappable models swappable_setting = getattr(field, "swappable_setting", None) if swappable_setting is not None: dep_app_label = "__setting__" dep_object_name = swappable_setting else: dep_app_label, dep_object_name = resolve_relation( remote_field_model, app_label, model_name, ) dependencies = [(dep_app_label, dep_object_name, None, True)] if getattr(field.remote_field, "through", None): through_app_label, through_object_name = resolve_relation( field.remote_field.through, app_label, model_name, ) 
dependencies.append((through_app_label, through_object_name, None, True)) return dependencies def _get_altered_foo_together_operations(self, option_name): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] # We run the old version through the field renames to account for those old_value = old_model_state.options.get(option_name) old_value = ( { tuple( self.renamed_fields.get((app_label, model_name, n), n) for n in unique ) for unique in old_value } if old_value else set() ) new_value = new_model_state.options.get(option_name) new_value = set(new_value) if new_value else set() if old_value != new_value: dependencies = [] for foo_togethers in new_value: for field_name in foo_togethers: field = new_model_state.get_field(field_name) if field.remote_field and field.remote_field.model: dependencies.extend( self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, ) ) yield ( old_value, new_value, app_label, model_name, dependencies, ) def _generate_removed_altered_foo_together(self, operation): for ( old_value, new_value, app_label, model_name, dependencies, ) in self._get_altered_foo_together_operations(operation.option_name): if operation == operations.AlterIndexTogether: old_value = { value for value in old_value if value not in self.renamed_index_together_values[app_label, model_name] } removal_value = new_value.intersection(old_value) if removal_value or old_value: self.add_operation( app_label, operation( name=model_name, **{operation.option_name: removal_value} ), dependencies=dependencies, ) def generate_removed_altered_unique_together(self): self._generate_removed_altered_foo_together(operations.AlterUniqueTogether) # RemovedInDjango51Warning. def generate_removed_altered_index_together(self): self._generate_removed_altered_foo_together(operations.AlterIndexTogether) def _generate_altered_foo_together(self, operation): for ( old_value, new_value, app_label, model_name, dependencies, ) in self._get_altered_foo_together_operations(operation.option_name): removal_value = new_value.intersection(old_value) if new_value != removal_value: self.add_operation( app_label, operation(name=model_name, **{operation.option_name: new_value}), dependencies=dependencies, ) def generate_altered_unique_together(self): self._generate_altered_foo_together(operations.AlterUniqueTogether) # RemovedInDjango51Warning. 
def generate_altered_index_together(self): self._generate_altered_foo_together(operations.AlterIndexTogether) def generate_altered_db_table(self): models_to_check = self.kept_model_keys.union( self.kept_proxy_keys, self.kept_unmanaged_keys ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_db_table_name = old_model_state.options.get("db_table") new_db_table_name = new_model_state.options.get("db_table") if old_db_table_name != new_db_table_name: self.add_operation( app_label, operations.AlterModelTable( name=model_name, table=new_db_table_name, ), ) def generate_altered_options(self): """ Work out if any non-schema-affecting options have changed and make an operation to represent them in state changes (in case Python code in migrations needs them). """ models_to_check = self.kept_model_keys.union( self.kept_proxy_keys, self.kept_unmanaged_keys, # unmanaged converted to managed self.old_unmanaged_keys & self.new_model_keys, # managed converted to unmanaged self.old_model_keys & self.new_unmanaged_keys, ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_options = { key: value for key, value in old_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } new_options = { key: value for key, value in new_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } if old_options != new_options: self.add_operation( app_label, operations.AlterModelOptions( name=model_name, options=new_options, ), ) def generate_altered_order_with_respect_to(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] if old_model_state.options.get( "order_with_respect_to" ) != new_model_state.options.get("order_with_respect_to"): # Make sure it comes second if we're adding # (removal dependency is part of RemoveField) dependencies = [] if new_model_state.options.get("order_with_respect_to"): dependencies.append( ( app_label, model_name, new_model_state.options["order_with_respect_to"], True, ) ) # Actually generate the operation self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=new_model_state.options.get( "order_with_respect_to" ), ), dependencies=dependencies, ) def generate_altered_managers(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] if old_model_state.managers != new_model_state.managers: self.add_operation( app_label, operations.AlterModelManagers( name=model_name, managers=new_model_state.managers, ), ) def arrange_for_graph(self, changes, graph, migration_name=None): """ Take a result from changes() and a MigrationGraph, and fix the names and dependencies of the changes so they extend the graph from the leaf nodes for each app. 
""" leaves = graph.leaf_nodes() name_map = {} for app_label, migrations in list(changes.items()): if not migrations: continue # Find the app label's current leaf node app_leaf = None for leaf in leaves: if leaf[0] == app_label: app_leaf = leaf break # Do they want an initial migration for this app? if app_leaf is None and not self.questioner.ask_initial(app_label): # They don't. for migration in migrations: name_map[(app_label, migration.name)] = (app_label, "__first__") del changes[app_label] continue # Work out the next number in the sequence if app_leaf is None: next_number = 1 else: next_number = (self.parse_number(app_leaf[1]) or 0) + 1 # Name each migration for i, migration in enumerate(migrations): if i == 0 and app_leaf: migration.dependencies.append(app_leaf) new_name_parts = ["%04i" % next_number] if migration_name: new_name_parts.append(migration_name) elif i == 0 and not app_leaf: new_name_parts.append("initial") else: new_name_parts.append(migration.suggest_name()[:100]) new_name = "_".join(new_name_parts) name_map[(app_label, migration.name)] = (app_label, new_name) next_number += 1 migration.name = new_name # Now fix dependencies for migrations in changes.values(): for migration in migrations: migration.dependencies = [ name_map.get(d, d) for d in migration.dependencies ] return changes def _trim_to_apps(self, changes, app_labels): """ Take changes from arrange_for_graph() and set of app labels, and return a modified set of changes which trims out as many migrations that are not in app_labels as possible. Note that some other migrations may still be present as they may be required dependencies. """ # Gather other app dependencies in a first pass app_dependencies = {} for app_label, migrations in changes.items(): for migration in migrations: for dep_app_label, name in migration.dependencies: app_dependencies.setdefault(app_label, set()).add(dep_app_label) required_apps = set(app_labels) # Keep resolving till there's no change old_required_apps = None while old_required_apps != required_apps: old_required_apps = set(required_apps) required_apps.update( *[app_dependencies.get(app_label, ()) for app_label in required_apps] ) # Remove all migrations that aren't needed for app_label in list(changes): if app_label not in required_apps: del changes[app_label] return changes @classmethod def parse_number(cls, name): """ Given a migration name, try to extract a number from the beginning of it. For a squashed migration such as '0001_squashed_0004…', return the second number. If no number is found, return None. """ if squashed_match := re.search(r".*_squashed_(\d+)", name): return int(squashed_match[1]) match = re.match(r"^\d+", name) if match: return int(match[0]) return None
import re from django.db.migrations.utils import get_migration_name_timestamp from django.db.transaction import atomic from .exceptions import IrreversibleError class Migration: """ The base class for all migrations. Migration files will import this from django.db.migrations.Migration and subclass it as a class called Migration. It will have one or more of the following attributes: - operations: A list of Operation instances, probably from django.db.migrations.operations - dependencies: A list of tuples of (app_path, migration_name) - run_before: A list of tuples of (app_path, migration_name) - replaces: A list of migration_names Note that all migrations come out of migrations and into the Loader or Graph as instances, having been initialized with their app label and name. """ # Operations to apply during this migration, in order. operations = [] # Other migrations that should be run before this migration. # Should be a list of (app, migration_name). dependencies = [] # Other migrations that should be run after this one (i.e. have # this migration added to their dependencies). Useful to make third-party # apps' migrations run after your AUTH_USER replacement, for example. run_before = [] # Migration names in this app that this migration replaces. If this is # non-empty, this migration will only be applied if all these migrations # are not applied. replaces = [] # Is this an initial migration? Initial migrations are skipped on # --fake-initial if the table or fields already exist. If None, check if # the migration has any dependencies to determine if there are dependencies # to tell if db introspection needs to be done. If True, always perform # introspection. If False, never perform introspection. initial = None # Whether to wrap the whole migration in a transaction. Only has an effect # on database backends which support transactional DDL. atomic = True def __init__(self, name, app_label): self.name = name self.app_label = app_label # Copy dependencies & other attrs as we might mutate them at runtime self.operations = list(self.__class__.operations) self.dependencies = list(self.__class__.dependencies) self.run_before = list(self.__class__.run_before) self.replaces = list(self.__class__.replaces) def __eq__(self, other): return ( isinstance(other, Migration) and self.name == other.name and self.app_label == other.app_label ) def __repr__(self): return "<Migration %s.%s>" % (self.app_label, self.name) def __str__(self): return "%s.%s" % (self.app_label, self.name) def __hash__(self): return hash("%s.%s" % (self.app_label, self.name)) def mutate_state(self, project_state, preserve=True): """ Take a ProjectState and return a new one with the migration's operations applied to it. Preserve the original object state by default and return a mutated state from a copy. """ new_state = project_state if preserve: new_state = project_state.clone() for operation in self.operations: operation.state_forwards(self.app_label, new_state) return new_state def apply(self, project_state, schema_editor, collect_sql=False): """ Take a project_state representing all migrations prior to this one and a schema_editor for a live database and apply the migration in a forwards order. Return the resulting project state for efficient reuse by following Migrations. 
""" for operation in self.operations: # If this operation cannot be represented as SQL, place a comment # there instead if collect_sql: schema_editor.collected_sql.append("--") schema_editor.collected_sql.append("-- %s" % operation.describe()) schema_editor.collected_sql.append("--") if not operation.reduces_to_sql: schema_editor.collected_sql.append( "-- THIS OPERATION CANNOT BE WRITTEN AS SQL" ) continue collected_sql_before = len(schema_editor.collected_sql) # Save the state before the operation has run old_state = project_state.clone() operation.state_forwards(self.app_label, project_state) # Run the operation atomic_operation = operation.atomic or ( self.atomic and operation.atomic is not False ) if not schema_editor.atomic_migration and atomic_operation: # Force a transaction on a non-transactional-DDL backend or an # atomic operation inside a non-atomic migration. with atomic(schema_editor.connection.alias): operation.database_forwards( self.app_label, schema_editor, old_state, project_state ) else: # Normal behaviour operation.database_forwards( self.app_label, schema_editor, old_state, project_state ) if collect_sql and collected_sql_before == len(schema_editor.collected_sql): schema_editor.collected_sql.append("-- (no-op)") return project_state def unapply(self, project_state, schema_editor, collect_sql=False): """ Take a project_state representing all migrations prior to this one and a schema_editor for a live database and apply the migration in a reverse order. The backwards migration process consists of two phases: 1. The intermediate states from right before the first until right after the last operation inside this migration are preserved. 2. The operations are applied in reverse order using the states recorded in step 1. """ # Construct all the intermediate states we need for a reverse migration to_run = [] new_state = project_state # Phase 1 for operation in self.operations: # If it's irreversible, error out if not operation.reversible: raise IrreversibleError( "Operation %s in %s is not reversible" % (operation, self) ) # Preserve new state from previous run to not tamper the same state # over all operations new_state = new_state.clone() old_state = new_state.clone() operation.state_forwards(self.app_label, new_state) to_run.insert(0, (operation, old_state, new_state)) # Phase 2 for operation, to_state, from_state in to_run: if collect_sql: schema_editor.collected_sql.append("--") schema_editor.collected_sql.append("-- %s" % operation.describe()) schema_editor.collected_sql.append("--") if not operation.reduces_to_sql: schema_editor.collected_sql.append( "-- THIS OPERATION CANNOT BE WRITTEN AS SQL" ) continue collected_sql_before = len(schema_editor.collected_sql) atomic_operation = operation.atomic or ( self.atomic and operation.atomic is not False ) if not schema_editor.atomic_migration and atomic_operation: # Force a transaction on a non-transactional-DDL backend or an # atomic operation inside a non-atomic migration. with atomic(schema_editor.connection.alias): operation.database_backwards( self.app_label, schema_editor, from_state, to_state ) else: # Normal behaviour operation.database_backwards( self.app_label, schema_editor, from_state, to_state ) if collect_sql and collected_sql_before == len(schema_editor.collected_sql): schema_editor.collected_sql.append("-- (no-op)") return project_state def suggest_name(self): """ Suggest a name for the operations this migration might represent. 
Names are not guaranteed to be unique, but put some effort into the fallback name to avoid VCS conflicts if possible. """ if self.initial: return "initial" raw_fragments = [op.migration_name_fragment for op in self.operations] fragments = [re.sub(r"\W+", "_", name) for name in raw_fragments if name] if not fragments or len(fragments) != len(self.operations): return "auto_%s" % get_migration_name_timestamp() name = fragments[0] for fragment in fragments[1:]: new_name = f"{name}_{fragment}" if len(new_name) > 52: name = f"{name}_and_more" break name = new_name return name class SwappableTuple(tuple): """ Subclass of tuple so Django can tell this was originally a swappable dependency when it reads the migration file. """ def __new__(cls, value, setting): self = tuple.__new__(cls, value) self.setting = setting return self def swappable_dependency(value): """Turn a setting value into a dependency.""" return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
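# Example usage (an illustrative sketch appended after the module, not part of
# Django itself): a migration file normally subclasses Migration (defined
# above) and fills in `dependencies` and `operations`, while
# swappable_dependency() is what generated migrations use to depend on
# whichever model a swappable setting such as AUTH_USER_MODEL points at. The
# "exampleapp" label, the Note model, and the hard-coded "auth.User" value
# are assumptions made for the demonstration.
def demo_mutate_state():
    from django.db import models
    from django.db.migrations.operations import CreateModel
    from django.db.migrations.state import ProjectState

    class ExampleMigration(Migration):
        initial = True
        dependencies = [
            # Equivalent to swappable_dependency(settings.AUTH_USER_MODEL)
            # in a generated migration file.
            swappable_dependency("auth.User"),
        ]
        operations = [
            CreateModel(
                name="Note",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("body", models.TextField(blank=True)),
                ],
            ),
        ]

    migration = ExampleMigration("0001_initial", "exampleapp")
    # mutate_state() applies the operations to an in-memory ProjectState;
    # no database or schema editor is involved.
    new_state = migration.mutate_state(ProjectState())
    assert sorted(new_state.models) == [("exampleapp", "note")]
    return migration, new_state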
import builtins import collections.abc import datetime import decimal import enum import functools import math import os import pathlib import re import types import uuid from django.conf import SettingsReference from django.db import models from django.db.migrations.operations.base import Operation from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject from django.utils.functional import LazyObject, Promise from django.utils.version import PY311, get_docs_version class BaseSerializer: def __init__(self, value): self.value = value def serialize(self): raise NotImplementedError( "Subclasses of BaseSerializer must implement the serialize() method." ) class BaseSequenceSerializer(BaseSerializer): def _format(self): raise NotImplementedError( "Subclasses of BaseSequenceSerializer must implement the _format() method." ) def serialize(self): imports = set() strings = [] for item in self.value: item_string, item_imports = serializer_factory(item).serialize() imports.update(item_imports) strings.append(item_string) value = self._format() return value % (", ".join(strings)), imports class BaseSimpleSerializer(BaseSerializer): def serialize(self): return repr(self.value), set() class ChoicesSerializer(BaseSerializer): def serialize(self): return serializer_factory(self.value.value).serialize() class DateTimeSerializer(BaseSerializer): """For datetime.*, except datetime.datetime.""" def serialize(self): return repr(self.value), {"import datetime"} class DatetimeDatetimeSerializer(BaseSerializer): """For datetime.datetime.""" def serialize(self): if self.value.tzinfo is not None and self.value.tzinfo != datetime.timezone.utc: self.value = self.value.astimezone(datetime.timezone.utc) imports = ["import datetime"] return repr(self.value), set(imports) class DecimalSerializer(BaseSerializer): def serialize(self): return repr(self.value), {"from decimal import Decimal"} class DeconstructableSerializer(BaseSerializer): @staticmethod def serialize_deconstructed(path, args, kwargs): name, imports = DeconstructableSerializer._serialize_path(path) strings = [] for arg in args: arg_string, arg_imports = serializer_factory(arg).serialize() strings.append(arg_string) imports.update(arg_imports) for kw, arg in sorted(kwargs.items()): arg_string, arg_imports = serializer_factory(arg).serialize() imports.update(arg_imports) strings.append("%s=%s" % (kw, arg_string)) return "%s(%s)" % (name, ", ".join(strings)), imports @staticmethod def _serialize_path(path): module, name = path.rsplit(".", 1) if module == "django.db.models": imports = {"from django.db import models"} name = "models.%s" % name else: imports = {"import %s" % module} name = path return name, imports def serialize(self): return self.serialize_deconstructed(*self.value.deconstruct()) class DictionarySerializer(BaseSerializer): def serialize(self): imports = set() strings = [] for k, v in sorted(self.value.items()): k_string, k_imports = serializer_factory(k).serialize() v_string, v_imports = serializer_factory(v).serialize() imports.update(k_imports) imports.update(v_imports) strings.append((k_string, v_string)) return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports class EnumSerializer(BaseSerializer): def serialize(self): enum_class = self.value.__class__ module = enum_class.__module__ if issubclass(enum_class, enum.Flag): if PY311: members = list(self.value) else: members, _ = enum._decompose(enum_class, self.value) members = reversed(members) else: members = (self.value,) return ( " | ".join( [ 
f"{module}.{enum_class.__qualname__}[{item.name!r}]" for item in members ] ), {"import %s" % module}, ) class FloatSerializer(BaseSimpleSerializer): def serialize(self): if math.isnan(self.value) or math.isinf(self.value): return 'float("{}")'.format(self.value), set() return super().serialize() class FrozensetSerializer(BaseSequenceSerializer): def _format(self): return "frozenset([%s])" class FunctionTypeSerializer(BaseSerializer): def serialize(self): if getattr(self.value, "__self__", None) and isinstance( self.value.__self__, type ): klass = self.value.__self__ module = klass.__module__ return "%s.%s.%s" % (module, klass.__name__, self.value.__name__), { "import %s" % module } # Further error checking if self.value.__name__ == "<lambda>": raise ValueError("Cannot serialize function: lambda") if self.value.__module__ is None: raise ValueError("Cannot serialize function %r: No module" % self.value) module_name = self.value.__module__ if "<" not in self.value.__qualname__: # Qualname can include <locals> return "%s.%s" % (module_name, self.value.__qualname__), { "import %s" % self.value.__module__ } raise ValueError( "Could not find function %s in %s.\n" % (self.value.__name__, module_name) ) class FunctoolsPartialSerializer(BaseSerializer): def serialize(self): # Serialize functools.partial() arguments func_string, func_imports = serializer_factory(self.value.func).serialize() args_string, args_imports = serializer_factory(self.value.args).serialize() keywords_string, keywords_imports = serializer_factory( self.value.keywords ).serialize() # Add any imports needed by arguments imports = {"import functools", *func_imports, *args_imports, *keywords_imports} return ( "functools.%s(%s, *%s, **%s)" % ( self.value.__class__.__name__, func_string, args_string, keywords_string, ), imports, ) class IterableSerializer(BaseSerializer): def serialize(self): imports = set() strings = [] for item in self.value: item_string, item_imports = serializer_factory(item).serialize() imports.update(item_imports) strings.append(item_string) # When len(strings)==0, the empty iterable should be serialized as # "()", not "(,)" because (,) is invalid Python syntax. value = "(%s)" if len(strings) != 1 else "(%s,)" return value % (", ".join(strings)), imports class ModelFieldSerializer(DeconstructableSerializer): def serialize(self): attr_name, path, args, kwargs = self.value.deconstruct() return self.serialize_deconstructed(path, args, kwargs) class ModelManagerSerializer(DeconstructableSerializer): def serialize(self): as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct() if as_manager: name, imports = self._serialize_path(qs_path) return "%s.as_manager()" % name, imports else: return self.serialize_deconstructed(manager_path, args, kwargs) class OperationSerializer(BaseSerializer): def serialize(self): from django.db.migrations.writer import OperationWriter string, imports = OperationWriter(self.value, indentation=0).serialize() # Nested operation, trailing comma is handled in upper OperationWriter._write() return string.rstrip(","), imports class PathLikeSerializer(BaseSerializer): def serialize(self): return repr(os.fspath(self.value)), {} class PathSerializer(BaseSerializer): def serialize(self): # Convert concrete paths to pure paths to avoid issues with migrations # generated on one platform being used on a different platform. 
prefix = "Pure" if isinstance(self.value, pathlib.Path) else "" return "pathlib.%s%r" % (prefix, self.value), {"import pathlib"} class RegexSerializer(BaseSerializer): def serialize(self): regex_pattern, pattern_imports = serializer_factory( self.value.pattern ).serialize() # Turn off default implicit flags (e.g. re.U) because regexes with the # same implicit and explicit flags aren't equal. flags = self.value.flags ^ re.compile("").flags regex_flags, flag_imports = serializer_factory(flags).serialize() imports = {"import re", *pattern_imports, *flag_imports} args = [regex_pattern] if flags: args.append(regex_flags) return "re.compile(%s)" % ", ".join(args), imports class SequenceSerializer(BaseSequenceSerializer): def _format(self): return "[%s]" class SetSerializer(BaseSequenceSerializer): def _format(self): # Serialize as a set literal except when value is empty because {} # is an empty dict. return "{%s}" if self.value else "set(%s)" class SettingsReferenceSerializer(BaseSerializer): def serialize(self): return "settings.%s" % self.value.setting_name, { "from django.conf import settings" } class TupleSerializer(BaseSequenceSerializer): def _format(self): # When len(value)==0, the empty tuple should be serialized as "()", # not "(,)" because (,) is invalid Python syntax. return "(%s)" if len(self.value) != 1 else "(%s,)" class TypeSerializer(BaseSerializer): def serialize(self): special_cases = [ (models.Model, "models.Model", ["from django.db import models"]), (type(None), "type(None)", []), ] for case, string, imports in special_cases: if case is self.value: return string, set(imports) if hasattr(self.value, "__module__"): module = self.value.__module__ if module == builtins.__name__: return self.value.__name__, set() else: return "%s.%s" % (module, self.value.__qualname__), { "import %s" % module } class UUIDSerializer(BaseSerializer): def serialize(self): return "uuid.%s" % repr(self.value), {"import uuid"} class Serializer: _registry = { # Some of these are order-dependent. frozenset: FrozensetSerializer, list: SequenceSerializer, set: SetSerializer, tuple: TupleSerializer, dict: DictionarySerializer, models.Choices: ChoicesSerializer, enum.Enum: EnumSerializer, datetime.datetime: DatetimeDatetimeSerializer, (datetime.date, datetime.timedelta, datetime.time): DateTimeSerializer, SettingsReference: SettingsReferenceSerializer, float: FloatSerializer, (bool, int, type(None), bytes, str, range): BaseSimpleSerializer, decimal.Decimal: DecimalSerializer, (functools.partial, functools.partialmethod): FunctoolsPartialSerializer, ( types.FunctionType, types.BuiltinFunctionType, types.MethodType, ): FunctionTypeSerializer, collections.abc.Iterable: IterableSerializer, (COMPILED_REGEX_TYPE, RegexObject): RegexSerializer, uuid.UUID: UUIDSerializer, pathlib.PurePath: PathSerializer, os.PathLike: PathLikeSerializer, } @classmethod def register(cls, type_, serializer): if not issubclass(serializer, BaseSerializer): raise ValueError( "'%s' must inherit from 'BaseSerializer'." % serializer.__name__ ) cls._registry[type_] = serializer @classmethod def unregister(cls, type_): cls._registry.pop(type_) def serializer_factory(value): if isinstance(value, Promise): value = str(value) elif isinstance(value, LazyObject): # The unwrapped value is returned as the first item of the arguments # tuple. 
value = value.__reduce__()[1][0] if isinstance(value, models.Field): return ModelFieldSerializer(value) if isinstance(value, models.manager.BaseManager): return ModelManagerSerializer(value) if isinstance(value, Operation): return OperationSerializer(value) if isinstance(value, type): return TypeSerializer(value) # Anything that knows how to deconstruct itself. if hasattr(value, "deconstruct"): return DeconstructableSerializer(value) for type_, serializer_cls in Serializer._registry.items(): if isinstance(value, type_): return serializer_cls(value) raise ValueError( "Cannot serialize: %r\nThere are some values Django cannot serialize into " "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/" "topics/migrations/#migration-serializing" % (value, get_docs_version()) )
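# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the serializer module above): a minimal,
# hedged example of how serializer_factory() might be exercised to obtain the
# (string, imports) pair that the migration writer embeds in generated files.
# It assumes Django is importable; no database or configured settings are
# needed for these simple values.
# ---------------------------------------------------------------------------
import datetime
import uuid

from django.db.migrations.serializer import serializer_factory

# A date dispatches to DateTimeSerializer and round-trips through repr().
string, imports = serializer_factory(datetime.date(2020, 1, 1)).serialize()
assert string == "datetime.date(2020, 1, 1)"
assert imports == {"import datetime"}

# A UUID dispatches to UUIDSerializer and carries its own import.
string, imports = serializer_factory(uuid.UUID(int=0)).serialize()
assert string == "uuid.UUID('00000000-0000-0000-0000-000000000000')"
assert imports == {"import uuid"}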
eac835dbdeca05cf89cfc37bdf9cfb705e858f3c02f6a526fe50193aa527129a
""" Classes to represent the definitions of aggregate functions. """ from django.core.exceptions import FieldError from django.db.models.expressions import Case, Func, Star, When from django.db.models.fields import IntegerField from django.db.models.functions.comparison import Coalesce from django.db.models.functions.mixins import ( FixDurationInputMixin, NumericOutputFieldMixin, ) __all__ = [ "Aggregate", "Avg", "Count", "Max", "Min", "StdDev", "Sum", "Variance", ] class Aggregate(Func): template = "%(function)s(%(distinct)s%(expressions)s)" contains_aggregate = True name = None filter_template = "%s FILTER (WHERE %%(filter)s)" window_compatible = True allow_distinct = False empty_result_set_value = None def __init__( self, *expressions, distinct=False, filter=None, default=None, **extra ): if distinct and not self.allow_distinct: raise TypeError("%s does not allow distinct." % self.__class__.__name__) if default is not None and self.empty_result_set_value is not None: raise TypeError(f"{self.__class__.__name__} does not allow default.") self.distinct = distinct self.filter = filter self.default = default super().__init__(*expressions, **extra) def get_source_fields(self): # Don't return the filter expression since it's not a source field. return [e._output_field_or_none for e in super().get_source_expressions()] def get_source_expressions(self): source_expressions = super().get_source_expressions() if self.filter: return source_expressions + [self.filter] return source_expressions def set_source_expressions(self, exprs): self.filter = self.filter and exprs.pop() return super().set_source_expressions(exprs) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # Aggregates are not allowed in UPDATE queries, so ignore for_save c = super().resolve_expression(query, allow_joins, reuse, summarize) c.filter = c.filter and c.filter.resolve_expression( query, allow_joins, reuse, summarize ) if not summarize: # Call Aggregate.get_source_expressions() to avoid # returning self.filter and including that in this loop. expressions = super(Aggregate, c).get_source_expressions() for index, expr in enumerate(expressions): if expr.contains_aggregate: before_resolved = self.get_source_expressions()[index] name = ( before_resolved.name if hasattr(before_resolved, "name") else repr(before_resolved) ) raise FieldError( "Cannot compute %s('%s'): '%s' is an aggregate" % (c.name, name, name) ) if (default := c.default) is None: return c if hasattr(default, "resolve_expression"): default = default.resolve_expression(query, allow_joins, reuse, summarize) c.default = None # Reset the default argument before wrapping. 
coalesce = Coalesce(c, default, output_field=c._output_field_or_none) coalesce.is_summary = c.is_summary return coalesce @property def default_alias(self): expressions = self.get_source_expressions() if len(expressions) == 1 and hasattr(expressions[0], "name"): return "%s__%s" % (expressions[0].name, self.name.lower()) raise TypeError("Complex expressions require an alias") def get_group_by_cols(self): return [] def as_sql(self, compiler, connection, **extra_context): extra_context["distinct"] = "DISTINCT " if self.distinct else "" if self.filter: if connection.features.supports_aggregate_filter_clause: filter_sql, filter_params = self.filter.as_sql(compiler, connection) template = self.filter_template % extra_context.get( "template", self.template ) sql, params = super().as_sql( compiler, connection, template=template, filter=filter_sql, **extra_context, ) return sql, (*params, *filter_params) else: copy = self.copy() copy.filter = None source_expressions = copy.get_source_expressions() condition = When(self.filter, then=source_expressions[0]) copy.set_source_expressions([Case(condition)] + source_expressions[1:]) return super(Aggregate, copy).as_sql( compiler, connection, **extra_context ) return super().as_sql(compiler, connection, **extra_context) def _get_repr_options(self): options = super()._get_repr_options() if self.distinct: options["distinct"] = self.distinct if self.filter: options["filter"] = self.filter return options class Avg(FixDurationInputMixin, NumericOutputFieldMixin, Aggregate): function = "AVG" name = "Avg" allow_distinct = True class Count(Aggregate): function = "COUNT" name = "Count" output_field = IntegerField() allow_distinct = True empty_result_set_value = 0 def __init__(self, expression, filter=None, **extra): if expression == "*": expression = Star() if isinstance(expression, Star) and filter is not None: raise ValueError("Star cannot be used with filter. Please specify a field.") super().__init__(expression, filter=filter, **extra) class Max(Aggregate): function = "MAX" name = "Max" class Min(Aggregate): function = "MIN" name = "Min" class StdDev(NumericOutputFieldMixin, Aggregate): name = "StdDev" def __init__(self, expression, sample=False, **extra): self.function = "STDDEV_SAMP" if sample else "STDDEV_POP" super().__init__(expression, **extra) def _get_repr_options(self): return {**super()._get_repr_options(), "sample": self.function == "STDDEV_SAMP"} class Sum(FixDurationInputMixin, Aggregate): function = "SUM" name = "Sum" allow_distinct = True class Variance(NumericOutputFieldMixin, Aggregate): name = "Variance" def __init__(self, expression, sample=False, **extra): self.function = "VAR_SAMP" if sample else "VAR_POP" super().__init__(expression, **extra) def _get_repr_options(self): return {**super()._get_repr_options(), "sample": self.function == "VAR_SAMP"}
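# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the aggregates module above): a hedged
# example of how filter and default map onto the classes defined above.
# "Book", "rating", and "published_year" are hypothetical names used only for
# demonstration; the commented queryset call assumes a configured Django
# project with such a model.
# ---------------------------------------------------------------------------
from django.db.models import Avg, Count, Q

# filter=Q(...) is rendered as FILTER (WHERE ...) when the backend supports
# it, otherwise as a CASE WHEN expression (see Aggregate.as_sql() above).
recent_count = Count("pk", filter=Q(published_year__gte=2020))

# default=0 causes resolve_expression() to wrap the aggregate in Coalesce(),
# so aggregating over an empty queryset returns 0 instead of None.
average_rating = Avg("rating", default=0)

# Hypothetical usage:
# Book.objects.aggregate(n_recent=recent_count, avg_rating=average_rating)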
a161dbdd0afe5ef5e2449ca3d40a1ceca43189bc998b9c6897eae4c6cb450109
""" The main QuerySet implementation. This provides the public API for the ORM. """ import copy import operator import warnings from itertools import chain, islice from asgiref.sync import sync_to_async import django from django.conf import settings from django.core import exceptions from django.db import ( DJANGO_VERSION_PICKLE_KEY, IntegrityError, NotSupportedError, connections, router, transaction, ) from django.db.models import AutoField, DateField, DateTimeField, Field, sql from django.db.models.constants import LOOKUP_SEP, OnConflict from django.db.models.deletion import Collector from django.db.models.expressions import Case, F, Ref, Value, When from django.db.models.functions import Cast, Trunc from django.db.models.query_utils import FilteredRelation, Q from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE from django.db.models.utils import create_namedtuple_class, resolve_callables from django.utils import timezone from django.utils.deprecation import RemovedInDjango50Warning from django.utils.functional import cached_property, partition # The maximum number of results to fetch in a get() query. MAX_GET_RESULTS = 21 # The maximum number of items to display in a QuerySet.__repr__ REPR_OUTPUT_SIZE = 20 class BaseIterable: def __init__( self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE ): self.queryset = queryset self.chunked_fetch = chunked_fetch self.chunk_size = chunk_size async def _async_generator(self): # Generators don't actually start running until the first time you call # next() on them, so make the generator object in the async thread and # then repeatedly dispatch to it in a sync thread. sync_generator = self.__iter__() def next_slice(gen): return list(islice(gen, self.chunk_size)) while True: chunk = await sync_to_async(next_slice)(sync_generator) for item in chunk: yield item if len(chunk) < self.chunk_size: break # __aiter__() is a *synchronous* method that has to then return an # *asynchronous* iterator/generator. Thus, nest an async generator inside # it. # This is a generic iterable converter for now, and is going to suffer a # performance penalty on large sets of items due to the cost of crossing # over the sync barrier for each chunk. Custom __aiter__() methods should # be added to each Iterable subclass, but that needs some work in the # Compiler first. def __aiter__(self): return self._async_generator() class ModelIterable(BaseIterable): """Iterable that yields a model instance for each row.""" def __iter__(self): queryset = self.queryset db = queryset.db compiler = queryset.query.get_compiler(using=db) # Execute the query. This will also fill compiler.select, klass_info, # and annotations. 
results = compiler.execute_sql( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ) select, klass_info, annotation_col_map = ( compiler.select, compiler.klass_info, compiler.annotation_col_map, ) model_cls = klass_info["model"] select_fields = klass_info["select_fields"] model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1 init_list = [ f[0].target.attname for f in select[model_fields_start:model_fields_end] ] related_populators = get_related_populators(klass_info, select, db) known_related_objects = [ ( field, related_objs, operator.attrgetter( *[ field.attname if from_field == "self" else queryset.model._meta.get_field(from_field).attname for from_field in field.from_fields ] ), ) for field, related_objs in queryset._known_related_objects.items() ] for row in compiler.results_iter(results): obj = model_cls.from_db( db, init_list, row[model_fields_start:model_fields_end] ) for rel_populator in related_populators: rel_populator.populate(row, obj) if annotation_col_map: for attr_name, col_pos in annotation_col_map.items(): setattr(obj, attr_name, row[col_pos]) # Add the known related objects to the model. for field, rel_objs, rel_getter in known_related_objects: # Avoid overwriting objects loaded by, e.g., select_related(). if field.is_cached(obj): continue rel_obj_id = rel_getter(obj) try: rel_obj = rel_objs[rel_obj_id] except KeyError: pass # May happen in qs1 | qs2 scenarios. else: setattr(obj, field.name, rel_obj) yield obj class RawModelIterable(BaseIterable): """ Iterable that yields a model instance for each row from a raw queryset. """ def __iter__(self): # Cache some things for performance reasons outside the loop. db = self.queryset.db query = self.queryset.query connection = connections[db] compiler = connection.ops.compiler("SQLCompiler")(query, connection, db) query_iterator = iter(query) try: ( model_init_names, model_init_pos, annotation_fields, ) = self.queryset.resolve_model_init_order() model_cls = self.queryset.model if model_cls._meta.pk.attname not in model_init_names: raise exceptions.FieldDoesNotExist( "Raw query must include the primary key" ) fields = [self.queryset.model_fields.get(c) for c in self.queryset.columns] converters = compiler.get_converters( [f.get_col(f.model._meta.db_table) if f else None for f in fields] ) if converters: query_iterator = compiler.apply_converters(query_iterator, converters) for values in query_iterator: # Associate fields to values model_init_values = [values[pos] for pos in model_init_pos] instance = model_cls.from_db(db, model_init_names, model_init_values) if annotation_fields: for column, pos in annotation_fields: setattr(instance, column, values[pos]) yield instance finally: # Done iterating the Query. If it has its own cursor, close it. if hasattr(query, "cursor") and query.cursor: query.cursor.close() class ValuesIterable(BaseIterable): """ Iterable returned by QuerySet.values() that yields a dict for each row. """ def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) # extra(select=...) cols are always at the start of the row. names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] indexes = range(len(names)) for row in compiler.results_iter( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ): yield {names[i]: row[i] for i in indexes} class ValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=False) that yields a tuple for each row. 
""" def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) if queryset._fields: # extra(select=...) cols are always at the start of the row. names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] fields = [ *queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields), ] if fields != names: # Reorder according to fields. index_map = {name: idx for idx, name in enumerate(names)} rowfactory = operator.itemgetter(*[index_map[f] for f in fields]) return map( rowfactory, compiler.results_iter( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ), ) return compiler.results_iter( tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size, ) class NamedValuesListIterable(ValuesListIterable): """ Iterable returned by QuerySet.values_list(named=True) that yields a namedtuple for each row. """ def __iter__(self): queryset = self.queryset if queryset._fields: names = queryset._fields else: query = queryset.query names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] tuple_class = create_namedtuple_class(*names) new = tuple.__new__ for row in super().__iter__(): yield new(tuple_class, row) class FlatValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=True) that yields single values. """ def __iter__(self): queryset = self.queryset compiler = queryset.query.get_compiler(queryset.db) for row in compiler.results_iter( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ): yield row[0] class QuerySet: """Represent a lazy database lookup for a set of objects.""" def __init__(self, model=None, query=None, using=None, hints=None): self.model = model self._db = using self._hints = hints or {} self._query = query or sql.Query(self.model) self._result_cache = None self._sticky_filter = False self._for_write = False self._prefetch_related_lookups = () self._prefetch_done = False self._known_related_objects = {} # {rel_field: {pk: rel_obj}} self._iterable_class = ModelIterable self._fields = None self._defer_next_filter = False self._deferred_filter = None @property def query(self): if self._deferred_filter: negate, args, kwargs = self._deferred_filter self._filter_or_exclude_inplace(negate, args, kwargs) self._deferred_filter = None return self._query @query.setter def query(self, value): if value.values_select: self._iterable_class = ValuesIterable self._query = value def as_manager(cls): # Address the circular dependency between `Queryset` and `Manager`. from django.db.models.manager import Manager manager = Manager.from_queryset(cls)() manager._built_with_as_manager = True return manager as_manager.queryset_only = True as_manager = classmethod(as_manager) ######################## # PYTHON MAGIC METHODS # ######################## def __deepcopy__(self, memo): """Don't populate the QuerySet's cache.""" obj = self.__class__() for k, v in self.__dict__.items(): if k == "_result_cache": obj.__dict__[k] = None else: obj.__dict__[k] = copy.deepcopy(v, memo) return obj def __getstate__(self): # Force the cache to be fully populated. self._fetch_all() return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__} def __setstate__(self, state): pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: if pickled_version != django.__version__: warnings.warn( "Pickled queryset instance's Django version %s does not " "match the current version %s." 
% (pickled_version, django.__version__), RuntimeWarning, stacklevel=2, ) else: warnings.warn( "Pickled queryset instance's Django version is not specified.", RuntimeWarning, stacklevel=2, ) self.__dict__.update(state) def __repr__(self): data = list(self[: REPR_OUTPUT_SIZE + 1]) if len(data) > REPR_OUTPUT_SIZE: data[-1] = "...(remaining elements truncated)..." return "<%s %r>" % (self.__class__.__name__, data) def __len__(self): self._fetch_all() return len(self._result_cache) def __iter__(self): """ The queryset iterator protocol uses three nested iterators in the default case: 1. sql.compiler.execute_sql() - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE) using cursor.fetchmany(). This part is responsible for doing some column masking, and returning the rows in chunks. 2. sql.compiler.results_iter() - Returns one row at time. At this point the rows are still just tuples. In some cases the return values are converted to Python values at this location. 3. self.iterator() - Responsible for turning the rows into model objects. """ self._fetch_all() return iter(self._result_cache) def __aiter__(self): # Remember, __aiter__ itself is synchronous, it's the thing it returns # that is async! async def generator(): await sync_to_async(self._fetch_all)() for item in self._result_cache: yield item return generator() def __bool__(self): self._fetch_all() return bool(self._result_cache) def __getitem__(self, k): """Retrieve an item or slice from the set of results.""" if not isinstance(k, (int, slice)): raise TypeError( "QuerySet indices must be integers or slices, not %s." % type(k).__name__ ) if (isinstance(k, int) and k < 0) or ( isinstance(k, slice) and ( (k.start is not None and k.start < 0) or (k.stop is not None and k.stop < 0) ) ): raise ValueError("Negative indexing is not supported.") if self._result_cache is not None: return self._result_cache[k] if isinstance(k, slice): qs = self._chain() if k.start is not None: start = int(k.start) else: start = None if k.stop is not None: stop = int(k.stop) else: stop = None qs.query.set_limits(start, stop) return list(qs)[:: k.step] if k.step else qs qs = self._chain() qs.query.set_limits(k, k + 1) qs._fetch_all() return qs._result_cache[0] def __class_getitem__(cls, *args, **kwargs): return cls def __and__(self, other): self._check_operator_queryset(other, "&") self._merge_sanity_check(other) if isinstance(other, EmptyQuerySet): return other if isinstance(self, EmptyQuerySet): return self combined = self._chain() combined._merge_known_related_objects(other) combined.query.combine(other.query, sql.AND) return combined def __or__(self, other): self._check_operator_queryset(other, "|") self._merge_sanity_check(other) if isinstance(self, EmptyQuerySet): return other if isinstance(other, EmptyQuerySet): return self query = ( self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values("pk")) ) combined = query._chain() combined._merge_known_related_objects(other) if not other.query.can_filter(): other = other.model._base_manager.filter(pk__in=other.values("pk")) combined.query.combine(other.query, sql.OR) return combined def __xor__(self, other): self._check_operator_queryset(other, "^") self._merge_sanity_check(other) if isinstance(self, EmptyQuerySet): return other if isinstance(other, EmptyQuerySet): return self query = ( self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values("pk")) ) combined = query._chain() combined._merge_known_related_objects(other) if not 
other.query.can_filter(): other = other.model._base_manager.filter(pk__in=other.values("pk")) combined.query.combine(other.query, sql.XOR) return combined #################################### # METHODS THAT DO DATABASE QUERIES # #################################### def _iterator(self, use_chunked_fetch, chunk_size): iterable = self._iterable_class( self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size or 2000, ) if not self._prefetch_related_lookups or chunk_size is None: yield from iterable return iterator = iter(iterable) while results := list(islice(iterator, chunk_size)): prefetch_related_objects(results, *self._prefetch_related_lookups) yield from results def iterator(self, chunk_size=None): """ An iterator over the results from applying this QuerySet to the database. chunk_size must be provided for QuerySets that prefetch related objects. Otherwise, a default chunk_size of 2000 is supplied. """ if chunk_size is None: if self._prefetch_related_lookups: # When the deprecation ends, replace with: # raise ValueError( # 'chunk_size must be provided when using ' # 'QuerySet.iterator() after prefetch_related().' # ) warnings.warn( "Using QuerySet.iterator() after prefetch_related() " "without specifying chunk_size is deprecated.", category=RemovedInDjango50Warning, stacklevel=2, ) elif chunk_size <= 0: raise ValueError("Chunk size must be strictly positive.") use_chunked_fetch = not connections[self.db].settings_dict.get( "DISABLE_SERVER_SIDE_CURSORS" ) return self._iterator(use_chunked_fetch, chunk_size) async def aiterator(self, chunk_size=2000): """ An asynchronous iterator over the results from applying this QuerySet to the database. """ if self._prefetch_related_lookups: raise NotSupportedError( "Using QuerySet.aiterator() after prefetch_related() is not supported." ) if chunk_size <= 0: raise ValueError("Chunk size must be strictly positive.") use_chunked_fetch = not connections[self.db].settings_dict.get( "DISABLE_SERVER_SIDE_CURSORS" ) async for item in self._iterable_class( self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size ): yield item def aggregate(self, *args, **kwargs): """ Return a dictionary containing the calculations (aggregation) over the current queryset. If args is present the expression is passed as a kwarg using the Aggregate object's default alias. """ if self.query.distinct_fields: raise NotImplementedError("aggregate() + distinct(fields) not implemented.") self._validate_values_are_expressions( (*args, *kwargs.values()), method_name="aggregate" ) for arg in args: # The default_alias property raises TypeError if default_alias # can't be set automatically or AttributeError if it isn't an # attribute. 
try: arg.default_alias except (AttributeError, TypeError): raise TypeError("Complex aggregates require an alias") kwargs[arg.default_alias] = arg query = self.query.chain() for (alias, aggregate_expr) in kwargs.items(): query.add_annotation(aggregate_expr, alias, is_summary=True) annotation = query.annotations[alias] if not annotation.contains_aggregate: raise TypeError("%s is not an aggregate expression" % alias) for expr in annotation.get_source_expressions(): if ( expr.contains_aggregate and isinstance(expr, Ref) and expr.refs in kwargs ): name = expr.refs raise exceptions.FieldError( "Cannot compute %s('%s'): '%s' is an aggregate" % (annotation.name, name, name) ) return query.get_aggregation(self.db, kwargs) async def aaggregate(self, *args, **kwargs): return await sync_to_async(self.aggregate)(*args, **kwargs) def count(self): """ Perform a SELECT COUNT() and return the number of records as an integer. If the QuerySet is already fully cached, return the length of the cached results set to avoid multiple SELECT COUNT(*) calls. """ if self._result_cache is not None: return len(self._result_cache) return self.query.get_count(using=self.db) async def acount(self): return await sync_to_async(self.count)() def get(self, *args, **kwargs): """ Perform the query and return a single object matching the given keyword arguments. """ if self.query.combinator and (args or kwargs): raise NotSupportedError( "Calling QuerySet.get(...) with filters after %s() is not " "supported." % self.query.combinator ) clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs) if self.query.can_filter() and not self.query.distinct_fields: clone = clone.order_by() limit = None if ( not clone.query.select_for_update or connections[clone.db].features.supports_select_for_update_with_limit ): limit = MAX_GET_RESULTS clone.query.set_limits(high=limit) num = len(clone) if num == 1: return clone._result_cache[0] if not num: raise self.model.DoesNotExist( "%s matching query does not exist." % self.model._meta.object_name ) raise self.model.MultipleObjectsReturned( "get() returned more than one %s -- it returned %s!" % ( self.model._meta.object_name, num if not limit or num < limit else "more than %s" % (limit - 1), ) ) async def aget(self, *args, **kwargs): return await sync_to_async(self.get)(*args, **kwargs) def create(self, **kwargs): """ Create a new object with the given kwargs, saving it to the database and returning the created object. """ obj = self.model(**kwargs) self._for_write = True obj.save(force_insert=True, using=self.db) return obj async def acreate(self, **kwargs): return await sync_to_async(self.create)(**kwargs) def _prepare_for_bulk_create(self, objs): for obj in objs: if obj.pk is None: # Populate new PK values. obj.pk = obj._meta.pk.get_pk_value_on_save(obj) obj._prepare_related_fields_for_save(operation_name="bulk_create") def _check_bulk_create_options( self, ignore_conflicts, update_conflicts, update_fields, unique_fields ): if ignore_conflicts and update_conflicts: raise ValueError( "ignore_conflicts and update_conflicts are mutually exclusive." ) db_features = connections[self.db].features if ignore_conflicts: if not db_features.supports_ignore_conflicts: raise NotSupportedError( "This database backend does not support ignoring conflicts." ) return OnConflict.IGNORE elif update_conflicts: if not db_features.supports_update_conflicts: raise NotSupportedError( "This database backend does not support updating conflicts." 
) if not update_fields: raise ValueError( "Fields that will be updated when a row insertion fails " "on conflicts must be provided." ) if unique_fields and not db_features.supports_update_conflicts_with_target: raise NotSupportedError( "This database backend does not support updating " "conflicts with specifying unique fields that can trigger " "the upsert." ) if not unique_fields and db_features.supports_update_conflicts_with_target: raise ValueError( "Unique fields that can trigger the upsert must be provided." ) # Updating primary keys and non-concrete fields is forbidden. update_fields = [self.model._meta.get_field(name) for name in update_fields] if any(not f.concrete or f.many_to_many for f in update_fields): raise ValueError( "bulk_create() can only be used with concrete fields in " "update_fields." ) if any(f.primary_key for f in update_fields): raise ValueError( "bulk_create() cannot be used with primary keys in " "update_fields." ) if unique_fields: # Primary key is allowed in unique_fields. unique_fields = [ self.model._meta.get_field(name) for name in unique_fields if name != "pk" ] if any(not f.concrete or f.many_to_many for f in unique_fields): raise ValueError( "bulk_create() can only be used with concrete fields " "in unique_fields." ) return OnConflict.UPDATE return None def bulk_create( self, objs, batch_size=None, ignore_conflicts=False, update_conflicts=False, update_fields=None, unique_fields=None, ): """ Insert each of the instances into the database. Do *not* call save() on each of the instances, do not send any pre/post_save signals, and do not set the primary key attribute if it is an autoincrement field (except if features.can_return_rows_from_bulk_insert=True). Multi-table models are not supported. """ # When you bulk insert you don't get the primary keys back (if it's an # autoincrement, except if can_return_rows_from_bulk_insert=True), so # you can't insert into the child tables which references this. There # are two workarounds: # 1) This could be implemented if you didn't have an autoincrement pk # 2) You could do it by doing O(n) normal inserts into the parent # tables to get the primary keys back and then doing a single bulk # insert into the childmost table. # We currently set the primary keys on the objects when using # PostgreSQL via the RETURNING ID clause. It should be possible for # Oracle as well, but the semantics for extracting the primary keys is # trickier so it's not done yet. if batch_size is not None and batch_size <= 0: raise ValueError("Batch size must be a positive integer.") # Check that the parents share the same concrete model with the our # model to detect the inheritance pattern ConcreteGrandParent -> # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy # would not identify that case as involving multiple tables. 
for parent in self.model._meta.get_parent_list(): if parent._meta.concrete_model is not self.model._meta.concrete_model: raise ValueError("Can't bulk create a multi-table inherited model") if not objs: return objs on_conflict = self._check_bulk_create_options( ignore_conflicts, update_conflicts, update_fields, unique_fields, ) self._for_write = True opts = self.model._meta fields = opts.concrete_fields objs = list(objs) self._prepare_for_bulk_create(objs) with transaction.atomic(using=self.db, savepoint=False): objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs) if objs_with_pk: returned_columns = self._batched_insert( objs_with_pk, fields, batch_size, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) for obj_with_pk, results in zip(objs_with_pk, returned_columns): for result, field in zip(results, opts.db_returning_fields): if field != opts.pk: setattr(obj_with_pk, field.attname, result) for obj_with_pk in objs_with_pk: obj_with_pk._state.adding = False obj_with_pk._state.db = self.db if objs_without_pk: fields = [f for f in fields if not isinstance(f, AutoField)] returned_columns = self._batched_insert( objs_without_pk, fields, batch_size, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) connection = connections[self.db] if ( connection.features.can_return_rows_from_bulk_insert and on_conflict is None ): assert len(returned_columns) == len(objs_without_pk) for obj_without_pk, results in zip(objs_without_pk, returned_columns): for result, field in zip(results, opts.db_returning_fields): setattr(obj_without_pk, field.attname, result) obj_without_pk._state.adding = False obj_without_pk._state.db = self.db return objs async def abulk_create( self, objs, batch_size=None, ignore_conflicts=False, update_conflicts=False, update_fields=None, unique_fields=None, ): return await sync_to_async(self.bulk_create)( objs=objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts, update_conflicts=update_conflicts, update_fields=update_fields, unique_fields=unique_fields, ) def bulk_update(self, objs, fields, batch_size=None): """ Update the given fields in each of the given objects in the database. """ if batch_size is not None and batch_size <= 0: raise ValueError("Batch size must be a positive integer.") if not fields: raise ValueError("Field names must be given to bulk_update().") objs = tuple(objs) if any(obj.pk is None for obj in objs): raise ValueError("All bulk_update() objects must have a primary key set.") fields = [self.model._meta.get_field(name) for name in fields] if any(not f.concrete or f.many_to_many for f in fields): raise ValueError("bulk_update() can only be used with concrete fields.") if any(f.primary_key for f in fields): raise ValueError("bulk_update() cannot be used with primary key fields.") if not objs: return 0 for obj in objs: obj._prepare_related_fields_for_save( operation_name="bulk_update", fields=fields ) # PK is used twice in the resulting update query, once in the filter # and once in the WHEN. Each field will also have one CAST. 
self._for_write = True connection = connections[self.db] max_batch_size = connection.ops.bulk_batch_size(["pk", "pk"] + fields, objs) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size requires_casting = connection.features.requires_casted_case_in_updates batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size)) updates = [] for batch_objs in batches: update_kwargs = {} for field in fields: when_statements = [] for obj in batch_objs: attr = getattr(obj, field.attname) if not hasattr(attr, "resolve_expression"): attr = Value(attr, output_field=field) when_statements.append(When(pk=obj.pk, then=attr)) case_statement = Case(*when_statements, output_field=field) if requires_casting: case_statement = Cast(case_statement, output_field=field) update_kwargs[field.attname] = case_statement updates.append(([obj.pk for obj in batch_objs], update_kwargs)) rows_updated = 0 queryset = self.using(self.db) with transaction.atomic(using=self.db, savepoint=False): for pks, update_kwargs in updates: rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs) return rows_updated bulk_update.alters_data = True async def abulk_update(self, objs, fields, batch_size=None): return await sync_to_async(self.bulk_update)( objs=objs, fields=fields, batch_size=batch_size, ) abulk_update.alters_data = True def get_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, creating one if necessary. Return a tuple of (object, created), where created is a boolean specifying whether an object was created. """ # The get() needs to be targeted at the write database in order # to avoid potential transaction consistency problems. self._for_write = True try: return self.get(**kwargs), False except self.model.DoesNotExist: params = self._extract_model_params(defaults, **kwargs) # Try to create an object using passed params. try: with transaction.atomic(using=self.db): params = dict(resolve_callables(params)) return self.create(**params), True except IntegrityError: try: return self.get(**kwargs), False except self.model.DoesNotExist: pass raise async def aget_or_create(self, defaults=None, **kwargs): return await sync_to_async(self.get_or_create)( defaults=defaults, **kwargs, ) def update_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, updating one with defaults if it exists, otherwise create a new one. Return a tuple (object, created), where created is a boolean specifying whether an object was created. """ defaults = defaults or {} self._for_write = True with transaction.atomic(using=self.db): # Lock the row so that a concurrent update is blocked until # update_or_create() has performed its save. obj, created = self.select_for_update().get_or_create(defaults, **kwargs) if created: return obj, created for k, v in resolve_callables(defaults): setattr(obj, k, v) update_fields = set(defaults) concrete_field_names = self.model._meta._non_pk_concrete_field_names # update_fields does not support non-concrete fields. if concrete_field_names.issuperset(update_fields): # Add fields which are set on pre_save(), e.g. auto_now fields. # This is to maintain backward compatibility as these fields # are not updated unless explicitly specified in the # update_fields list. 
for field in self.model._meta.local_concrete_fields: if not ( field.primary_key or field.__class__.pre_save is Field.pre_save ): update_fields.add(field.name) if field.name != field.attname: update_fields.add(field.attname) obj.save(using=self.db, update_fields=update_fields) else: obj.save(using=self.db) return obj, False async def aupdate_or_create(self, defaults=None, **kwargs): return await sync_to_async(self.update_or_create)( defaults=defaults, **kwargs, ) def _extract_model_params(self, defaults, **kwargs): """ Prepare `params` for creating a model instance based on the given kwargs; for use by get_or_create(). """ defaults = defaults or {} params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k} params.update(defaults) property_names = self.model._meta._property_names invalid_params = [] for param in params: try: self.model._meta.get_field(param) except exceptions.FieldDoesNotExist: # It's okay to use a model's property if it has a setter. if not (param in property_names and getattr(self.model, param).fset): invalid_params.append(param) if invalid_params: raise exceptions.FieldError( "Invalid field name(s) for model %s: '%s'." % ( self.model._meta.object_name, "', '".join(sorted(invalid_params)), ) ) return params def _earliest(self, *fields): """ Return the earliest object according to fields (if given) or by the model's Meta.get_latest_by. """ if fields: order_by = fields else: order_by = getattr(self.model._meta, "get_latest_by") if order_by and not isinstance(order_by, (tuple, list)): order_by = (order_by,) if order_by is None: raise ValueError( "earliest() and latest() require either fields as positional " "arguments or 'get_latest_by' in the model's Meta." ) obj = self._chain() obj.query.set_limits(high=1) obj.query.clear_ordering(force=True) obj.query.add_ordering(*order_by) return obj.get() def earliest(self, *fields): if self.query.is_sliced: raise TypeError("Cannot change a query once a slice has been taken.") return self._earliest(*fields) async def aearliest(self, *fields): return await sync_to_async(self.earliest)(*fields) def latest(self, *fields): """ Return the latest object according to fields (if given) or by the model's Meta.get_latest_by. """ if self.query.is_sliced: raise TypeError("Cannot change a query once a slice has been taken.") return self.reverse()._earliest(*fields) async def alatest(self, *fields): return await sync_to_async(self.latest)(*fields) def first(self): """Return the first object of a query or None if no match is found.""" if self.ordered: queryset = self else: self._check_ordering_first_last_queryset_aggregation(method="first") queryset = self.order_by("pk") for obj in queryset[:1]: return obj async def afirst(self): return await sync_to_async(self.first)() def last(self): """Return the last object of a query or None if no match is found.""" if self.ordered: queryset = self.reverse() else: self._check_ordering_first_last_queryset_aggregation(method="last") queryset = self.order_by("-pk") for obj in queryset[:1]: return obj async def alast(self): return await sync_to_async(self.last)() def in_bulk(self, id_list=None, *, field_name="pk"): """ Return a dictionary mapping each of the given IDs to the object with that ID. If `id_list` isn't provided, evaluate the entire QuerySet. 
""" if self.query.is_sliced: raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().") opts = self.model._meta unique_fields = [ constraint.fields[0] for constraint in opts.total_unique_constraints if len(constraint.fields) == 1 ] if ( field_name != "pk" and not opts.get_field(field_name).unique and field_name not in unique_fields and self.query.distinct_fields != (field_name,) ): raise ValueError( "in_bulk()'s field_name must be a unique field but %r isn't." % field_name ) if id_list is not None: if not id_list: return {} filter_key = "{}__in".format(field_name) batch_size = connections[self.db].features.max_query_params id_list = tuple(id_list) # If the database has a limit on the number of query parameters # (e.g. SQLite), retrieve objects in batches if necessary. if batch_size and batch_size < len(id_list): qs = () for offset in range(0, len(id_list), batch_size): batch = id_list[offset : offset + batch_size] qs += tuple(self.filter(**{filter_key: batch}).order_by()) else: qs = self.filter(**{filter_key: id_list}).order_by() else: qs = self._chain() return {getattr(obj, field_name): obj for obj in qs} async def ain_bulk(self, id_list=None, *, field_name="pk"): return await sync_to_async(self.in_bulk)( id_list=id_list, field_name=field_name, ) def delete(self): """Delete the records in the current QuerySet.""" self._not_support_combined_queries("delete") if self.query.is_sliced: raise TypeError("Cannot use 'limit' or 'offset' with delete().") if self.query.distinct or self.query.distinct_fields: raise TypeError("Cannot call delete() after .distinct().") if self._fields is not None: raise TypeError("Cannot call delete() after .values() or .values_list()") del_query = self._chain() # The delete is actually 2 queries - one to find related objects, # and one to delete. Make sure that the discovery of related # objects is performed on the same database as the deletion. del_query._for_write = True # Disable non-supported fields. del_query.query.select_for_update = False del_query.query.select_related = False del_query.query.clear_ordering(force=True) collector = Collector(using=del_query.db, origin=self) collector.collect(del_query) deleted, _rows_count = collector.delete() # Clear the result cache, in case this QuerySet gets reused. self._result_cache = None return deleted, _rows_count delete.alters_data = True delete.queryset_only = True async def adelete(self): return await sync_to_async(self.delete)() adelete.alters_data = True adelete.queryset_only = True def _raw_delete(self, using): """ Delete objects found from the given queryset in single direct SQL query. No signals are sent and there is no protection for cascades. """ query = self.query.clone() query.__class__ = sql.DeleteQuery cursor = query.get_compiler(using).execute_sql(CURSOR) if cursor: with cursor: return cursor.rowcount return 0 _raw_delete.alters_data = True def update(self, **kwargs): """ Update all elements in the current QuerySet, setting all the given fields to the appropriate values. """ self._not_support_combined_queries("update") if self.query.is_sliced: raise TypeError("Cannot update a query once a slice has been taken.") self._for_write = True query = self.query.chain(sql.UpdateQuery) query.add_update_values(kwargs) # Inline annotations in order_by(), if possible. 
new_order_by = [] for col in query.order_by: if annotation := query.annotations.get(col): if getattr(annotation, "contains_aggregate", False): raise exceptions.FieldError( f"Cannot update when ordering by an aggregate: {annotation}" ) new_order_by.append(annotation) else: new_order_by.append(col) query.order_by = tuple(new_order_by) # Clear any annotations so that they won't be present in subqueries. query.annotations = {} with transaction.mark_for_rollback_on_error(using=self.db): rows = query.get_compiler(self.db).execute_sql(CURSOR) self._result_cache = None return rows update.alters_data = True async def aupdate(self, **kwargs): return await sync_to_async(self.update)(**kwargs) aupdate.alters_data = True def _update(self, values): """ A version of update() that accepts field objects instead of field names. Used primarily for model saving and not intended for use by general code (it requires too much poking around at model internals to be useful at that level). """ if self.query.is_sliced: raise TypeError("Cannot update a query once a slice has been taken.") query = self.query.chain(sql.UpdateQuery) query.add_update_fields(values) # Clear any annotations so that they won't be present in subqueries. query.annotations = {} self._result_cache = None return query.get_compiler(self.db).execute_sql(CURSOR) _update.alters_data = True _update.queryset_only = False def exists(self): """ Return True if the QuerySet would have any results, False otherwise. """ if self._result_cache is None: return self.query.has_results(using=self.db) return bool(self._result_cache) async def aexists(self): return await sync_to_async(self.exists)() def contains(self, obj): """ Return True if the QuerySet contains the provided obj, False otherwise. """ self._not_support_combined_queries("contains") if self._fields is not None: raise TypeError( "Cannot call QuerySet.contains() after .values() or .values_list()." ) try: if obj._meta.concrete_model != self.model._meta.concrete_model: return False except AttributeError: raise TypeError("'obj' must be a model instance.") if obj.pk is None: raise ValueError("QuerySet.contains() cannot be used on unsaved objects.") if self._result_cache is not None: return obj in self._result_cache return self.filter(pk=obj.pk).exists() async def acontains(self, obj): return await sync_to_async(self.contains)(obj=obj) def _prefetch_related_objects(self): # This method can only be called once the result cache has been filled. prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def explain(self, *, format=None, **options): """ Runs an EXPLAIN on the SQL query this QuerySet would perform, and returns the results. 
""" return self.query.explain(using=self.db, format=format, **options) async def aexplain(self, *, format=None, **options): return await sync_to_async(self.explain)(format=format, **options) ################################################## # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS # ################################################## def raw(self, raw_query, params=(), translations=None, using=None): if using is None: using = self.db qs = RawQuerySet( raw_query, model=self.model, params=params, translations=translations, using=using, ) qs._prefetch_related_lookups = self._prefetch_related_lookups[:] return qs def _values(self, *fields, **expressions): clone = self._chain() if expressions: clone = clone.annotate(**expressions) clone._fields = fields clone.query.set_values(fields) return clone def values(self, *fields, **expressions): fields += tuple(expressions) clone = self._values(*fields, **expressions) clone._iterable_class = ValuesIterable return clone def values_list(self, *fields, flat=False, named=False): if flat and named: raise TypeError("'flat' and 'named' can't be used together.") if flat and len(fields) > 1: raise TypeError( "'flat' is not valid when values_list is called with more than one " "field." ) field_names = {f for f in fields if not hasattr(f, "resolve_expression")} _fields = [] expressions = {} counter = 1 for field in fields: if hasattr(field, "resolve_expression"): field_id_prefix = getattr( field, "default_alias", field.__class__.__name__.lower() ) while True: field_id = field_id_prefix + str(counter) counter += 1 if field_id not in field_names: break expressions[field_id] = field _fields.append(field_id) else: _fields.append(field) clone = self._values(*_fields, **expressions) clone._iterable_class = ( NamedValuesListIterable if named else FlatValuesListIterable if flat else ValuesListIterable ) return clone def dates(self, field_name, kind, order="ASC"): """ Return a list of date objects representing all available dates for the given field_name, scoped to 'kind'. """ if kind not in ("year", "month", "week", "day"): raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.") if order not in ("ASC", "DESC"): raise ValueError("'order' must be either 'ASC' or 'DESC'.") return ( self.annotate( datefield=Trunc(field_name, kind, output_field=DateField()), plain_field=F(field_name), ) .values_list("datefield", flat=True) .distinct() .filter(plain_field__isnull=False) .order_by(("-" if order == "DESC" else "") + "datefield") ) # RemovedInDjango50Warning: when the deprecation ends, remove is_dst # argument. def datetimes( self, field_name, kind, order="ASC", tzinfo=None, is_dst=timezone.NOT_PASSED ): """ Return a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'. """ if kind not in ("year", "month", "week", "day", "hour", "minute", "second"): raise ValueError( "'kind' must be one of 'year', 'month', 'week', 'day', " "'hour', 'minute', or 'second'." 
) if order not in ("ASC", "DESC"): raise ValueError("'order' must be either 'ASC' or 'DESC'.") if settings.USE_TZ: if tzinfo is None: tzinfo = timezone.get_current_timezone() else: tzinfo = None return ( self.annotate( datetimefield=Trunc( field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo, is_dst=is_dst, ), plain_field=F(field_name), ) .values_list("datetimefield", flat=True) .distinct() .filter(plain_field__isnull=False) .order_by(("-" if order == "DESC" else "") + "datetimefield") ) def none(self): """Return an empty QuerySet.""" clone = self._chain() clone.query.set_empty() return clone ################################################################## # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET # ################################################################## def all(self): """ Return a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases. """ return self._chain() def filter(self, *args, **kwargs): """ Return a new QuerySet instance with the args ANDed to the existing set. """ self._not_support_combined_queries("filter") return self._filter_or_exclude(False, args, kwargs) def exclude(self, *args, **kwargs): """ Return a new QuerySet instance with NOT (args) ANDed to the existing set. """ self._not_support_combined_queries("exclude") return self._filter_or_exclude(True, args, kwargs) def _filter_or_exclude(self, negate, args, kwargs): if (args or kwargs) and self.query.is_sliced: raise TypeError("Cannot filter a query once a slice has been taken.") clone = self._chain() if self._defer_next_filter: self._defer_next_filter = False clone._deferred_filter = negate, args, kwargs else: clone._filter_or_exclude_inplace(negate, args, kwargs) return clone def _filter_or_exclude_inplace(self, negate, args, kwargs): if negate: self._query.add_q(~Q(*args, **kwargs)) else: self._query.add_q(Q(*args, **kwargs)) def complex_filter(self, filter_obj): """ Return a new QuerySet instance with filter_obj added to the filters. filter_obj can be a Q object or a dictionary of keyword lookup arguments. This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods. """ if isinstance(filter_obj, Q): clone = self._chain() clone.query.add_q(filter_obj) return clone else: return self._filter_or_exclude(False, args=(), kwargs=filter_obj) def _combinator_query(self, combinator, *other_qs, all=False): # Clone the query to inherit the select list and everything clone = self._chain() # Clear limits and ordering so they can be reapplied clone.query.clear_ordering(force=True) clone.query.clear_limits() clone.query.combined_queries = (self.query,) + tuple( qs.query for qs in other_qs ) clone.query.combinator = combinator clone.query.combinator_all = all return clone def union(self, *other_qs, all=False): # If the query is an EmptyQuerySet, combine all nonempty querysets. if isinstance(self, EmptyQuerySet): qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)] if not qs: return self if len(qs) == 1: return qs[0] return qs[0]._combinator_query("union", *qs[1:], all=all) return self._combinator_query("union", *other_qs, all=all) def intersection(self, *other_qs): # If any query is an EmptyQuerySet, return it. 
if isinstance(self, EmptyQuerySet): return self for other in other_qs: if isinstance(other, EmptyQuerySet): return other return self._combinator_query("intersection", *other_qs) def difference(self, *other_qs): # If the query is an EmptyQuerySet, return it. if isinstance(self, EmptyQuerySet): return self return self._combinator_query("difference", *other_qs) def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False): """ Return a new QuerySet instance that will select objects with a FOR UPDATE lock. """ if nowait and skip_locked: raise ValueError("The nowait option cannot be used with skip_locked.") obj = self._chain() obj._for_write = True obj.query.select_for_update = True obj.query.select_for_update_nowait = nowait obj.query.select_for_update_skip_locked = skip_locked obj.query.select_for_update_of = of obj.query.select_for_no_key_update = no_key return obj def select_related(self, *fields): """ Return a new QuerySet instance that will select related objects. If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection. If select_related(None) is called, clear the list. """ self._not_support_combined_queries("select_related") if self._fields is not None: raise TypeError( "Cannot call select_related() after .values() or .values_list()" ) obj = self._chain() if fields == (None,): obj.query.select_related = False elif fields: obj.query.add_select_related(fields) else: obj.query.select_related = True return obj def prefetch_related(self, *lookups): """ Return a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated. When prefetch_related() is called more than once, append to the list of prefetch lookups. If prefetch_related(None) is called, clear the list. """ self._not_support_combined_queries("prefetch_related") clone = self._chain() if lookups == (None,): clone._prefetch_related_lookups = () else: for lookup in lookups: if isinstance(lookup, Prefetch): lookup = lookup.prefetch_to lookup = lookup.split(LOOKUP_SEP, 1)[0] if lookup in self.query._filtered_relations: raise ValueError( "prefetch_related() is not supported with FilteredRelation." ) clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def annotate(self, *args, **kwargs): """ Return a query set in which the returned objects have been annotated with extra data or aggregations. """ self._not_support_combined_queries("annotate") return self._annotate(args, kwargs, select=True) def alias(self, *args, **kwargs): """ Return a query set with added aliases for extra data or aggregations. """ self._not_support_combined_queries("alias") return self._annotate(args, kwargs, select=False) def _annotate(self, args, kwargs, select=True): self._validate_values_are_expressions( args + tuple(kwargs.values()), method_name="annotate" ) annotations = {} for arg in args: # The default_alias property may raise a TypeError. try: if arg.default_alias in kwargs: raise ValueError( "The named annotation '%s' conflicts with the " "default name for another annotation." 
% arg.default_alias ) except TypeError: raise TypeError("Complex annotations require an alias") annotations[arg.default_alias] = arg annotations.update(kwargs) clone = self._chain() names = self._fields if names is None: names = set( chain.from_iterable( (field.name, field.attname) if hasattr(field, "attname") else (field.name,) for field in self.model._meta.get_fields() ) ) for alias, annotation in annotations.items(): if alias in names: raise ValueError( "The annotation '%s' conflicts with a field on " "the model." % alias ) if isinstance(annotation, FilteredRelation): clone.query.add_filtered_relation(annotation, alias) else: clone.query.add_annotation( annotation, alias, is_summary=False, select=select, ) for alias, annotation in clone.query.annotations.items(): if alias in annotations and annotation.contains_aggregate: if clone._fields is None: clone.query.group_by = True else: clone.query.set_group_by() break return clone def order_by(self, *field_names): """Return a new QuerySet instance with the ordering changed.""" if self.query.is_sliced: raise TypeError("Cannot reorder a query once a slice has been taken.") obj = self._chain() obj.query.clear_ordering(force=True, clear_default=False) obj.query.add_ordering(*field_names) return obj def distinct(self, *field_names): """ Return a new QuerySet instance that will select only distinct results. """ self._not_support_combined_queries("distinct") if self.query.is_sliced: raise TypeError( "Cannot create distinct fields once a slice has been taken." ) obj = self._chain() obj.query.add_distinct_fields(*field_names) return obj def extra( self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None, ): """Add extra SQL fragments to the query.""" self._not_support_combined_queries("extra") if self.query.is_sliced: raise TypeError("Cannot change a query once a slice has been taken.") clone = self._chain() clone.query.add_extra(select, select_params, where, params, tables, order_by) return clone def reverse(self): """Reverse the ordering of the QuerySet.""" if self.query.is_sliced: raise TypeError("Cannot reverse a query once a slice has been taken.") clone = self._chain() clone.query.standard_ordering = not clone.query.standard_ordering return clone def defer(self, *fields): """ Defer the loading of data for certain fields until they are accessed. Add the set of deferred fields to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case removal all deferrals. """ self._not_support_combined_queries("defer") if self._fields is not None: raise TypeError("Cannot call defer() after .values() or .values_list()") clone = self._chain() if fields == (None,): clone.query.clear_deferred_loading() else: clone.query.add_deferred_loading(fields) return clone def only(self, *fields): """ Essentially, the opposite of defer(). Only the fields passed into this method and that are not already specified as deferred are loaded immediately when the queryset is evaluated. """ self._not_support_combined_queries("only") if self._fields is not None: raise TypeError("Cannot call only() after .values() or .values_list()") if fields == (None,): # Can only pass None to defer(), not only(), as the rest option. # That won't stop people trying to do this, so let's be explicit. 
raise TypeError("Cannot pass None as an argument to only().") for field in fields: field = field.split(LOOKUP_SEP, 1)[0] if field in self.query._filtered_relations: raise ValueError("only() is not supported with FilteredRelation.") clone = self._chain() clone.query.add_immediate_loading(fields) return clone def using(self, alias): """Select which database this QuerySet should execute against.""" clone = self._chain() clone._db = alias return clone ################################### # PUBLIC INTROSPECTION ATTRIBUTES # ################################### @property def ordered(self): """ Return True if the QuerySet is ordered -- i.e. has an order_by() clause or a default ordering on the model (or is empty). """ if isinstance(self, EmptyQuerySet): return True if self.query.extra_order_by or self.query.order_by: return True elif ( self.query.default_ordering and self.query.get_meta().ordering and # A default ordering doesn't affect GROUP BY queries. not self.query.group_by ): return True else: return False @property def db(self): """Return the database used if this query is executed now.""" if self._for_write: return self._db or router.db_for_write(self.model, **self._hints) return self._db or router.db_for_read(self.model, **self._hints) ################### # PRIVATE METHODS # ################### def _insert( self, objs, fields, returning_fields=None, raw=False, using=None, on_conflict=None, update_fields=None, unique_fields=None, ): """ Insert a new record for the given model. This provides an interface to the InsertQuery class and is how Model.save() is implemented. """ self._for_write = True if using is None: using = self.db query = sql.InsertQuery( self.model, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) query.insert_values(fields, objs, raw=raw) return query.get_compiler(using=using).execute_sql(returning_fields) _insert.alters_data = True _insert.queryset_only = False def _batched_insert( self, objs, fields, batch_size, on_conflict=None, update_fields=None, unique_fields=None, ): """ Helper method for bulk_create() to insert objs one batch at a time. """ connection = connections[self.db] ops = connection.ops max_batch_size = max(ops.bulk_batch_size(fields, objs), 1) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size inserted_rows = [] bulk_return = connection.features.can_return_rows_from_bulk_insert for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]: if bulk_return and on_conflict is None: inserted_rows.extend( self._insert( item, fields=fields, using=self.db, returning_fields=self.model._meta.db_returning_fields, ) ) else: self._insert( item, fields=fields, using=self.db, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) return inserted_rows def _chain(self): """ Return a copy of the current QuerySet that's ready for another operation. """ obj = self._clone() if obj._sticky_filter: obj.query.filter_is_sticky = True obj._sticky_filter = False return obj def _clone(self): """ Return a copy of the current QuerySet. A lightweight alternative to deepcopy(). 
""" c = self.__class__( model=self.model, query=self.query.chain(), using=self._db, hints=self._hints, ) c._sticky_filter = self._sticky_filter c._for_write = self._for_write c._prefetch_related_lookups = self._prefetch_related_lookups[:] c._known_related_objects = self._known_related_objects c._iterable_class = self._iterable_class c._fields = self._fields return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self._iterable_class(self)) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def _next_is_sticky(self): """ Indicate that the next filter call and the one following that should be treated as a single filter. This is only important when it comes to determining when to reuse tables for many-to-many filters. Required so that we can filter naturally on the results of related managers. This doesn't return a clone of the current QuerySet (it returns "self"). The method is only used internally and should be immediately followed by a filter() that does create a clone. """ self._sticky_filter = True return self def _merge_sanity_check(self, other): """Check that two QuerySet classes may be merged.""" if self._fields is not None and ( set(self.query.values_select) != set(other.query.values_select) or set(self.query.extra_select) != set(other.query.extra_select) or set(self.query.annotation_select) != set(other.query.annotation_select) ): raise TypeError( "Merging '%s' classes must involve the same values in each case." % self.__class__.__name__ ) def _merge_known_related_objects(self, other): """ Keep track of all known related objects from either QuerySet instance. """ for field, objects in other._known_related_objects.items(): self._known_related_objects.setdefault(field, {}).update(objects) def resolve_expression(self, *args, **kwargs): if self._fields and len(self._fields) > 1: # values() queryset can only be used as nested queries # if they are set up to select only a single field. raise TypeError("Cannot use multi-field values as a filter value.") query = self.query.resolve_expression(*args, **kwargs) query._db = self._db return query resolve_expression.queryset_only = True def _add_hints(self, **hints): """ Update hinting information for use by routers. Add new key/values or overwrite existing key/values. """ self._hints.update(hints) def _has_filters(self): """ Check if this QuerySet has any filtering going on. This isn't equivalent with checking if all objects are present in results, for example, qs[1:]._has_filters() -> False. """ return self.query.has_filters() @staticmethod def _validate_values_are_expressions(values, method_name): invalid_args = sorted( str(arg) for arg in values if not hasattr(arg, "resolve_expression") ) if invalid_args: raise TypeError( "QuerySet.%s() received non-expression(s): %s." % ( method_name, ", ".join(invalid_args), ) ) def _not_support_combined_queries(self, operation_name): if self.query.combinator: raise NotSupportedError( "Calling QuerySet.%s() after %s() is not supported." 
% (operation_name, self.query.combinator) ) def _check_operator_queryset(self, other, operator_): if self.query.combinator or other.query.combinator: raise TypeError(f"Cannot use {operator_} operator with combined queryset.") def _check_ordering_first_last_queryset_aggregation(self, method): if isinstance(self.query.group_by, tuple) and not any( col.output_field is self.model._meta.pk for col in self.query.group_by ): raise TypeError( f"Cannot use QuerySet.{method}() on an unordered queryset performing " f"aggregation. Add an ordering with order_by()." ) class InstanceCheckMeta(type): def __instancecheck__(self, instance): return isinstance(instance, QuerySet) and instance.query.is_empty() class EmptyQuerySet(metaclass=InstanceCheckMeta): """ Marker class to checking if a queryset is empty by .none(): isinstance(qs.none(), EmptyQuerySet) -> True """ def __init__(self, *args, **kwargs): raise TypeError("EmptyQuerySet can't be instantiated") class RawQuerySet: """ Provide an iterator which converts the results of raw SQL queries into annotated model instances. """ def __init__( self, raw_query, model=None, query=None, params=(), translations=None, using=None, hints=None, ): self.raw_query = raw_query self.model = model self._db = using self._hints = hints or {} self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params) self.params = params self.translations = translations or {} self._result_cache = None self._prefetch_related_lookups = () self._prefetch_done = False def resolve_model_init_order(self): """Resolve the init field names and value positions.""" converter = connections[self.db].introspection.identifier_converter model_init_fields = [ f for f in self.model._meta.fields if converter(f.column) in self.columns ] annotation_fields = [ (column, pos) for pos, column in enumerate(self.columns) if column not in self.model_fields ] model_init_order = [ self.columns.index(converter(f.column)) for f in model_init_fields ] model_init_names = [f.attname for f in model_init_fields] return model_init_names, model_init_order, annotation_fields def prefetch_related(self, *lookups): """Same as QuerySet.prefetch_related()""" clone = self._clone() if lookups == (None,): clone._prefetch_related_lookups = () else: clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def _prefetch_related_objects(self): prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def _clone(self): """Same as QuerySet._clone()""" c = self.__class__( self.raw_query, model=self.model, query=self.query, params=self.params, translations=self.translations, using=self._db, hints=self._hints, ) c._prefetch_related_lookups = self._prefetch_related_lookups[:] return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self.iterator()) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def __len__(self): self._fetch_all() return len(self._result_cache) def __bool__(self): self._fetch_all() return bool(self._result_cache) def __iter__(self): self._fetch_all() return iter(self._result_cache) def __aiter__(self): # Remember, __aiter__ itself is synchronous, it's the thing it returns # that is async! 
async def generator(): await sync_to_async(self._fetch_all)() for item in self._result_cache: yield item return generator() def iterator(self): yield from RawModelIterable(self) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.query) def __getitem__(self, k): return list(self)[k] @property def db(self): """Return the database used if this query is executed now.""" return self._db or router.db_for_read(self.model, **self._hints) def using(self, alias): """Select the database this RawQuerySet should execute against.""" return RawQuerySet( self.raw_query, model=self.model, query=self.query.chain(using=alias), params=self.params, translations=self.translations, using=alias, ) @cached_property def columns(self): """ A list of model field names in the order they'll appear in the query results. """ columns = self.query.get_columns() # Adjust any column names which don't match field names for (query_name, model_name) in self.translations.items(): # Ignore translations for nonexistent column names try: index = columns.index(query_name) except ValueError: pass else: columns[index] = model_name return columns @cached_property def model_fields(self): """A dict mapping column names to model field names.""" converter = connections[self.db].introspection.identifier_converter model_fields = {} for field in self.model._meta.fields: name, column = field.get_attname_column() model_fields[converter(column)] = field return model_fields class Prefetch: def __init__(self, lookup, queryset=None, to_attr=None): # `prefetch_through` is the path we traverse to perform the prefetch. self.prefetch_through = lookup # `prefetch_to` is the path to the attribute that stores the result. self.prefetch_to = lookup if queryset is not None and ( isinstance(queryset, RawQuerySet) or ( hasattr(queryset, "_iterable_class") and not issubclass(queryset._iterable_class, ModelIterable) ) ): raise ValueError( "Prefetch querysets cannot use raw(), values(), and values_list()." 
) if to_attr: self.prefetch_to = LOOKUP_SEP.join( lookup.split(LOOKUP_SEP)[:-1] + [to_attr] ) self.queryset = queryset self.to_attr = to_attr def __getstate__(self): obj_dict = self.__dict__.copy() if self.queryset is not None: queryset = self.queryset._chain() # Prevent the QuerySet from being evaluated queryset._result_cache = [] queryset._prefetch_done = True obj_dict["queryset"] = queryset return obj_dict def add_prefix(self, prefix): self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to def get_current_prefetch_to(self, level): return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[: level + 1]) def get_current_to_attr(self, level): parts = self.prefetch_to.split(LOOKUP_SEP) to_attr = parts[level] as_attr = self.to_attr and level == len(parts) - 1 return to_attr, as_attr def get_current_queryset(self, level): if self.get_current_prefetch_to(level) == self.prefetch_to: return self.queryset return None def __eq__(self, other): if not isinstance(other, Prefetch): return NotImplemented return self.prefetch_to == other.prefetch_to def __hash__(self): return hash((self.__class__, self.prefetch_to)) def normalize_prefetch_lookups(lookups, prefix=None): """Normalize lookups into Prefetch objects.""" ret = [] for lookup in lookups: if not isinstance(lookup, Prefetch): lookup = Prefetch(lookup) if prefix: lookup.add_prefix(prefix) ret.append(lookup) return ret def prefetch_related_objects(model_instances, *related_lookups): """ Populate prefetched object caches for a list of model instances based on the lookups/Prefetch instances given. """ if not model_instances: return # nothing to do # We need to be able to dynamically add to the list of prefetch_related # lookups that we look up (see below). So we need some book keeping to # ensure we don't do duplicate work. done_queries = {} # dictionary of things like 'foo__bar': [results] auto_lookups = set() # we add to this as we go through. followed_descriptors = set() # recursion protection all_lookups = normalize_prefetch_lookups(reversed(related_lookups)) while all_lookups: lookup = all_lookups.pop() if lookup.prefetch_to in done_queries: if lookup.queryset is not None: raise ValueError( "'%s' lookup was already seen with a different queryset. " "You may need to adjust the ordering of your lookups." % lookup.prefetch_to ) continue # Top level, the list of objects to decorate is the result cache # from the primary QuerySet. It won't be for deeper levels. obj_list = model_instances through_attrs = lookup.prefetch_through.split(LOOKUP_SEP) for level, through_attr in enumerate(through_attrs): # Prepare main instances if not obj_list: break prefetch_to = lookup.get_current_prefetch_to(level) if prefetch_to in done_queries: # Skip any prefetching, and any object preparation obj_list = done_queries[prefetch_to] continue # Prepare objects: good_objects = True for obj in obj_list: # Since prefetching can re-use instances, it is possible to have # the same instance multiple times in obj_list, so obj might # already be prepared. if not hasattr(obj, "_prefetched_objects_cache"): try: obj._prefetched_objects_cache = {} except (AttributeError, TypeError): # Must be an immutable object from # values_list(flat=True), for example (TypeError) or # a QuerySet subclass that isn't returning Model # instances (AttributeError), either in Django or a 3rd # party. prefetch_related() doesn't make sense, so quit. 
good_objects = False break if not good_objects: break # Descend down tree # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. first_obj = obj_list[0] to_attr = lookup.get_current_to_attr(level)[0] prefetcher, descriptor, attr_found, is_fetched = get_prefetcher( first_obj, through_attr, to_attr ) if not attr_found: raise AttributeError( "Cannot find '%s' on %s object, '%s' is an invalid " "parameter to prefetch_related()" % ( through_attr, first_obj.__class__.__name__, lookup.prefetch_through, ) ) if level == len(through_attrs) - 1 and prefetcher is None: # Last one, this *must* resolve to something that supports # prefetching, otherwise there is no point adding it and the # developer asking for it has made a mistake. raise ValueError( "'%s' does not resolve to an item that supports " "prefetching - this is an invalid parameter to " "prefetch_related()." % lookup.prefetch_through ) obj_to_fetch = None if prefetcher is not None: obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)] if obj_to_fetch: obj_list, additional_lookups = prefetch_one_level( obj_to_fetch, prefetcher, lookup, level, ) # We need to ensure we don't keep adding lookups from the # same relationships to stop infinite recursion. So, if we # are already on an automatically added lookup, don't add # the new lookups from relationships we've seen already. if not ( prefetch_to in done_queries and lookup in auto_lookups and descriptor in followed_descriptors ): done_queries[prefetch_to] = obj_list new_lookups = normalize_prefetch_lookups( reversed(additional_lookups), prefetch_to ) auto_lookups.update(new_lookups) all_lookups.extend(new_lookups) followed_descriptors.add(descriptor) else: # Either a singly related object that has already been fetched # (e.g. via select_related), or hopefully some other property # that doesn't support prefetching but needs to be traversed. # We replace the current list of parent objects with the list # of related objects, filtering out empty or missing values so # that we can continue with nullable or reverse relations. new_obj_list = [] for obj in obj_list: if through_attr in getattr(obj, "_prefetched_objects_cache", ()): # If related objects have been prefetched, use the # cache rather than the object's through_attr. new_obj = list(obj._prefetched_objects_cache.get(through_attr)) else: try: new_obj = getattr(obj, through_attr) except exceptions.ObjectDoesNotExist: continue if new_obj is None: continue # We special-case `list` rather than something more generic # like `Iterable` because we don't want to accidentally match # user models that define __iter__. if isinstance(new_obj, list): new_obj_list.extend(new_obj) else: new_obj_list.append(new_obj) obj_list = new_obj_list def get_prefetcher(instance, through_attr, to_attr): """ For the attribute 'through_attr' on the given instance, find an object that has a get_prefetch_queryset(). 
Return a 4 tuple containing: (the object with get_prefetch_queryset (or None), the descriptor object representing this relationship (or None), a boolean that is False if the attribute was not found at all, a function that takes an instance and returns a boolean that is True if the attribute has already been fetched for that instance) """ def has_to_attr_attribute(instance): return hasattr(instance, to_attr) prefetcher = None is_fetched = has_to_attr_attribute # For singly related objects, we have to avoid getting the attribute # from the object, as this will trigger the query. So we first try # on the class, in order to get the descriptor object. rel_obj_descriptor = getattr(instance.__class__, through_attr, None) if rel_obj_descriptor is None: attr_found = hasattr(instance, through_attr) else: attr_found = True if rel_obj_descriptor: # singly related object, descriptor object has the # get_prefetch_queryset() method. if hasattr(rel_obj_descriptor, "get_prefetch_queryset"): prefetcher = rel_obj_descriptor is_fetched = rel_obj_descriptor.is_cached else: # descriptor doesn't support prefetching, so we go ahead and get # the attribute on the instance rather than the class to # support many related managers rel_obj = getattr(instance, through_attr) if hasattr(rel_obj, "get_prefetch_queryset"): prefetcher = rel_obj if through_attr != to_attr: # Special case cached_property instances because hasattr # triggers attribute computation and assignment. if isinstance( getattr(instance.__class__, to_attr, None), cached_property ): def has_cached_property(instance): return to_attr in instance.__dict__ is_fetched = has_cached_property else: def in_prefetched_cache(instance): return through_attr in instance._prefetched_objects_cache is_fetched = in_prefetched_cache return prefetcher, rel_obj_descriptor, attr_found, is_fetched def prefetch_one_level(instances, prefetcher, lookup, level): """ Helper function for prefetch_related_objects(). Run prefetches on all instances using the prefetcher object, assigning results to relevant caches in instance. Return the prefetched objects along with any additional prefetches that must be done due to prefetch_related lookups found from default managers. """ # prefetcher must have a method get_prefetch_queryset() which takes a list # of instances, and returns a tuple: # (queryset of instances of self.model that are related to passed in instances, # callable that gets value to be matched for returned instances, # callable that gets value to be matched for passed in instances, # boolean that is True for singly related objects, # cache or field name to assign to, # boolean that is True when the previous argument is a cache name vs a field name). # The 'values to be matched' must be hashable as they will be used # in a dictionary. ( rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor, ) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)) # We have to handle the possibility that the QuerySet we just got back # contains some prefetch_related lookups. We don't want to trigger the # prefetch_related functionality by evaluating the query. Rather, we need # to merge in the prefetch_related lookups. # Copy the lookups in case it is a Prefetch object which could be reused # later (happens in nested prefetch_related). 
additional_lookups = [ copy.copy(additional_lookup) for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ()) ] if additional_lookups: # Don't need to clone because the manager should have given us a fresh # instance, so we access an internal instead of using public interface # for performance reasons. rel_qs._prefetch_related_lookups = () all_related_objects = list(rel_qs) rel_obj_cache = {} for rel_obj in all_related_objects: rel_attr_val = rel_obj_attr(rel_obj) rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) to_attr, as_attr = lookup.get_current_to_attr(level) # Make sure `to_attr` does not conflict with a field. if as_attr and instances: # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. model = instances[0].__class__ try: model._meta.get_field(to_attr) except exceptions.FieldDoesNotExist: pass else: msg = "to_attr={} conflicts with a field on the {} model." raise ValueError(msg.format(to_attr, model.__name__)) # Whether or not we're prefetching the last part of the lookup. leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level for obj in instances: instance_attr_val = instance_attr(obj) vals = rel_obj_cache.get(instance_attr_val, []) if single: val = vals[0] if vals else None if as_attr: # A to_attr has been given for the prefetch. setattr(obj, to_attr, val) elif is_descriptor: # cache_name points to a field name in obj. # This field is a descriptor for a related object. setattr(obj, cache_name, val) else: # No to_attr has been given for this prefetch operation and the # cache_name does not point to a descriptor. Store the value of # the field in the object's field cache. obj._state.fields_cache[cache_name] = val else: if as_attr: setattr(obj, to_attr, vals) else: manager = getattr(obj, to_attr) if leaf and lookup.queryset is not None: qs = manager._apply_rel_filters(lookup.queryset) else: qs = manager.get_queryset() qs._result_cache = vals # We don't want the individual qs doing prefetch_related now, # since we have merged this into the current work. qs._prefetch_done = True obj._prefetched_objects_cache[cache_name] = qs return all_related_objects, additional_lookups class RelatedPopulator: """ RelatedPopulator is used for select_related() object instantiation. The idea is that each select_related() model will be populated by a different RelatedPopulator instance. The RelatedPopulator instances get klass_info and select (computed in SQLCompiler) plus the used db as input for initialization. That data is used to compute which columns to use, how to instantiate the model, and how to populate the links between the objects. The actual creation of the objects is done in populate() method. This method gets row and from_obj as input and populates the select_related() model instance. """ def __init__(self, klass_info, select, db): self.db = db # Pre-compute needed attributes. The attributes are: # - model_cls: the possibly deferred model class to instantiate # - either: # - cols_start, cols_end: usually the columns in the row are # in the same order model_cls.__init__ expects them, so we # can instantiate by model_cls(*row[cols_start:cols_end]) # - reorder_for_init: When select_related descends to a child # class, then we want to reuse the already selected parent # data. However, in this case the parent data isn't necessarily # in the same order that Model.__init__ expects it to be, so # we have to reorder the parent data. 
The reorder_for_init # attribute contains a function used to reorder the field data # in the order __init__ expects it. # - pk_idx: the index of the primary key field in the reordered # model data. Used to check if a related object exists at all. # - init_list: the field attnames fetched from the database. For # deferred models this isn't the same as all attnames of the # model's fields. # - related_populators: a list of RelatedPopulator instances if # select_related() descends to related models from this model. # - local_setter, remote_setter: Methods to set cached values on # the object being populated and on the remote object. Usually # these are Field.set_cached_value() methods. select_fields = klass_info["select_fields"] from_parent = klass_info["from_parent"] if not from_parent: self.cols_start = select_fields[0] self.cols_end = select_fields[-1] + 1 self.init_list = [ f[0].target.attname for f in select[self.cols_start : self.cols_end] ] self.reorder_for_init = None else: attname_indexes = { select[idx][0].target.attname: idx for idx in select_fields } model_init_attnames = ( f.attname for f in klass_info["model"]._meta.concrete_fields ) self.init_list = [ attname for attname in model_init_attnames if attname in attname_indexes ] self.reorder_for_init = operator.itemgetter( *[attname_indexes[attname] for attname in self.init_list] ) self.model_cls = klass_info["model"] self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname) self.related_populators = get_related_populators(klass_info, select, self.db) self.local_setter = klass_info["local_setter"] self.remote_setter = klass_info["remote_setter"] def populate(self, row, from_obj): if self.reorder_for_init: obj_data = self.reorder_for_init(row) else: obj_data = row[self.cols_start : self.cols_end] if obj_data[self.pk_idx] is None: obj = None else: obj = self.model_cls.from_db(self.db, self.init_list, obj_data) for rel_iter in self.related_populators: rel_iter.populate(row, obj) self.local_setter(from_obj, obj) if obj is not None: self.remote_setter(obj, from_obj) def get_related_populators(klass_info, select, db): iterators = [] related_klass_infos = klass_info.get("related_klass_infos", []) for rel_klass_info in related_klass_infos: rel_cls = RelatedPopulator(rel_klass_info, select, db) iterators.append(rel_cls) return iterators
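# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above). The Prefetch
# class and prefetch_related_objects() machinery defined earlier are normally
# driven through the public QuerySet.prefetch_related() API. The Author and
# Book models and the published_year field below are hypothetical.
#
#     from django.db.models import Prefetch
#
#     # Prefetch a filtered reverse relation into a custom attribute, so each
#     # author gets `author.recent_books` populated by one extra query for the
#     # whole batch instead of one query per author.
#     authors = Author.objects.prefetch_related(
#         Prefetch(
#             "book_set",
#             queryset=Book.objects.filter(published_year__gte=2020),
#             to_attr="recent_books",
#         )
#     )
#     for author in authors:
#         print(author.name, [book.title for book in author.recent_books])
# ---------------------------------------------------------------------------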
import bisect import copy import inspect import warnings from collections import defaultdict from django.apps import apps from django.conf import settings from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured from django.db import connections from django.db.models import AutoField, Manager, OrderWrt, UniqueConstraint from django.db.models.query_utils import PathInfo from django.utils.datastructures import ImmutableList, OrderedSet from django.utils.deprecation import RemovedInDjango51Warning from django.utils.functional import cached_property from django.utils.module_loading import import_string from django.utils.text import camel_case_to_spaces, format_lazy from django.utils.translation import override PROXY_PARENTS = object() EMPTY_RELATION_TREE = () IMMUTABLE_WARNING = ( "The return type of '%s' should never be mutated. If you want to manipulate this " "list for your own use, make a copy first." ) DEFAULT_NAMES = ( "verbose_name", "verbose_name_plural", "db_table", "ordering", "unique_together", "permissions", "get_latest_by", "order_with_respect_to", "app_label", "db_tablespace", "abstract", "managed", "proxy", "swappable", "auto_created", # Must be kept for backward compatibility with old migrations. "index_together", "apps", "default_permissions", "select_on_save", "default_related_name", "required_db_features", "required_db_vendor", "base_manager_name", "default_manager_name", "indexes", "constraints", ) def normalize_together(option_together): """ option_together can be either a tuple of tuples, or a single tuple of two strings. Normalize it to a tuple of tuples, so that calling code can uniformly expect that. """ try: if not option_together: return () if not isinstance(option_together, (tuple, list)): raise TypeError first_element = option_together[0] if not isinstance(first_element, (tuple, list)): option_together = (option_together,) # Normalize everything to tuples return tuple(tuple(ot) for ot in option_together) except TypeError: # If the value of option_together isn't valid, return it # verbatim; this will be picked up by the check framework later. return option_together def make_immutable_fields_list(name, data): return ImmutableList(data, warning=IMMUTABLE_WARNING % name) class Options: FORWARD_PROPERTIES = { "fields", "many_to_many", "concrete_fields", "local_concrete_fields", "_non_pk_concrete_field_names", "_forward_fields_map", "managers", "managers_map", "base_manager", "default_manager", } REVERSE_PROPERTIES = {"related_objects", "fields_map", "_relation_tree"} default_apps = apps def __init__(self, meta, app_label=None): self._get_fields_cache = {} self.local_fields = [] self.local_many_to_many = [] self.private_fields = [] self.local_managers = [] self.base_manager_name = None self.default_manager_name = None self.model_name = None self.verbose_name = None self.verbose_name_plural = None self.db_table = "" self.ordering = [] self._ordering_clash = False self.indexes = [] self.constraints = [] self.unique_together = [] self.index_together = [] # RemovedInDjango51Warning. 
self.select_on_save = False self.default_permissions = ("add", "change", "delete", "view") self.permissions = [] self.object_name = None self.app_label = app_label self.get_latest_by = None self.order_with_respect_to = None self.db_tablespace = settings.DEFAULT_TABLESPACE self.required_db_features = [] self.required_db_vendor = None self.meta = meta self.pk = None self.auto_field = None self.abstract = False self.managed = True self.proxy = False # For any class that is a proxy (including automatically created # classes for deferred object loading), proxy_for_model tells us # which class this model is proxying. Note that proxy_for_model # can create a chain of proxy models. For non-proxy models, the # variable is always None. self.proxy_for_model = None # For any non-abstract class, the concrete class is the model # in the end of the proxy_for_model chain. In particular, for # concrete models, the concrete_model is always the class itself. self.concrete_model = None self.swappable = None self.parents = {} self.auto_created = False # List of all lookups defined in ForeignKey 'limit_choices_to' options # from *other* models. Needed for some admin checks. Internal use only. self.related_fkey_lookups = [] # A custom app registry to use, if you're making a separate model set. self.apps = self.default_apps self.default_related_name = None @property def label(self): return "%s.%s" % (self.app_label, self.object_name) @property def label_lower(self): return "%s.%s" % (self.app_label, self.model_name) @property def app_config(self): # Don't go through get_app_config to avoid triggering imports. return self.apps.app_configs.get(self.app_label) def contribute_to_class(self, cls, name): from django.db import connection from django.db.backends.utils import truncate_name cls._meta = self self.model = cls # First, construct the default values for these options. self.object_name = cls.__name__ self.model_name = self.object_name.lower() self.verbose_name = camel_case_to_spaces(self.object_name) # Store the original user-defined values for each option, # for use when serializing the model definition self.original_attrs = {} # Next, apply any overridden values from 'class Meta'. if self.meta: meta_attrs = self.meta.__dict__.copy() for name in self.meta.__dict__: # Ignore any private attributes that Django doesn't care about. # NOTE: We can't modify a dictionary's contents while looping # over it, so we loop over the *original* dictionary instead. if name.startswith("_"): del meta_attrs[name] for attr_name in DEFAULT_NAMES: if attr_name in meta_attrs: setattr(self, attr_name, meta_attrs.pop(attr_name)) self.original_attrs[attr_name] = getattr(self, attr_name) elif hasattr(self.meta, attr_name): setattr(self, attr_name, getattr(self.meta, attr_name)) self.original_attrs[attr_name] = getattr(self, attr_name) self.unique_together = normalize_together(self.unique_together) self.index_together = normalize_together(self.index_together) if self.index_together: warnings.warn( f"'index_together' is deprecated. Use 'Meta.indexes' in " f"{self.label!r} instead.", RemovedInDjango51Warning, ) # App label/class name interpolation for names of constraints and # indexes. if not getattr(cls._meta, "abstract", False): for attr_name in {"constraints", "indexes"}: objs = getattr(self, attr_name, []) setattr(self, attr_name, self._format_names_with_class(cls, objs)) # verbose_name_plural is a special case because it uses a 's' # by default. 
if self.verbose_name_plural is None: self.verbose_name_plural = format_lazy("{}s", self.verbose_name) # order_with_respect_and ordering are mutually exclusive. self._ordering_clash = bool(self.ordering and self.order_with_respect_to) # Any leftover attributes must be invalid. if meta_attrs != {}: raise TypeError( "'class Meta' got invalid attribute(s): %s" % ",".join(meta_attrs) ) else: self.verbose_name_plural = format_lazy("{}s", self.verbose_name) del self.meta # If the db_table wasn't provided, use the app_label + model_name. if not self.db_table: self.db_table = "%s_%s" % (self.app_label, self.model_name) self.db_table = truncate_name( self.db_table, connection.ops.max_name_length() ) def _format_names_with_class(self, cls, objs): """App label/class name interpolation for object names.""" new_objs = [] for obj in objs: obj = obj.clone() obj.name = obj.name % { "app_label": cls._meta.app_label.lower(), "class": cls.__name__.lower(), } new_objs.append(obj) return new_objs def _get_default_pk_class(self): pk_class_path = getattr( self.app_config, "default_auto_field", settings.DEFAULT_AUTO_FIELD, ) if self.app_config and self.app_config._is_default_auto_field_overridden: app_config_class = type(self.app_config) source = ( f"{app_config_class.__module__}." f"{app_config_class.__qualname__}.default_auto_field" ) else: source = "DEFAULT_AUTO_FIELD" if not pk_class_path: raise ImproperlyConfigured(f"{source} must not be empty.") try: pk_class = import_string(pk_class_path) except ImportError as e: msg = ( f"{source} refers to the module '{pk_class_path}' that could " f"not be imported." ) raise ImproperlyConfigured(msg) from e if not issubclass(pk_class, AutoField): raise ValueError( f"Primary key '{pk_class_path}' referred by {source} must " f"subclass AutoField." ) return pk_class def _prepare(self, model): if self.order_with_respect_to: # The app registry will not be ready at this point, so we cannot # use get_field(). query = self.order_with_respect_to try: self.order_with_respect_to = next( f for f in self._get_fields(reverse=False) if f.name == query or f.attname == query ) except StopIteration: raise FieldDoesNotExist( "%s has no field named '%s'" % (self.object_name, query) ) self.ordering = ("_order",) if not any( isinstance(field, OrderWrt) for field in model._meta.local_fields ): model.add_to_class("_order", OrderWrt()) else: self.order_with_respect_to = None if self.pk is None: if self.parents: # Promote the first parent link in lieu of adding yet another # field. field = next(iter(self.parents.values())) # Look for a local field with the same name as the # first parent link. If a local field has already been # created, use it instead of promoting the parent already_created = [ fld for fld in self.local_fields if fld.name == field.name ] if already_created: field = already_created[0] field.primary_key = True self.setup_pk(field) else: pk_class = self._get_default_pk_class() auto = pk_class(verbose_name="ID", primary_key=True, auto_created=True) model.add_to_class("id", auto) def add_manager(self, manager): self.local_managers.append(manager) self._expire_cache() def add_field(self, field, private=False): # Insert the given field in the order in which it was created, using # the "creation_counter" attribute of the field. # Move many-to-many related fields from self.fields into # self.many_to_many. 
if private: self.private_fields.append(field) elif field.is_relation and field.many_to_many: bisect.insort(self.local_many_to_many, field) else: bisect.insort(self.local_fields, field) self.setup_pk(field) # If the field being added is a relation to another known field, # expire the cache on this field and the forward cache on the field # being referenced, because there will be new relationships in the # cache. Otherwise, expire the cache of references *to* this field. # The mechanism for getting at the related model is slightly odd - # ideally, we'd just ask for field.related_model. However, related_model # is a cached property, and all the models haven't been loaded yet, so # we need to make sure we don't cache a string reference. if ( field.is_relation and hasattr(field.remote_field, "model") and field.remote_field.model ): try: field.remote_field.model._meta._expire_cache(forward=False) except AttributeError: pass self._expire_cache() else: self._expire_cache(reverse=False) def setup_pk(self, field): if not self.pk and field.primary_key: self.pk = field field.serialize = False def setup_proxy(self, target): """ Do the internal setup so that the current model is a proxy for "target". """ self.pk = target._meta.pk self.proxy_for_model = target self.db_table = target._meta.db_table def __repr__(self): return "<Options for %s>" % self.object_name def __str__(self): return self.label_lower def can_migrate(self, connection): """ Return True if the model can/should be migrated on the `connection`. `connection` can be either a real connection or a connection alias. """ if self.proxy or self.swapped or not self.managed: return False if isinstance(connection, str): connection = connections[connection] if self.required_db_vendor: return self.required_db_vendor == connection.vendor if self.required_db_features: return all( getattr(connection.features, feat, False) for feat in self.required_db_features ) return True @property def verbose_name_raw(self): """Return the untranslated verbose name.""" with override(None): return str(self.verbose_name) @property def swapped(self): """ Has this model been swapped out for another? If so, return the model name of the replacement; otherwise, return None. For historical reasons, model name lookups using get_model() are case insensitive, so we make sure we are case insensitive here. """ if self.swappable: swapped_for = getattr(settings, self.swappable, None) if swapped_for: try: swapped_label, swapped_object = swapped_for.split(".") except ValueError: # setting not in the format app_label.model_name # raising ImproperlyConfigured here causes problems with # test cleanup code - instead it is raised in get_user_model # or as part of validation. 
return swapped_for if ( "%s.%s" % (swapped_label, swapped_object.lower()) != self.label_lower ): return swapped_for return None @cached_property def managers(self): managers = [] seen_managers = set() bases = (b for b in self.model.mro() if hasattr(b, "_meta")) for depth, base in enumerate(bases): for manager in base._meta.local_managers: if manager.name in seen_managers: continue manager = copy.copy(manager) manager.model = self.model seen_managers.add(manager.name) managers.append((depth, manager.creation_counter, manager)) return make_immutable_fields_list( "managers", (m[2] for m in sorted(managers)), ) @cached_property def managers_map(self): return {manager.name: manager for manager in self.managers} @cached_property def base_manager(self): base_manager_name = self.base_manager_name if not base_manager_name: # Get the first parent's base_manager_name if there's one. for parent in self.model.mro()[1:]: if hasattr(parent, "_meta"): if parent._base_manager.name != "_base_manager": base_manager_name = parent._base_manager.name break if base_manager_name: try: return self.managers_map[base_manager_name] except KeyError: raise ValueError( "%s has no manager named %r" % ( self.object_name, base_manager_name, ) ) manager = Manager() manager.name = "_base_manager" manager.model = self.model manager.auto_created = True return manager @cached_property def default_manager(self): default_manager_name = self.default_manager_name if not default_manager_name and not self.local_managers: # Get the first parent's default_manager_name if there's one. for parent in self.model.mro()[1:]: if hasattr(parent, "_meta"): default_manager_name = parent._meta.default_manager_name break if default_manager_name: try: return self.managers_map[default_manager_name] except KeyError: raise ValueError( "%s has no manager named %r" % ( self.object_name, default_manager_name, ) ) if self.managers: return self.managers[0] @cached_property def fields(self): """ Return a list of all forward fields on the model and its parents, excluding ManyToManyFields. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ # For legacy reasons, the fields property should only contain forward # fields that are not private or with a m2m cardinality. Therefore we # pass these three filters as filters to the generator. # The third lambda is a longwinded way of checking f.related_model - we don't # use that property directly because related_model is a cached property, # and all the models may not have been loaded yet; we don't want to cache # the string reference to the related_model. def is_not_an_m2m_field(f): return not (f.is_relation and f.many_to_many) def is_not_a_generic_relation(f): return not (f.is_relation and f.one_to_many) def is_not_a_generic_foreign_key(f): return not ( f.is_relation and f.many_to_one and not (hasattr(f.remote_field, "model") and f.remote_field.model) ) return make_immutable_fields_list( "fields", ( f for f in self._get_fields(reverse=False) if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f) ), ) @cached_property def concrete_fields(self): """ Return a list of all concrete fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. 
""" return make_immutable_fields_list( "concrete_fields", (f for f in self.fields if f.concrete) ) @cached_property def local_concrete_fields(self): """ Return a list of all concrete fields on the model. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ return make_immutable_fields_list( "local_concrete_fields", (f for f in self.local_fields if f.concrete) ) @cached_property def many_to_many(self): """ Return a list of all many to many fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this list. """ return make_immutable_fields_list( "many_to_many", ( f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many ), ) @cached_property def related_objects(self): """ Return all related objects pointing to the current model. The related objects can come from a one-to-one, one-to-many, or many-to-many field relation type. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ all_related_fields = self._get_fields( forward=False, reverse=True, include_hidden=True ) return make_immutable_fields_list( "related_objects", ( obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many ), ) @cached_property def _forward_fields_map(self): res = {} fields = self._get_fields(reverse=False) for field in fields: res[field.name] = field # Due to the way Django's internals work, get_field() should also # be able to fetch a field by attname. In the case of a concrete # field with relation, includes the *_id name too try: res[field.attname] = field except AttributeError: pass return res @cached_property def fields_map(self): res = {} fields = self._get_fields(forward=False, include_hidden=True) for field in fields: res[field.name] = field # Due to the way Django's internals work, get_field() should also # be able to fetch a field by attname. In the case of a concrete # field with relation, includes the *_id name too try: res[field.attname] = field except AttributeError: pass return res def get_field(self, field_name): """ Return a field instance given the name of a forward or reverse field. """ try: # In order to avoid premature loading of the relation tree # (expensive) we prefer checking if the field is a forward field. return self._forward_fields_map[field_name] except KeyError: # If the app registry is not ready, reverse fields are # unavailable, therefore we throw a FieldDoesNotExist exception. if not self.apps.models_ready: raise FieldDoesNotExist( "%s has no field named '%s'. The app cache isn't ready yet, " "so if this is an auto-created related field, it won't " "be available yet." % (self.object_name, field_name) ) try: # Retrieve field instance by name from cached or just-computed # field map. return self.fields_map[field_name] except KeyError: raise FieldDoesNotExist( "%s has no field named '%s'" % (self.object_name, field_name) ) def get_base_chain(self, model): """ Return a list of parent classes leading to `model` (ordered from closest to most distant ancestor). This has to handle the case where `model` is a grandparent or even more distant relation. 
""" if not self.parents: return [] if model in self.parents: return [model] for parent in self.parents: res = parent._meta.get_base_chain(model) if res: res.insert(0, parent) return res return [] def get_parent_list(self): """ Return all the ancestors of this model as a list ordered by MRO. Useful for determining if something is an ancestor, regardless of lineage. """ result = OrderedSet(self.parents) for parent in self.parents: for ancestor in parent._meta.get_parent_list(): result.add(ancestor) return list(result) def get_ancestor_link(self, ancestor): """ Return the field on the current model which points to the given "ancestor". This is possible an indirect link (a pointer to a parent model, which points, eventually, to the ancestor). Used when constructing table joins for model inheritance. Return None if the model isn't an ancestor of this one. """ if ancestor in self.parents: return self.parents[ancestor] for parent in self.parents: # Tries to get a link field from the immediate parent parent_link = parent._meta.get_ancestor_link(ancestor) if parent_link: # In case of a proxied model, the first link # of the chain to the ancestor is that parent # links return self.parents[parent] or parent_link def get_path_to_parent(self, parent): """ Return a list of PathInfos containing the path from the current model to the parent model, or an empty list if parent is not a parent of the current model. """ if self.model is parent: return [] # Skip the chain of proxy to the concrete proxied model. proxied_model = self.concrete_model path = [] opts = self for int_model in self.get_base_chain(parent): if int_model is proxied_model: opts = int_model._meta else: final_field = opts.parents[int_model] targets = (final_field.remote_field.get_related_field(),) opts = int_model._meta path.append( PathInfo( from_opts=final_field.model._meta, to_opts=opts, target_fields=targets, join_field=final_field, m2m=False, direct=True, filtered_relation=None, ) ) return path def get_path_from_parent(self, parent): """ Return a list of PathInfos containing the path from the parent model to the current model, or an empty list if parent is not a parent of the current model. """ if self.model is parent: return [] model = self.concrete_model # Get a reversed base chain including both the current and parent # models. chain = model._meta.get_base_chain(parent) chain.reverse() chain.append(model) # Construct a list of the PathInfos between models in chain. path = [] for i, ancestor in enumerate(chain[:-1]): child = chain[i + 1] link = child._meta.get_ancestor_link(ancestor) path.extend(link.reverse_path_infos) return path def _populate_directed_relation_graph(self): """ This method is used by each model to find its reverse objects. As this method is very expensive and is accessed frequently (it looks up every field in a model, in every app), it is computed on first access and then is set as a property on every model. """ related_objects_graph = defaultdict(list) all_models = self.apps.get_models(include_auto_created=True) for model in all_models: opts = model._meta # Abstract model's fields are copied to child models, hence we will # see the fields from the child models. 
if opts.abstract: continue fields_with_relations = ( f for f in opts._get_fields(reverse=False, include_parents=False) if f.is_relation and f.related_model is not None ) for f in fields_with_relations: if not isinstance(f.remote_field.model, str): remote_label = f.remote_field.model._meta.concrete_model._meta.label related_objects_graph[remote_label].append(f) for model in all_models: # Set the relation_tree using the internal __dict__. In this way # we avoid calling the cached property. In attribute lookup, # __dict__ takes precedence over a data descriptor (such as # @cached_property). This means that the _meta._relation_tree is # only called if related_objects is not in __dict__. related_objects = related_objects_graph[ model._meta.concrete_model._meta.label ] model._meta.__dict__["_relation_tree"] = related_objects # It seems it is possible that self is not in all_models, so guard # against that with default for get(). return self.__dict__.get("_relation_tree", EMPTY_RELATION_TREE) @cached_property def _relation_tree(self): return self._populate_directed_relation_graph() def _expire_cache(self, forward=True, reverse=True): # This method is usually called by apps.cache_clear(), when the # registry is finalized, or when a new field is added. if forward: for cache_key in self.FORWARD_PROPERTIES: if cache_key in self.__dict__: delattr(self, cache_key) if reverse and not self.abstract: for cache_key in self.REVERSE_PROPERTIES: if cache_key in self.__dict__: delattr(self, cache_key) self._get_fields_cache = {} def get_fields(self, include_parents=True, include_hidden=False): """ Return a list of fields associated to the model. By default, include forward and reverse fields, fields derived from inheritance, but not hidden fields. The returned fields can be changed using the parameters: - include_parents: include fields derived from inheritance - include_hidden: include fields that have a related_name that starts with a "+" """ if include_parents is False: include_parents = PROXY_PARENTS return self._get_fields( include_parents=include_parents, include_hidden=include_hidden ) def _get_fields( self, forward=True, reverse=True, include_parents=True, include_hidden=False, seen_models=None, ): """ Internal helper function to return fields of the model. * If forward=True, then fields defined on this model are returned. * If reverse=True, then relations pointing to this model are returned. * If include_hidden=True, then fields with is_hidden=True are returned. * The include_parents argument toggles if fields from parent models should be included. It has three values: True, False, and PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all fields defined for the current model or any of its parents in the parent chain to the model's concrete model. """ if include_parents not in (True, False, PROXY_PARENTS): raise TypeError( "Invalid argument for include_parents: %s" % (include_parents,) ) # This helper function is used to allow recursion in ``get_fields()`` # implementation and to provide a fast way for Django's internals to # access specific subsets of fields. # We must keep track of which models we have already seen. Otherwise we # could include the same field multiple times from different models. topmost_call = seen_models is None if topmost_call: seen_models = set() seen_models.add(self.model) # Creates a cache key composed of all arguments cache_key = (forward, reverse, include_parents, include_hidden, topmost_call) try: # In order to avoid list manipulation. 
Always return a shallow copy # of the results. return self._get_fields_cache[cache_key] except KeyError: pass fields = [] # Recursively call _get_fields() on each parent, with the same # options provided in this call. if include_parents is not False: for parent in self.parents: # In diamond inheritance it is possible that we see the same # model from two different routes. In that case, avoid adding # fields from the same parent again. if parent in seen_models: continue if ( parent._meta.concrete_model != self.concrete_model and include_parents == PROXY_PARENTS ): continue for obj in parent._meta._get_fields( forward=forward, reverse=reverse, include_parents=include_parents, include_hidden=include_hidden, seen_models=seen_models, ): if ( not getattr(obj, "parent_link", False) or obj.model == self.concrete_model ): fields.append(obj) if reverse and not self.proxy: # Tree is computed once and cached until the app cache is expired. # It is composed of a list of fields pointing to the current model # from other models. all_fields = self._relation_tree for field in all_fields: # If hidden fields should be included or the relation is not # intentionally hidden, add to the fields dict. if include_hidden or not field.remote_field.hidden: fields.append(field.remote_field) if forward: fields += self.local_fields fields += self.local_many_to_many # Private fields are recopied to each child model, and they get a # different model as field.model in each child. Hence we have to # add the private fields separately from the topmost call. If we # did this recursively similar to local_fields, we would get field # instances with field.model != self.model. if topmost_call: fields += self.private_fields # In order to avoid list manipulation. Always # return a shallow copy of the results fields = make_immutable_fields_list("get_fields()", fields) # Store result into cache for later access self._get_fields_cache[cache_key] = fields return fields @cached_property def total_unique_constraints(self): """ Return a list of total unique constraints. Useful for determining set of fields guaranteed to be unique for all rows. """ return [ constraint for constraint in self.constraints if ( isinstance(constraint, UniqueConstraint) and constraint.condition is None and not constraint.contains_expressions ) ] @cached_property def _property_names(self): """Return a set of the names of the properties defined on the model.""" names = [] for name in dir(self.model): attr = inspect.getattr_static(self.model, name) if isinstance(attr, property): names.append(name) return frozenset(names) @cached_property def _non_pk_concrete_field_names(self): """ Return a set of the non-pk concrete field names defined on the model. """ names = [] for field in self.concrete_fields: if not field.primary_key: names.append(field.name) if field.name != field.attname: names.append(field.attname) return frozenset(names) @cached_property def db_returning_fields(self): """ Private API intended only to be used by Django itself. Fields to be returned after a database insert. """ return [ field for field in self._get_fields( forward=True, reverse=False, include_parents=PROXY_PARENTS ) if getattr(field, "db_returning", False) ]
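# A minimal illustration of the introspection entry points defined above,
# assuming a hypothetical ``Book`` model with a UniqueConstraint on
# ("title", "author"):
#
#     Book._meta.get_fields()                       # forward + reverse fields
#     Book._meta.get_fields(include_parents=False)  # skip inherited fields
#     Book._meta.total_unique_constraints           # [<UniqueConstraint: ...>]
#
# get_fields() results are cached per argument combination; the cache is
# dropped by _expire_cache() whenever new fields are contributed to the model.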
import copy import inspect import warnings from functools import partialmethod from itertools import chain import django from django.apps import apps from django.conf import settings from django.core import checks from django.core.exceptions import ( NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned, ObjectDoesNotExist, ValidationError, ) from django.db import ( DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection, connections, router, transaction, ) from django.db.models import NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value from django.db.models.constants import LOOKUP_SEP from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, Collector from django.db.models.expressions import RawSQL from django.db.models.fields.related import ( ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation, ) from django.db.models.functions import Coalesce from django.db.models.manager import Manager from django.db.models.options import Options from django.db.models.query import F, Q from django.db.models.signals import ( class_prepared, post_init, post_save, pre_init, pre_save, ) from django.db.models.utils import make_model_tuple from django.utils.encoding import force_str from django.utils.hashable import make_hashable from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext_lazy as _ class Deferred: def __repr__(self): return "<Deferred field>" def __str__(self): return "<Deferred field>" DEFERRED = Deferred() def subclass_exception(name, bases, module, attached_to): """ Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class. """ return type( name, bases, { "__module__": module, "__qualname__": "%s.%s" % (attached_to.__qualname__, name), }, ) def _has_contribute_to_class(value): # Only call contribute_to_class() if it's bound. return not inspect.isclass(value) and hasattr(value, "contribute_to_class") class ModelBase(type): """Metaclass for all models.""" def __new__(cls, name, bases, attrs, **kwargs): super_new = super().__new__ # Also ensure initialization is only performed for subclasses of Model # (excluding Model class itself). parents = [b for b in bases if isinstance(b, ModelBase)] if not parents: return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop("__module__") new_attrs = {"__module__": module} classcell = attrs.pop("__classcell__", None) if classcell is not None: new_attrs["__classcell__"] = classcell attr_meta = attrs.pop("Meta", None) # Pass all attrs without a (Django-specific) contribute_to_class() # method to type.__new__() so that they're properly initialized # (i.e. __set_name__()). contributable_attrs = {} for obj_name, obj in attrs.items(): if _has_contribute_to_class(obj): contributable_attrs[obj_name] = obj else: new_attrs[obj_name] = obj new_class = super_new(cls, name, bases, new_attrs, **kwargs) abstract = getattr(attr_meta, "abstract", False) meta = attr_meta or getattr(new_class, "Meta", None) base_meta = getattr(new_class, "_meta", None) app_label = None # Look for an application configuration to attach the model to. 
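        # get_containing_app_config() resolves the model's module path against
        # the registered app configs; an explicit Meta.app_label always takes
        # precedence below (e.g. a model declared in "myapp.models" is attached
        # to the "myapp" AppConfig).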
app_config = apps.get_containing_app_config(module) if getattr(meta, "app_label", None) is None: if app_config is None: if not abstract: raise RuntimeError( "Model class %s.%s doesn't declare an explicit " "app_label and isn't in an application in " "INSTALLED_APPS." % (module, name) ) else: app_label = app_config.label new_class.add_to_class("_meta", Options(meta, app_label)) if not abstract: new_class.add_to_class( "DoesNotExist", subclass_exception( "DoesNotExist", tuple( x.DoesNotExist for x in parents if hasattr(x, "_meta") and not x._meta.abstract ) or (ObjectDoesNotExist,), module, attached_to=new_class, ), ) new_class.add_to_class( "MultipleObjectsReturned", subclass_exception( "MultipleObjectsReturned", tuple( x.MultipleObjectsReturned for x in parents if hasattr(x, "_meta") and not x._meta.abstract ) or (MultipleObjectsReturned,), module, attached_to=new_class, ), ) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). if not hasattr(meta, "ordering"): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, "get_latest_by"): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError( "%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped) ) # Add remaining attributes (those with a contribute_to_class() method) # to the class. for obj_name, obj in contributable_attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = chain( new_class._meta.local_fields, new_class._meta.local_many_to_many, new_class._meta.private_fields, ) field_names = {f.name for f in new_fields} # Basic setup for proxy models. if is_proxy: base = None for parent in [kls for kls in parents if hasattr(kls, "_meta")]: if parent._meta.abstract: if parent._meta.fields: raise TypeError( "Abstract base class containing model fields not " "permitted for proxy model '%s'." % name ) else: continue if base is None: base = parent elif parent._meta.concrete_model is not base._meta.concrete_model: raise TypeError( "Proxy model '%s' has more than one non-abstract model base " "class." % name ) if base is None: raise TypeError( "Proxy model '%s' has no non-abstract model base class." % name ) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Collect the parent links for multi-table inheritance. parent_links = {} for base in reversed([new_class] + parents): # Conceptually equivalent to `if base is Model`. if not hasattr(base, "_meta"): continue # Skip concrete parent classes. if base != new_class and not base._meta.abstract: continue # Locate OneToOneField instances. for field in base._meta.local_fields: if isinstance(field, OneToOneField) and field.remote_field.parent_link: related = resolve_relation(new_class, field.remote_field.model) parent_links[make_model_tuple(related)] = field # Track fields inherited from base models. inherited_attributes = set() # Do the appropriate setup for any model parents. for base in new_class.mro(): if base not in parents or not hasattr(base, "_meta"): # Things without _meta aren't functional models, so they're # uninteresting parents. 
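                # Their attributes are still recorded so that fields copied
                # from abstract parents later in this loop don't silently
                # shadow attributes (such as properties) defined on non-model
                # mixins.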
inherited_attributes.update(base.__dict__) continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many if not base._meta.abstract: # Check for clashes between locally declared fields and those # on the base classes. for field in parent_fields: if field.name in field_names: raise FieldError( "Local field %r in class %r clashes with field of " "the same name from base class %r." % ( field.name, name, base.__name__, ) ) else: inherited_attributes.add(field.name) # Concrete classes... base = base._meta.concrete_model base_key = make_model_tuple(base) if base_key in parent_links: field = parent_links[base_key] elif not is_proxy: attr_name = "%s_ptr" % base._meta.model_name field = OneToOneField( base, on_delete=CASCADE, name=attr_name, auto_created=True, parent_link=True, ) if attr_name in field_names: raise FieldError( "Auto-generated field '%s' in class %r for " "parent_link to base class %r clashes with " "declared field of the same name." % ( attr_name, name, base.__name__, ) ) # Only add the ptr field if it's not already present; # e.g. migrations will already have it specified if not hasattr(new_class, attr_name): new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: base_parents = base._meta.parents.copy() # Add fields from abstract base class if it wasn't overridden. for field in parent_fields: if ( field.name not in field_names and field.name not in new_class.__dict__ and field.name not in inherited_attributes ): new_field = copy.deepcopy(field) new_class.add_to_class(field.name, new_field) # Replace parent links defined on this base by the new # field. It will be appropriately resolved if required. if field.one_to_one: for parent, parent_link in base_parents.items(): if field == parent_link: base_parents[parent] = new_field # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base_parents) # Inherit private fields (like GenericForeignKey) from the parent # class for field in base._meta.private_fields: if field.name in field_names: if not base._meta.abstract: raise FieldError( "Local field %r in class %r clashes with field of " "the same name from base class %r." % ( field.name, name, base.__name__, ) ) else: field = copy.deepcopy(field) if not base._meta.abstract: field.mti_inherited = True new_class.add_to_class(field.name, field) # Copy indexes so that index names are unique when models extend an # abstract model. new_class._meta.indexes = [ copy.deepcopy(idx) for idx in new_class._meta.indexes ] if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() new_class._meta.apps.register_model(new_class._meta.app_label, new_class) return new_class def add_to_class(cls, name, value): if _has_contribute_to_class(value): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """Create some methods once self._meta has been populated.""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = partialmethod( cls._get_next_or_previous_in_order, is_next=True ) cls.get_previous_in_order = partialmethod( cls._get_next_or_previous_in_order, is_next=False ) # Defer creating accessors on the foreign class until it has been # created and registered. 
If remote_field is None, we're ordering # with respect to a GenericForeignKey and don't know what the # foreign class is - we'll add those accessors later in # contribute_to_class(). if opts.order_with_respect_to.remote_field: wrt = opts.order_with_respect_to remote = wrt.remote_field.model lazy_related_operation(make_foreign_order_accessors, cls, remote) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % ( cls.__name__, ", ".join(f.name for f in opts.fields), ) get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get( opts.label_lower ) if get_absolute_url_override: setattr(cls, "get_absolute_url", get_absolute_url_override) if not opts.managers: if any(f.name == "objects" for f in opts.fields): raise ValueError( "Model %s must specify a custom Manager, because it has a " "field named 'objects'." % cls.__name__ ) manager = Manager() manager.auto_created = True cls.add_to_class("objects", manager) # Set the name of _meta.indexes. This can't be done in # Options.contribute_to_class() because fields haven't been added to # the model at that point. for index in cls._meta.indexes: if not index.name: index.set_name_with_model(cls) class_prepared.send(sender=cls) @property def _base_manager(cls): return cls._meta.base_manager @property def _default_manager(cls): return cls._meta.default_manager class ModelStateFieldsCacheDescriptor: def __get__(self, instance, cls=None): if instance is None: return self res = instance.fields_cache = {} return res class ModelState: """Store model instance state.""" db = None # If true, uniqueness validation checks will consider this a new, unsaved # object. Necessary for correct validation of new instances of objects with # explicit (non-auto) PKs. This impacts validation only; it has no effect # on the actual save. adding = True fields_cache = ModelStateFieldsCacheDescriptor() class Model(metaclass=ModelBase): def __init__(self, *args, **kwargs): # Alias some things as locals to avoid repeat global lookups cls = self.__class__ opts = self._meta _setattr = setattr _DEFERRED = DEFERRED if opts.abstract: raise TypeError("Abstract models cannot be instantiated.") pre_init.send(sender=cls, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration # when an iter throws it. So if the first iter throws it, the second # is *not* consumed. We rely on this, so don't change the order # without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) else: # Slower, kwargs-ready version. fields_iter = iter(opts.fields) for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) if kwargs.pop(field.name, NOT_PROVIDED) is not NOT_PROVIDED: raise TypeError( f"{cls.__qualname__}() got both positional and " f"keyword arguments for field '{field.name}'." 
) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. for field in fields_iter: is_related_object = False # Virtual field if field.attname not in kwargs and field.column is None: continue if kwargs: if isinstance(field.remote_field, ForeignObjectRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. if rel_obj is not _DEFERRED: _setattr(self, field.name, rel_obj) else: if val is not _DEFERRED: _setattr(self, field.attname, val) if kwargs: property_names = opts._property_names unexpected = () for prop, value in kwargs.items(): # Any remaining kwargs must correspond to properties or virtual # fields. if prop in property_names: if value is not _DEFERRED: _setattr(self, prop, value) else: try: opts.get_field(prop) except FieldDoesNotExist: unexpected += (prop,) else: if value is not _DEFERRED: _setattr(self, prop, value) if unexpected: unexpected_names = ", ".join(repr(n) for n in unexpected) raise TypeError( f"{cls.__name__}() got unexpected keyword arguments: " f"{unexpected_names}" ) super().__init__() post_init.send(sender=cls, instance=self) @classmethod def from_db(cls, db, field_names, values): if len(values) != len(cls._meta.concrete_fields): values_iter = iter(values) values = [ next(values_iter) if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields ] new = cls(*values) new._state.adding = False new._state.db = db return new def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def __str__(self): return "%s object (%s)" % (self.__class__.__name__, self.pk) def __eq__(self, other): if not isinstance(other, Model): return NotImplemented if self._meta.concrete_model != other._meta.concrete_model: return False my_pk = self.pk if my_pk is None: return self is other return my_pk == other.pk def __hash__(self): if self.pk is None: raise TypeError("Model instances without primary key value are unhashable") return hash(self.pk) def __reduce__(self): data = self.__getstate__() data[DJANGO_VERSION_PICKLE_KEY] = django.__version__ class_id = self._meta.app_label, self._meta.object_name return model_unpickle, (class_id,), data def __getstate__(self): """Hook to allow choosing the attributes to pickle.""" state = self.__dict__.copy() state["_state"] = copy.copy(state["_state"]) state["_state"].fields_cache = state["_state"].fields_cache.copy() # memoryview cannot be pickled, so cast it to bytes and store # separately. 
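        # (Some database adapters return BinaryField values as memoryview
        # objects, so freshly loaded instances can hit this path.)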
_memoryview_attrs = [] for attr, value in state.items(): if isinstance(value, memoryview): _memoryview_attrs.append((attr, bytes(value))) if _memoryview_attrs: state["_memoryview_attrs"] = _memoryview_attrs for attr, value in _memoryview_attrs: state.pop(attr) return state def __setstate__(self, state): pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: if pickled_version != django.__version__: warnings.warn( "Pickled model instance's Django version %s does not " "match the current version %s." % (pickled_version, django.__version__), RuntimeWarning, stacklevel=2, ) else: warnings.warn( "Pickled model instance's Django version is not specified.", RuntimeWarning, stacklevel=2, ) if "_memoryview_attrs" in state: for attr, value in state.pop("_memoryview_attrs"): state[attr] = memoryview(value) self.__dict__.update(state) def _get_pk_val(self, meta=None): meta = meta or self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): for parent_link in self._meta.parents.values(): if parent_link and parent_link != self._meta.pk: setattr(self, parent_link.target_field.attname, value) return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def get_deferred_fields(self): """ Return a set containing names of deferred fields for this instance. """ return { f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__ } def refresh_from_db(self, using=None, fields=None): """ Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. """ if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, "_prefetched_objects_cache", ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' "are not allowed in fields." % LOOKUP_SEP ) hints = {"instance": self} db_instance_qs = self.__class__._base_manager.db_manager( using, hints=hints ).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [ f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields ] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. 
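        # related_objects covers reverse relations (e.g. a reverse
        # OneToOneField accessor cached on this instance); dropping the cached
        # value makes the next access re-query the refreshed state.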
for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db def serializable_value(self, field_name): """ Return the value of the field name for this instance. If the field is a foreign key, return the id value instead of the object. If there's no Field object with this name on the model, return the model attribute's value. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field(field_name) except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save( self, force_insert=False, force_update=False, using=None, update_fields=None ): """ Save the current instance. Override this in a subclass if you want to control the saving process. The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ self._prepare_related_fields_for_save(operation_name="save") using = using or router.db_for_write(self.__class__, instance=self) if force_insert and (force_update or update_fields): raise ValueError("Cannot force both insert and updating in model saving.") deferred_fields = self.get_deferred_fields() if update_fields is not None: # If update_fields is empty, skip the save. We do also check for # no-op saves later on for inheritance cases. This bailout is # still needed for skipping signal sending. if not update_fields: return update_fields = frozenset(update_fields) field_names = self._meta._non_pk_concrete_field_names non_model_fields = update_fields.difference(field_names) if non_model_fields: raise ValueError( "The following fields do not exist in this model, are m2m " "fields, or are non-concrete fields: %s" % ", ".join(non_model_fields) ) # If saving to the same database, and this model is deferred, then # automatically do an "update_fields" save on the loaded fields. elif not force_insert and deferred_fields and using == self._state.db: field_names = set() for field in self._meta.concrete_fields: if not field.primary_key and not hasattr(field, "through"): field_names.add(field.attname) loaded_fields = field_names.difference(deferred_fields) if loaded_fields: update_fields = frozenset(loaded_fields) self.save_base( using=using, force_insert=force_insert, force_update=force_update, update_fields=update_fields, ) save.alters_data = True def save_base( self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None, ): """ Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading. """ using = using or router.db_for_write(self.__class__, instance=self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or update_fields cls = origin = self.__class__ # Skip proxies, but keep the origin as the proxy model. if cls._meta.proxy: cls = cls._meta.concrete_model meta = cls._meta if not meta.auto_created: pre_save.send( sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields, ) # A transaction isn't needed if one query is issued. 
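        # With multi-table inheritance every parent table is written as well,
        # so those writes are wrapped in a single atomic block; otherwise only
        # one write runs and the surrounding transaction, if any, is simply
        # marked for rollback on error.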
if meta.parents: context_manager = transaction.atomic(using=using, savepoint=False) else: context_manager = transaction.mark_for_rollback_on_error(using=using) with context_manager: parent_inserted = False if not raw: parent_inserted = self._save_parents(cls, using, update_fields) updated = self._save_table( raw, cls, force_insert or parent_inserted, force_update, using, update_fields, ) # Store the database on which the object was saved self._state.db = using # Once saved, this is no longer a to-be-added instance. self._state.adding = False # Signal that the save is complete if not meta.auto_created: post_save.send( sender=origin, instance=self, created=(not updated), update_fields=update_fields, raw=raw, using=using, ) save_base.alters_data = True def _save_parents(self, cls, using, update_fields): """Save all the parents of cls using values from self.""" meta = cls._meta inserted = False for parent, field in meta.parents.items(): # Make sure the link fields are synced between parent and self. if ( field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None ): setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) parent_inserted = self._save_parents( cls=parent, using=using, update_fields=update_fields ) updated = self._save_table( cls=parent, using=using, update_fields=update_fields, force_insert=parent_inserted, ) if not updated: inserted = True # Set the parent's PK value to self. if field: setattr(self, field.attname, self._get_pk_val(parent._meta)) # Since we didn't have an instance of the parent handy set # attname directly, bypassing the descriptor. Invalidate # the related object cache, in case it's been accidentally # populated. A fresh instance will be re-built from the # database if necessary. if field.is_cached(self): field.delete_cached_value(self) return inserted def _save_table( self, raw=False, cls=None, force_insert=False, force_update=False, using=None, update_fields=None, ): """ Do the heavy-lifting involved in saving. Update or insert the data for a single table. """ meta = cls._meta non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] if update_fields: non_pks = [ f for f in non_pks if f.name in update_fields or f.attname in update_fields ] pk_val = self._get_pk_val(meta) if pk_val is None: pk_val = meta.pk.get_pk_value_on_save(self) setattr(self, meta.pk.attname, pk_val) pk_set = pk_val is not None if not pk_set and (force_update or update_fields): raise ValueError("Cannot force an update in save() with no primary key.") updated = False # Skip an UPDATE when adding an instance and primary key has a default. if ( not raw and not force_insert and self._state.adding and meta.pk.default and meta.pk.default is not NOT_PROVIDED ): force_insert = True # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. 
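        # Saving an instance with a pre-set primary key therefore probes with
        # an UPDATE first and only falls through to the INSERT below when no
        # row matched.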
if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [ ( f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)), ) for f in non_pks ] forced_update = update_fields or force_update updated = self._do_update( base_qs, using, pk_val, values, update_fields, forced_update ) if force_update and not updated: raise DatabaseError("Forced update did not affect any rows.") if update_fields and not updated: raise DatabaseError("Save with update_fields did not affect any rows.") if not updated: if meta.order_with_respect_to: # If this is a model with an order_with_respect_to # autopopulate the _order field field = meta.order_with_respect_to filter_args = field.get_filter_kwargs_for_object(self) self._order = ( cls._base_manager.using(using) .filter(**filter_args) .aggregate( _order__max=Coalesce( ExpressionWrapper( Max("_order") + Value(1), output_field=IntegerField() ), Value(0), ), )["_order__max"] ) fields = meta.local_concrete_fields if not pk_set: fields = [f for f in fields if f is not meta.auto_field] returning_fields = meta.db_returning_fields results = self._do_insert( cls._base_manager, using, fields, returning_fields, raw ) if results: for value, field in zip(results[0], returning_fields): setattr(self, field.attname, value) return updated def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): """ Try to update the model. Return True if the model was updated (if an update query was done and a matching row was found in the DB). """ filtered = base_qs.filter(pk=pk_val) if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that # case we just say the update succeeded. Another case ending up here # is a model with just PK - in that case check that the PK still # exists. return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and # It may happen that the object is deleted from the DB right after # this check, causing the subsequent UPDATE to return zero matching # rows. The same result can occur in some rare cases when the # database returns zero despite the UPDATE being executed # successfully (a row is matched and updated). In order to # distinguish these two cases, the object's existence in the # database is again checked for if the UPDATE query returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 def _do_insert(self, manager, using, fields, returning_fields, raw): """ Do an INSERT. If returning_fields is defined then this method should return the newly created data for the model. """ return manager._insert( [self], fields=fields, returning_fields=returning_fields, using=using, raw=raw, ) def _prepare_related_fields_for_save(self, operation_name, fields=None): # Ensure that a model instance without a PK hasn't been assigned to # a ForeignKey, GenericForeignKey or OneToOneField on this model. If # the field is nullable, allowing the save would result in silent data # loss. for field in self._meta.concrete_fields: if fields and field not in fields: continue # If the related field isn't cached, then an instance hasn't been # assigned and there's no need to worry about this check. 
if field.is_relation and field.is_cached(self): obj = getattr(self, field.name, None) if not obj: continue # A pk may have been assigned manually to a model instance not # saved to the database (or auto-generated in a case like # UUIDField), but we allow the save to proceed and rely on the # database to raise an IntegrityError if applicable. If # constraints aren't supported by the database, there's the # unavoidable risk of data corruption. if obj.pk is None: # Remove the object from a related instance cache. if not field.remote_field.multiple: field.remote_field.delete_cached_value(obj) raise ValueError( "%s() prohibited to prevent data loss due to unsaved " "related object '%s'." % (operation_name, field.name) ) elif getattr(self, field.attname) in field.empty_values: # Set related object if it has been saved after an # assignment. setattr(self, field.name, obj) # If the relationship's pk/to_field was changed, clear the # cached relationship. if getattr(obj, field.target_field.attname) != getattr( self, field.attname ): field.delete_cached_value(self) # GenericForeignKeys are private. for field in self._meta.private_fields: if fields and field not in fields: continue if ( field.is_relation and field.is_cached(self) and hasattr(field, "fk_field") ): obj = field.get_cached_value(self, default=None) if obj and obj.pk is None: raise ValueError( f"{operation_name}() prohibited to prevent data loss due to " f"unsaved related object '{field.name}'." ) def delete(self, using=None, keep_parents=False): if self.pk is None: raise ValueError( "%s object can't be deleted because its %s attribute is set " "to None." % (self._meta.object_name, self._meta.pk.attname) ) using = using or router.db_for_write(self.__class__, instance=self) collector = Collector(using=using, origin=self) collector.collect([self], keep_parents=keep_parents) return collector.delete() delete.alters_data = True def _get_FIELD_display(self, field): value = getattr(self, field.attname) choices_dict = dict(make_hashable(field.flatchoices)) # force_str() to coerce lazy strings. return force_str( choices_dict.get(make_hashable(value), value), strings_only=True ) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = "gt" if is_next else "lt" order = "" if is_next else "-" param = getattr(self, field.attname) q = Q.create([(field.name, param), (f"pk__{op}", self.pk)], connector=Q.AND) q = Q.create([q, (f"{field.name}__{op}", param)], connector=Q.OR) qs = ( self.__class__._default_manager.using(self._state.db) .filter(**kwargs) .filter(q) .order_by("%s%s" % (order, field.name), "%spk" % order) ) try: return qs[0] except IndexError: raise self.DoesNotExist( "%s matching query does not exist." 
% self.__class__._meta.object_name ) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = "gt" if is_next else "lt" order = "_order" if is_next else "-_order" order_field = self._meta.order_with_respect_to filter_args = order_field.get_filter_kwargs_for_object(self) obj = ( self.__class__._default_manager.filter(**filter_args) .filter( **{ "_order__%s" % op: self.__class__._default_manager.values("_order").filter( **{self._meta.pk.name: self.pk} ) } ) .order_by(order)[:1] .get() ) setattr(self, cachename, obj) return getattr(self, cachename) def _get_field_value_map(self, meta, exclude=None): if exclude is None: exclude = set() meta = meta or self._meta return { field.name: Value(getattr(self, field.attname), field) for field in meta.local_concrete_fields if field.name not in exclude } def prepare_database_save(self, field): if self.pk is None: raise ValueError( "Unsaved model instance %r cannot be used in an ORM query." % self ) return getattr(self, field.remote_field.get_related_field().attname) def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def validate_unique(self, exclude=None): """ Check unique constraints on the model and raise ValidationError if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None, include_meta_constraints=False): """ Return a list of checks to perform. Since validate_unique() could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. """ if exclude is None: exclude = set() unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] constraints = [] if include_meta_constraints: constraints = [(self.__class__, self._meta.total_unique_constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.unique_together: unique_togethers.append( (parent_class, parent_class._meta.unique_together) ) if include_meta_constraints and parent_class._meta.total_unique_constraints: constraints.append( (parent_class, parent_class._meta.total_unique_constraints) ) for model_class, unique_together in unique_togethers: for check in unique_together: if not any(name in exclude for name in check): # Add the check if the field isn't excluded. unique_checks.append((model_class, tuple(check))) if include_meta_constraints: for model_class, model_constraints in constraints: for constraint in model_constraints: if not any(name in exclude for name in constraint.fields): unique_checks.append((model_class, constraint.fields)) # These are checks for the unique_for_<date/year/month>. date_checks = [] # Gather a list of checks for fields declared as unique and add them to # the list of checks. 
fields_with_class = [(self.__class__, self._meta.local_fields)] for parent_class in self._meta.get_parent_list(): fields_with_class.append((parent_class, parent_class._meta.local_fields)) for model_class, fields in fields_with_class: for f in fields: name = f.name if name in exclude: continue if f.unique: unique_checks.append((model_class, (name,))) if f.unique_for_date and f.unique_for_date not in exclude: date_checks.append((model_class, "date", name, f.unique_for_date)) if f.unique_for_year and f.unique_for_year not in exclude: date_checks.append((model_class, "year", name, f.unique_for_year)) if f.unique_for_month and f.unique_for_month not in exclude: date_checks.append((model_class, "month", name, f.unique_for_month)) return unique_checks, date_checks def _perform_unique_checks(self, unique_checks): errors = {} for model_class, unique_check in unique_checks: # Try to look up an existing object with the same values as this # object's values for all the unique field. lookup_kwargs = {} for field_name in unique_check: f = self._meta.get_field(field_name) lookup_value = getattr(self, f.attname) # TODO: Handle multiple backends with different feature flags. if lookup_value is None or ( lookup_value == "" and connection.features.interprets_empty_strings_as_nulls ): # no value, skip the lookup continue if f.primary_key and not self._state.adding: # no need to check for unique primary key when editing continue lookup_kwargs[str(field_name)] = lookup_value # some fields were skipped, no reason to do the check if len(unique_check) != len(lookup_kwargs): continue qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) # Note that we need to use the pk as defined by model_class, not # self.pk. These can be different fields because model inheritance # allows single model to have effectively multiple primary keys. # Refs #17615. 
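            # (With multi-table inheritance the child's pk is its parent-link
            # OneToOneField, while the parent's Options still exposes the
            # parent's own pk column.)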
model_class_pk = self._get_pk_val(model_class._meta) if not self._state.adding and model_class_pk is not None: qs = qs.exclude(pk=model_class_pk) if qs.exists(): if len(unique_check) == 1: key = unique_check[0] else: key = NON_FIELD_ERRORS errors.setdefault(key, []).append( self.unique_error_message(model_class, unique_check) ) return errors def _perform_date_checks(self, date_checks): errors = {} for model_class, lookup_type, field, unique_for in date_checks: lookup_kwargs = {} # there's a ticket to add a date lookup, we can remove this special # case if that makes it's way in date = getattr(self, unique_for) if date is None: continue if lookup_type == "date": lookup_kwargs["%s__day" % unique_for] = date.day lookup_kwargs["%s__month" % unique_for] = date.month lookup_kwargs["%s__year" % unique_for] = date.year else: lookup_kwargs["%s__%s" % (unique_for, lookup_type)] = getattr( date, lookup_type ) lookup_kwargs[field] = getattr(self, field) qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) if not self._state.adding and self.pk is not None: qs = qs.exclude(pk=self.pk) if qs.exists(): errors.setdefault(field, []).append( self.date_error_message(lookup_type, field, unique_for) ) return errors def date_error_message(self, lookup_type, field_name, unique_for): opts = self._meta field = opts.get_field(field_name) return ValidationError( message=field.error_messages["unique_for_date"], code="unique_for_date", params={ "model": self, "model_name": capfirst(opts.verbose_name), "lookup_type": lookup_type, "field": field_name, "field_label": capfirst(field.verbose_name), "date_field": unique_for, "date_field_label": capfirst(opts.get_field(unique_for).verbose_name), }, ) def unique_error_message(self, model_class, unique_check): opts = model_class._meta params = { "model": self, "model_class": model_class, "model_name": capfirst(opts.verbose_name), "unique_check": unique_check, } # A unique field if len(unique_check) == 1: field = opts.get_field(unique_check[0]) params["field_label"] = capfirst(field.verbose_name) return ValidationError( message=field.error_messages["unique"], code="unique", params=params, ) # unique_together else: field_labels = [ capfirst(opts.get_field(f).verbose_name) for f in unique_check ] params["field_labels"] = get_text_list(field_labels, _("and")) return ValidationError( message=_("%(model_name)s with this %(field_labels)s already exists."), code="unique_together", params=params, ) def get_constraints(self): constraints = [(self.__class__, self._meta.constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.constraints: constraints.append((parent_class, parent_class._meta.constraints)) return constraints def validate_constraints(self, exclude=None): constraints = self.get_constraints() using = router.db_for_write(self.__class__, instance=self) errors = {} for model_class, model_constraints in constraints: for constraint in model_constraints: try: constraint.validate(model_class, self, exclude=exclude, using=using) except ValidationError as e: if e.code == "unique" and len(constraint.fields) == 1: errors.setdefault(constraint.fields[0], []).append(e) else: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def full_clean(self, exclude=None, validate_unique=True, validate_constraints=True): """ Call clean_fields(), clean(), validate_unique(), and validate_constraints() on the model. 
Raise a ValidationError for any errors that occur. """ errors = {} if exclude is None: exclude = set() else: exclude = set(exclude) try: self.clean_fields(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Form.clean() is run even if other validation fails, so do the # same with Model.clean() for consistency. try: self.clean() except ValidationError as e: errors = e.update_error_dict(errors) # Run unique checks, but only for fields that passed validation. if validate_unique: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.add(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Run constraints checks, but only for fields that passed validation. if validate_constraints: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.add(name) try: self.validate_constraints(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Clean all fields and raise a ValidationError containing a dict of all validation errors if any occur. """ if exclude is None: exclude = set() errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in f.empty_values: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.error_list if errors: raise ValidationError(errors) @classmethod def check(cls, **kwargs): errors = [ *cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs), ] if not cls._meta.swapped: databases = kwargs.get("databases") or [] errors += [ *cls._check_fields(**kwargs), *cls._check_m2m_through_same_relationship(), *cls._check_long_column_names(databases), ] clash_errors = ( *cls._check_id_field(), *cls._check_field_name_clashes(), *cls._check_model_name_db_lookup_clashes(), *cls._check_property_name_related_field_accessor_clashes(), *cls._check_single_primary_key(), ) errors.extend(clash_errors) # If there are field name clashes, hide consequent column name # clashes. if not clash_errors: errors.extend(cls._check_column_name_clashes()) errors += [ *cls._check_index_together(), *cls._check_unique_together(), *cls._check_indexes(databases), *cls._check_ordering(), *cls._check_constraints(databases), *cls._check_default_pk(), ] return errors @classmethod def _check_default_pk(cls): if ( not cls._meta.abstract and cls._meta.pk.auto_created and # Inherited PKs are checked in parents models. not ( isinstance(cls._meta.pk, OneToOneField) and cls._meta.pk.remote_field.parent_link ) and not settings.is_overridden("DEFAULT_AUTO_FIELD") and cls._meta.app_config and not cls._meta.app_config._is_default_auto_field_overridden ): return [ checks.Warning( f"Auto-created primary key used when not defining a " f"primary key type, by default " f"'{settings.DEFAULT_AUTO_FIELD}'.", hint=( f"Configure the DEFAULT_AUTO_FIELD setting or the " f"{cls._meta.app_config.__class__.__qualname__}." f"default_auto_field attribute to point to a subclass " f"of AutoField, e.g. 'django.db.models.BigAutoField'." 
), obj=cls, id="models.W042", ), ] return [] @classmethod def _check_swappable(cls): """Check if the swapped model exists.""" errors = [] if cls._meta.swapped: try: apps.get_model(cls._meta.swapped) except ValueError: errors.append( checks.Error( "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, id="models.E001", ) ) except LookupError: app_label, model_name = cls._meta.swapped.split(".") errors.append( checks.Error( "'%s' references '%s.%s', which has not been " "installed, or is abstract." % (cls._meta.swappable, app_label, model_name), id="models.E002", ) ) return errors @classmethod def _check_model(cls): errors = [] if cls._meta.proxy: if cls._meta.local_fields or cls._meta.local_many_to_many: errors.append( checks.Error( "Proxy model '%s' contains model fields." % cls.__name__, id="models.E017", ) ) return errors @classmethod def _check_managers(cls, **kwargs): """Perform all manager checks.""" errors = [] for manager in cls._meta.managers: errors.extend(manager.check(**kwargs)) return errors @classmethod def _check_fields(cls, **kwargs): """Perform all field checks.""" errors = [] for field in cls._meta.local_fields: errors.extend(field.check(**kwargs)) for field in cls._meta.local_many_to_many: errors.extend(field.check(from_model=cls, **kwargs)) return errors @classmethod def _check_m2m_through_same_relationship(cls): """Check if no relationship model is used by more than one m2m field.""" errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = ( f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields, ) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id="models.E003", ) ) else: seen_intermediary_signatures.append(signature) return errors @classmethod def _check_id_field(cls): """Check if `id` field is a primary key.""" fields = [ f for f in cls._meta.local_fields if f.name == "id" and f != cls._meta.pk ] # fields is empty or consists of the invalid "id" field if fields and not fields[0].primary_key and cls._meta.pk.name == "id": return [ checks.Error( "'id' can only be used as a field name if the field also " "sets 'primary_key=True'.", obj=cls, id="models.E004", ) ] else: return [] @classmethod def _check_field_name_clashes(cls): """Forbid field shadowing in multi-table inheritance.""" errors = [] used_fields = {} # name or attname -> field # Check that multi-inheritance doesn't cause field name shadowing. for parent in cls._meta.get_parent_list(): for f in parent._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None if clash: errors.append( checks.Error( "The field '%s' from parent model " "'%s' clashes with the field '%s' " "from parent model '%s'." % (clash.name, clash.model._meta, f.name, f.model._meta), obj=cls, id="models.E005", ) ) used_fields[f.name] = f used_fields[f.attname] = f # Check that fields defined in the model don't clash with fields from # parents, including auto-generated fields like multi-table inheritance # child accessors. 
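        # (get_fields() on a parent also yields reverse accessors, so
        # auto-created names, not just declared columns, take part in the
        # clash detection below.)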
for parent in cls._meta.get_parent_list(): for f in parent._meta.get_fields(): if f not in used_fields: used_fields[f.name] = f for f in cls._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None # Note that we may detect clash between user-defined non-unique # field "id" and automatically added unique field "id", both # defined at the same model. This special case is considered in # _check_id_field and here we ignore it. id_conflict = ( f.name == "id" and clash and clash.name == "id" and clash.model == cls ) if clash and not id_conflict: errors.append( checks.Error( "The field '%s' clashes with the field '%s' " "from model '%s'." % (f.name, clash.name, clash.model._meta), obj=f, id="models.E006", ) ) used_fields[f.name] = f used_fields[f.attname] = f return errors @classmethod def _check_column_name_clashes(cls): # Store a list of column names which have already been used by other fields. used_column_names = [] errors = [] for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Ensure the column name is not already in use. if column_name and column_name in used_column_names: errors.append( checks.Error( "Field '%s' has column name '%s' that is used by " "another field." % (f.name, column_name), hint="Specify a 'db_column' for the field.", obj=cls, id="models.E007", ) ) else: used_column_names.append(column_name) return errors @classmethod def _check_model_name_db_lookup_clashes(cls): errors = [] model_name = cls.__name__ if model_name.startswith("_") or model_name.endswith("_"): errors.append( checks.Error( "The model name '%s' cannot start or end with an underscore " "as it collides with the query lookup syntax." % model_name, obj=cls, id="models.E023", ) ) elif LOOKUP_SEP in model_name: errors.append( checks.Error( "The model name '%s' cannot contain double underscores as " "it collides with the query lookup syntax." % model_name, obj=cls, id="models.E024", ) ) return errors @classmethod def _check_property_name_related_field_accessor_clashes(cls): errors = [] property_names = cls._meta._property_names related_field_accessors = ( f.get_attname() for f in cls._meta._get_fields(reverse=False) if f.is_relation and f.related_model is not None ) for accessor in related_field_accessors: if accessor in property_names: errors.append( checks.Error( "The property '%s' clashes with a related field " "accessor." % accessor, obj=cls, id="models.E025", ) ) return errors @classmethod def _check_single_primary_key(cls): errors = [] if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: errors.append( checks.Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=cls, id="models.E026", ) ) return errors # RemovedInDjango51Warning. 
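    # (index_together is deprecated in favor of Meta.indexes; this check only
    # remains until the option itself is removed.)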
@classmethod def _check_index_together(cls): """Check the value of "index_together" option.""" if not isinstance(cls._meta.index_together, (tuple, list)): return [ checks.Error( "'index_together' must be a list or tuple.", obj=cls, id="models.E008", ) ] elif any( not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together ): return [ checks.Error( "All 'index_together' elements must be lists or tuples.", obj=cls, id="models.E009", ) ] else: errors = [] for fields in cls._meta.index_together: errors.extend(cls._check_local_fields(fields, "index_together")) return errors @classmethod def _check_unique_together(cls): """Check the value of "unique_together" option.""" if not isinstance(cls._meta.unique_together, (tuple, list)): return [ checks.Error( "'unique_together' must be a list or tuple.", obj=cls, id="models.E010", ) ] elif any( not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together ): return [ checks.Error( "All 'unique_together' elements must be lists or tuples.", obj=cls, id="models.E011", ) ] else: errors = [] for fields in cls._meta.unique_together: errors.extend(cls._check_local_fields(fields, "unique_together")) return errors @classmethod def _check_indexes(cls, databases): """Check fields, names, and conditions of indexes.""" errors = [] references = set() for index in cls._meta.indexes: # Index name can't start with an underscore or a number, restricted # for cross-database compatibility with Oracle. if index.name[0] == "_" or index.name[0].isdigit(): errors.append( checks.Error( "The index name '%s' cannot start with an underscore " "or a number." % index.name, obj=cls, id="models.E033", ), ) if len(index.name) > index.max_name_length: errors.append( checks.Error( "The index name '%s' cannot be longer than %d " "characters." % (index.name, index.max_name_length), obj=cls, id="models.E034", ), ) if index.contains_expressions: for expression in index.expressions: references.update( ref[0] for ref in cls._get_expr_references(expression) ) for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_partial_indexes or "supports_partial_indexes" in cls._meta.required_db_features ) and any(index.condition is not None for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes with conditions." % connection.display_name, hint=( "Conditions will be ignored. Silence this warning " "if you don't care about it." ), obj=cls, id="models.W037", ) ) if not ( connection.features.supports_covering_indexes or "supports_covering_indexes" in cls._meta.required_db_features ) and any(index.include for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes with non-key columns." % connection.display_name, hint=( "Non-key columns will be ignored. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W040", ) ) if not ( connection.features.supports_expression_indexes or "supports_expression_indexes" in cls._meta.required_db_features ) and any(index.contains_expressions for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes on expressions." % connection.display_name, hint=( "An index won't be created. Silence this warning " "if you don't care about it." 
), obj=cls, id="models.W043", ) ) fields = [ field for index in cls._meta.indexes for field, _ in index.fields_orders ] fields += [include for index in cls._meta.indexes for include in index.include] fields += references errors.extend(cls._check_local_fields(fields, "indexes")) return errors @classmethod def _check_local_fields(cls, fields, option): from django.db import models # In order to avoid hitting the relation tree prematurely, we use our # own fields_map instead of using get_field() forward_fields_map = {} for field in cls._meta._get_fields(reverse=False): forward_fields_map[field.name] = field if hasattr(field, "attname"): forward_fields_map[field.attname] = field errors = [] for field_name in fields: try: field = forward_fields_map[field_name] except KeyError: errors.append( checks.Error( "'%s' refers to the nonexistent field '%s'." % ( option, field_name, ), obj=cls, id="models.E012", ) ) else: if isinstance(field.remote_field, models.ManyToManyRel): errors.append( checks.Error( "'%s' refers to a ManyToManyField '%s', but " "ManyToManyFields are not permitted in '%s'." % ( option, field_name, option, ), obj=cls, id="models.E013", ) ) elif field not in cls._meta.local_fields: errors.append( checks.Error( "'%s' refers to field '%s' which is not local to model " "'%s'." % (option, field_name, cls._meta.object_name), hint="This issue may be caused by multi-table inheritance.", obj=cls, id="models.E016", ) ) return errors @classmethod def _check_ordering(cls): """ Check "ordering" option -- is it a list of strings and do all fields exist? """ if cls._meta._ordering_clash: return [ checks.Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=cls, id="models.E021", ), ] if cls._meta.order_with_respect_to or not cls._meta.ordering: return [] if not isinstance(cls._meta.ordering, (list, tuple)): return [ checks.Error( "'ordering' must be a tuple or list (even if you want to order by " "only one field).", obj=cls, id="models.E014", ) ] errors = [] fields = cls._meta.ordering # Skip expressions and '?' fields. fields = (f for f in fields if isinstance(f, str) and f != "?") # Convert "-field" to "field". fields = ((f[1:] if f.startswith("-") else f) for f in fields) # Separate related fields and non-related fields. _fields = [] related_fields = [] for f in fields: if LOOKUP_SEP in f: related_fields.append(f) else: _fields.append(f) fields = _fields # Check related fields. for field in related_fields: _cls = cls fld = None for part in field.split(LOOKUP_SEP): try: # pk is an alias that won't be found by opts.get_field. if part == "pk": fld = _cls._meta.pk else: fld = _cls._meta.get_field(part) if fld.is_relation: _cls = fld.path_infos[-1].to_opts.model else: _cls = None except (FieldDoesNotExist, AttributeError): if fld is None or ( fld.get_transform(part) is None and fld.get_lookup(part) is None ): errors.append( checks.Error( "'ordering' refers to the nonexistent field, " "related field, or lookup '%s'." % field, obj=cls, id="models.E015", ) ) # Skip ordering on pk. This is always a valid order_by field # but is an alias and therefore won't be found by opts.get_field. fields = {f for f in fields if f != "pk"} # Check for invalid or nonexistent fields in ordering. invalid_fields = [] # Any field name that is not present in field_names does not exist. # Also, ordering by m2m fields is not allowed. 
opts = cls._meta valid_fields = set( chain.from_iterable( (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) for f in chain(opts.fields, opts.related_objects) ) ) invalid_fields.extend(fields - valid_fields) for invalid_field in invalid_fields: errors.append( checks.Error( "'ordering' refers to the nonexistent field, related " "field, or lookup '%s'." % invalid_field, obj=cls, id="models.E015", ) ) return errors @classmethod def _check_long_column_names(cls, databases): """ Check that any auto-generated column names are shorter than the limits for each database in which the model will be created. """ if not databases: return [] errors = [] allowed_len = None db_alias = None # Find the minimum max allowed length among all specified db_aliases. for db in databases: # skip databases where the model won't be created if not router.allow_migrate_model(db, cls): continue connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is None or connection.features.truncates_names: continue else: if allowed_len is None: allowed_len = max_name_length db_alias = db elif max_name_length < allowed_len: allowed_len = max_name_length db_alias = db if allowed_len is None: return errors for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Check if auto-generated name for the field is too long # for the database. if ( f.db_column is None and column_name is not None and len(column_name) > allowed_len ): errors.append( checks.Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (column_name, allowed_len, db_alias), hint="Set the column name manually using 'db_column'.", obj=cls, id="models.E018", ) ) for f in cls._meta.local_many_to_many: # Skip nonexistent models. if isinstance(f.remote_field.through, str): continue # Check if auto-generated name for the M2M field is too long # for the database. for m2m in f.remote_field.through._meta.local_fields: _, rel_name = m2m.get_attname_column() if ( m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len ): errors.append( checks.Error( "Autogenerated column name too long for M2M field " '"%s". Maximum length is "%s" for database "%s".' % (rel_name, allowed_len, db_alias), hint=( "Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'." ), obj=cls, id="models.E019", ) ) return errors @classmethod def _get_expr_references(cls, expr): if isinstance(expr, Q): for child in expr.children: if isinstance(child, tuple): lookup, value = child yield tuple(lookup.split(LOOKUP_SEP)) yield from cls._get_expr_references(value) else: yield from cls._get_expr_references(child) elif isinstance(expr, F): yield tuple(expr.name.split(LOOKUP_SEP)) elif hasattr(expr, "get_source_expressions"): for src_expr in expr.get_source_expressions(): yield from cls._get_expr_references(src_expr) @classmethod def _check_constraints(cls, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_table_check_constraints or "supports_table_check_constraints" in cls._meta.required_db_features ) and any( isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support check constraints." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." 
), obj=cls, id="models.W027", ) ) if not ( connection.features.supports_partial_indexes or "supports_partial_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.condition is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints with " "conditions." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W036", ) ) if not ( connection.features.supports_deferrable_unique_constraints or "supports_deferrable_unique_constraints" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.deferrable is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support deferrable unique constraints." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W038", ) ) if not ( connection.features.supports_covering_indexes or "supports_covering_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.include for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints with non-key " "columns." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W039", ) ) if not ( connection.features.supports_expression_indexes or "supports_expression_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.contains_expressions for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints on " "expressions." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W044", ) ) fields = set( chain.from_iterable( (*constraint.fields, *constraint.include) for constraint in cls._meta.constraints if isinstance(constraint, UniqueConstraint) ) ) references = set() for constraint in cls._meta.constraints: if isinstance(constraint, UniqueConstraint): if ( connection.features.supports_partial_indexes or "supports_partial_indexes" not in cls._meta.required_db_features ) and isinstance(constraint.condition, Q): references.update( cls._get_expr_references(constraint.condition) ) if ( connection.features.supports_expression_indexes or "supports_expression_indexes" not in cls._meta.required_db_features ) and constraint.contains_expressions: for expression in constraint.expressions: references.update(cls._get_expr_references(expression)) elif isinstance(constraint, CheckConstraint): if ( connection.features.supports_table_check_constraints or "supports_table_check_constraints" not in cls._meta.required_db_features ): if isinstance(constraint.check, Q): references.update( cls._get_expr_references(constraint.check) ) if any( isinstance(expr, RawSQL) for expr in constraint.check.flatten() ): errors.append( checks.Warning( f"Check constraint {constraint.name!r} contains " f"RawSQL() expression and won't be validated " f"during the model full_clean().", hint=( "Silence this warning if you don't care about " "it." ), obj=cls, id="models.W045", ), ) for field_name, *lookups in references: # pk is an alias that won't be found by opts.get_field. 
if field_name != "pk": fields.add(field_name) if not lookups: # If it has no lookups it cannot result in a JOIN. continue try: if field_name == "pk": field = cls._meta.pk else: field = cls._meta.get_field(field_name) if not field.is_relation or field.many_to_many or field.one_to_many: continue except FieldDoesNotExist: continue # JOIN must happen at the first lookup. first_lookup = lookups[0] if ( hasattr(field, "get_transform") and hasattr(field, "get_lookup") and field.get_transform(first_lookup) is None and field.get_lookup(first_lookup) is None ): errors.append( checks.Error( "'constraints' refers to the joined field '%s'." % LOOKUP_SEP.join([field_name] + lookups), obj=cls, id="models.E041", ) ) errors.extend(cls._check_local_fields(fields, "constraints")) return errors ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(self, ordered_obj, id_list, using=None): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update( [ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list)], ["_order"], ) def method_get_order(self, ordered_obj): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) pk_name = ordered_obj._meta.pk.name return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) def make_foreign_order_accessors(model, related_model): setattr( related_model, "get_%s_order" % model.__name__.lower(), partialmethod(method_get_order, model), ) setattr( related_model, "set_%s_order" % model.__name__.lower(), partialmethod(method_set_order, model), ) ######## # MISC # ######## def model_unpickle(model_id): """Used to unpickle Model subclasses with deferred fields.""" if isinstance(model_id, tuple): model = apps.get_model(*model_id) else: # Backwards compat - the model was cached directly in earlier versions. model = model_id return model.__new__(model) model_unpickle.__safe_for_unpickle__ = True
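

if __name__ == "__main__":
    # Illustrative sketch only, not part of Django itself: it shows how the
    # ordering helpers above (method_get_order/method_set_order, attached via
    # make_foreign_order_accessors) surface on user models when
    # Meta.order_with_respect_to is used. The app label, model names, and the
    # minimal settings below are assumptions made up for this example.
    import django
    from django.conf import settings

    settings.configure(INSTALLED_APPS=[])
    django.setup()

    from django.db import models

    class Question(models.Model):
        text = models.CharField(max_length=200)

        class Meta:
            app_label = "example"

    class Answer(models.Model):
        question = models.ForeignKey(Question, on_delete=models.CASCADE)
        text = models.CharField(max_length=200)

        class Meta:
            app_label = "example"
            order_with_respect_to = "question"

    # order_with_respect_to adds an _order column to Answer and attaches
    # get_answer_order()/set_answer_order() to Question; no database access
    # is needed just to see that the accessors exist.
    print(hasattr(Question, "get_answer_order"))  # expected: True
    print(hasattr(Question, "set_answer_order"))  # expected: True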
dde38fb8c9355dbc58ed4241fd03a83afcf3524b958a251335fd320dfdfdddd5
""" Various data structures used in query construction. Factored out from django.db.models.query to avoid making the main module very large and/or so that they can be used by other modules without getting into circular import difficulties. """ import functools import inspect import logging from collections import namedtuple from django.core.exceptions import FieldError from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections from django.db.models.constants import LOOKUP_SEP from django.utils import tree logger = logging.getLogger("django.db.models") # PathInfo is used when converting lookups (fk__somecol). The contents # describe the relation in Model terms (model Options and Fields for both # sides of the relation. The join_field is the field backing the relation. PathInfo = namedtuple( "PathInfo", "from_opts to_opts target_fields join_field m2m direct filtered_relation", ) def subclasses(cls): yield cls for subclass in cls.__subclasses__(): yield from subclasses(subclass) class Q(tree.Node): """ Encapsulate filters as objects that can then be combined logically (using `&` and `|`). """ # Connection types AND = "AND" OR = "OR" XOR = "XOR" default = AND conditional = True def __init__(self, *args, _connector=None, _negated=False, **kwargs): super().__init__( children=[*args, *sorted(kwargs.items())], connector=_connector, negated=_negated, ) def _combine(self, other, conn): if getattr(other, "conditional", False) is False: raise TypeError(other) if not self: return other.copy() if not other and isinstance(other, Q): return self.copy() obj = self.create(connector=conn) obj.add(self, conn) obj.add(other, conn) return obj def __or__(self, other): return self._combine(other, self.OR) def __and__(self, other): return self._combine(other, self.AND) def __xor__(self, other): return self._combine(other, self.XOR) def __invert__(self): obj = self.copy() obj.negate() return obj def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # We must promote any new joins to left outer joins so that when Q is # used as an expression, rows aren't filtered due to joins. clause, joins = query._add_q( self, reuse, allow_joins=allow_joins, split_subq=False, check_filterable=False, ) query.promote_joins(joins) return clause def flatten(self): """ Recursively yield this Q object and all subexpressions, in depth-first order. """ yield self for child in self.children: if isinstance(child, tuple): # Use the lookup. child = child[1] if hasattr(child, "flatten"): yield from child.flatten() else: yield child def check(self, against, using=DEFAULT_DB_ALIAS): """ Do a database query to check if the expressions of the Q instance matches against the expressions. """ # Avoid circular imports. from django.db.models import BooleanField, Value from django.db.models.functions import Coalesce from django.db.models.sql import Query from django.db.models.sql.constants import SINGLE query = Query(None) for name, value in against.items(): if not hasattr(value, "resolve_expression"): value = Value(value) query.add_annotation(value, name, select=False) query.add_annotation(Value(1), "_check") # This will raise a FieldError if a field is missing in "against". 
if connections[using].features.supports_comparing_boolean_expr: query.add_q(Q(Coalesce(self, True, output_field=BooleanField()))) else: query.add_q(self) compiler = query.get_compiler(using=using) try: return compiler.execute_sql(SINGLE) is not None except DatabaseError as e: logger.warning("Got a database error calling check() on %r: %s", self, e) return True def deconstruct(self): path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) if path.startswith("django.db.models.query_utils"): path = path.replace("django.db.models.query_utils", "django.db.models") args = tuple(self.children) kwargs = {} if self.connector != self.default: kwargs["_connector"] = self.connector if self.negated: kwargs["_negated"] = True return path, args, kwargs class DeferredAttribute: """ A wrapper for a deferred-loading field. When the value is read from this object the first time, the query is executed. """ def __init__(self, field): self.field = field def __get__(self, instance, cls=None): """ Retrieve and caches the value from the datastore on the first lookup. Return the cached value. """ if instance is None: return self data = instance.__dict__ field_name = self.field.attname if field_name not in data: # Let's see if the field is part of the parent chain. If so we # might be able to reuse the already loaded value. Refs #18343. val = self._check_parent_chain(instance) if val is None: instance.refresh_from_db(fields=[field_name]) else: data[field_name] = val return data[field_name] def _check_parent_chain(self, instance): """ Check if the field value can be fetched from a parent field already loaded in the instance. This can be done if the to-be fetched field is a primary key field. """ opts = instance._meta link_field = opts.get_ancestor_link(self.field.model) if self.field.primary_key and self.field != link_field: return getattr(instance, link_field.attname) return None class class_or_instance_method: """ Hook used in RegisterLookupMixin to return partial functions depending on the caller type (instance or class of models.Field). 
""" def __init__(self, class_method, instance_method): self.class_method = class_method self.instance_method = instance_method def __get__(self, instance, owner): if instance is None: return functools.partial(self.class_method, owner) return functools.partial(self.instance_method, instance) class RegisterLookupMixin: def _get_lookup(self, lookup_name): return self.get_lookups().get(lookup_name, None) @functools.lru_cache(maxsize=None) def get_class_lookups(cls): class_lookups = [ parent.__dict__.get("class_lookups", {}) for parent in inspect.getmro(cls) ] return cls.merge_dicts(class_lookups) def get_instance_lookups(self): class_lookups = self.get_class_lookups() if instance_lookups := getattr(self, "instance_lookups", None): return {**class_lookups, **instance_lookups} return class_lookups get_lookups = class_or_instance_method(get_class_lookups, get_instance_lookups) get_class_lookups = classmethod(get_class_lookups) def get_lookup(self, lookup_name): from django.db.models.lookups import Lookup found = self._get_lookup(lookup_name) if found is None and hasattr(self, "output_field"): return self.output_field.get_lookup(lookup_name) if found is not None and not issubclass(found, Lookup): return None return found def get_transform(self, lookup_name): from django.db.models.lookups import Transform found = self._get_lookup(lookup_name) if found is None and hasattr(self, "output_field"): return self.output_field.get_transform(lookup_name) if found is not None and not issubclass(found, Transform): return None return found @staticmethod def merge_dicts(dicts): """ Merge dicts in reverse to preference the order of the original list. e.g., merge_dicts([a, b]) will preference the keys in 'a' over those in 'b'. """ merged = {} for d in reversed(dicts): merged.update(d) return merged @classmethod def _clear_cached_class_lookups(cls): for subclass in subclasses(cls): subclass.get_class_lookups.cache_clear() def register_class_lookup(cls, lookup, lookup_name=None): if lookup_name is None: lookup_name = lookup.lookup_name if "class_lookups" not in cls.__dict__: cls.class_lookups = {} cls.class_lookups[lookup_name] = lookup cls._clear_cached_class_lookups() return lookup def register_instance_lookup(self, lookup, lookup_name=None): if lookup_name is None: lookup_name = lookup.lookup_name if "instance_lookups" not in self.__dict__: self.instance_lookups = {} self.instance_lookups[lookup_name] = lookup return lookup register_lookup = class_or_instance_method( register_class_lookup, register_instance_lookup ) register_class_lookup = classmethod(register_class_lookup) def _unregister_class_lookup(cls, lookup, lookup_name=None): """ Remove given lookup from cls lookups. For use in tests only as it's not thread-safe. """ if lookup_name is None: lookup_name = lookup.lookup_name del cls.class_lookups[lookup_name] cls._clear_cached_class_lookups() def _unregister_instance_lookup(self, lookup, lookup_name=None): """ Remove given lookup from instance lookups. For use in tests only as it's not thread-safe. """ if lookup_name is None: lookup_name = lookup.lookup_name del self.instance_lookups[lookup_name] _unregister_lookup = class_or_instance_method( _unregister_class_lookup, _unregister_instance_lookup ) _unregister_class_lookup = classmethod(_unregister_class_lookup) def select_related_descend(field, restricted, requested, select_mask, reverse=False): """ Return True if this field should be used to descend deeper for select_related() purposes. 
Used by both the query construction code (compiler.get_related_selections()) and the model instance creation code (compiler.klass_info). Arguments: * field - the field to be checked * restricted - a boolean field, indicating if the field list has been manually restricted using a requested clause) * requested - The select_related() dictionary. * select_mask - the dictionary of selected fields. * reverse - boolean, True if we are checking a reverse select related """ if not field.remote_field: return False if field.remote_field.parent_link and not reverse: return False if restricted: if reverse and field.related_query_name() not in requested: return False if not reverse and field.name not in requested: return False if not restricted and field.null: return False if ( restricted and select_mask and field.name in requested and field not in select_mask ): raise FieldError( f"Field {field.model._meta.object_name}.{field.name} cannot be both " "deferred and traversed using select_related at the same time." ) return True def refs_expression(lookup_parts, annotations): """ Check if the lookup_parts contains references to the given annotations set. Because the LOOKUP_SEP is contained in the default annotation names, check each prefix of the lookup_parts for a match. """ for n in range(1, len(lookup_parts) + 1): level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n]) if level_n_lookup in annotations and annotations[level_n_lookup]: return annotations[level_n_lookup], lookup_parts[n:] return False, () def check_rel_lookup_compatibility(model, target_opts, field): """ Check that self.model is compatible with target_opts. Compatibility is OK if: 1) model and opts match (where proxy inheritance is removed) 2) model is parent of opts' model or the other way around """ def check(opts): return ( model._meta.concrete_model == opts.concrete_model or opts.concrete_model in model._meta.get_parent_list() or model in opts.get_parent_list() ) # If the field is a primary key, then doing a query against the field's # model is ok, too. Consider the case: # class Restaurant(models.Model): # place = OneToOneField(Place, primary_key=True): # Restaurant.objects.filter(pk__in=Restaurant.objects.all()). # If we didn't have the primary key check, then pk__in (== place__in) would # give Place's opts as the target opts, but Restaurant isn't compatible # with that. This logic applies only to primary keys, as when doing __in=qs, # we are going to turn this into __in=qs.values('pk') later on. return check(target_opts) or ( getattr(field, "primary_key", False) and check(field.model._meta) ) class FilteredRelation: """Specify custom filtering in the ON clause of SQL joins.""" def __init__(self, relation_name, *, condition=Q()): if not relation_name: raise ValueError("relation_name cannot be empty.") self.relation_name = relation_name self.alias = None if not isinstance(condition, Q): raise ValueError("condition argument must be a Q() instance.") self.condition = condition self.path = [] def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return ( self.relation_name == other.relation_name and self.alias == other.alias and self.condition == other.condition ) def clone(self): clone = FilteredRelation(self.relation_name, condition=self.condition) clone.alias = self.alias clone.path = self.path[:] return clone def resolve_expression(self, *args, **kwargs): """ QuerySet.annotate() only accepts expression-like arguments (with a resolve_expression() method). 
""" raise NotImplementedError("FilteredRelation.resolve_expression() is unused.") def as_sql(self, compiler, connection): # Resolve the condition in Join.filtered_relation. query = compiler.query where = query.build_filtered_relation_q(self.condition, reuse=set(self.path)) return compiler.compile(where)
f81e98d3700f678eb912cb1ce823e85709e54b6f9abfae6967e21f8524b0b0fa
import copy import datetime import functools import inspect import warnings from collections import defaultdict from decimal import Decimal from uuid import UUID from django.core.exceptions import EmptyResultSet, FieldError from django.db import DatabaseError, NotSupportedError, connection from django.db.models import fields from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import Q from django.utils.deconstruct import deconstructible from django.utils.deprecation import RemovedInDjango50Warning from django.utils.functional import cached_property from django.utils.hashable import make_hashable class SQLiteNumericMixin: """ Some expressions with output_field=DecimalField() must be cast to numeric to be properly filtered. """ def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) try: if self.output_field.get_internal_type() == "DecimalField": sql = "CAST(%s AS NUMERIC)" % sql except FieldError: pass return sql, params class Combinable: """ Provide the ability to combine one or two objects with some connector. For example F('foo') + F('bar'). """ # Arithmetic connectors ADD = "+" SUB = "-" MUL = "*" DIV = "/" POW = "^" # The following is a quoted % operator - it is quoted because it can be # used in strings that also have parameter substitution. MOD = "%%" # Bitwise operators - note that these are generated by .bitand() # and .bitor(), the '&' and '|' are reserved for boolean operator # usage. BITAND = "&" BITOR = "|" BITLEFTSHIFT = "<<" BITRIGHTSHIFT = ">>" BITXOR = "#" def _combine(self, other, connector, reversed): if not hasattr(other, "resolve_expression"): # everything must be resolvable to an expression other = Value(other) if reversed: return CombinedExpression(other, connector, self) return CombinedExpression(self, connector, other) ############# # OPERATORS # ############# def __neg__(self): return self._combine(-1, self.MUL, False) def __add__(self, other): return self._combine(other, self.ADD, False) def __sub__(self, other): return self._combine(other, self.SUB, False) def __mul__(self, other): return self._combine(other, self.MUL, False) def __truediv__(self, other): return self._combine(other, self.DIV, False) def __mod__(self, other): return self._combine(other, self.MOD, False) def __pow__(self, other): return self._combine(other, self.POW, False) def __and__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) & Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def bitand(self, other): return self._combine(other, self.BITAND, False) def bitleftshift(self, other): return self._combine(other, self.BITLEFTSHIFT, False) def bitrightshift(self, other): return self._combine(other, self.BITRIGHTSHIFT, False) def __xor__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) ^ Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def bitxor(self, other): return self._combine(other, self.BITXOR, False) def __or__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) | Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." 
) def bitor(self, other): return self._combine(other, self.BITOR, False) def __radd__(self, other): return self._combine(other, self.ADD, True) def __rsub__(self, other): return self._combine(other, self.SUB, True) def __rmul__(self, other): return self._combine(other, self.MUL, True) def __rtruediv__(self, other): return self._combine(other, self.DIV, True) def __rmod__(self, other): return self._combine(other, self.MOD, True) def __rpow__(self, other): return self._combine(other, self.POW, True) def __rand__(self, other): raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def __ror__(self, other): raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def __rxor__(self, other): raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) class BaseExpression: """Base class for all query expressions.""" empty_result_set_value = NotImplemented # aggregate specific fields is_summary = False _output_field_resolved_to_none = False # Can the expression be used in a WHERE clause? filterable = True # Can the expression can be used as a source expression in Window? window_compatible = False def __init__(self, output_field=None): if output_field is not None: self.output_field = output_field def __getstate__(self): state = self.__dict__.copy() state.pop("convert_value", None) return state def get_db_converters(self, connection): return ( [] if self.convert_value is self._convert_value_noop else [self.convert_value] ) + self.output_field.get_db_converters(connection) def get_source_expressions(self): return [] def set_source_expressions(self, exprs): assert not exprs def _parse_expressions(self, *expressions): return [ arg if hasattr(arg, "resolve_expression") else (F(arg) if isinstance(arg, str) else Value(arg)) for arg in expressions ] def as_sql(self, compiler, connection): """ Responsible for returning a (sql, [params]) tuple to be included in the current query. Different backends can provide their own implementation, by providing an `as_{vendor}` method and patching the Expression: ``` def override_as_sql(self, compiler, connection): # custom logic return super().as_sql(compiler, connection) setattr(Expression, 'as_' + connection.vendor, override_as_sql) ``` Arguments: * compiler: the query compiler responsible for generating the query. Must have a compile method, returning a (sql, [params]) tuple. Calling compiler(value) will return a quoted `value`. * connection: the database connection used for the current query. Return: (sql, params) Where `sql` is a string containing ordered sql parameters to be replaced with the elements of the list `params`. """ raise NotImplementedError("Subclasses must implement as_sql()") @cached_property def contains_aggregate(self): return any( expr and expr.contains_aggregate for expr in self.get_source_expressions() ) @cached_property def contains_over_clause(self): return any( expr and expr.contains_over_clause for expr in self.get_source_expressions() ) @cached_property def contains_column_references(self): return any( expr and expr.contains_column_references for expr in self.get_source_expressions() ) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): """ Provide the chance to do any preprocessing or validation before being added to the query. 
Arguments: * query: the backend query implementation * allow_joins: boolean allowing or denying use of joins in this query * reuse: a set of reusable joins for multijoins * summarize: a terminal aggregate clause * for_save: whether this expression about to be used in a save or update Return: an Expression to be added to the query. """ c = self.copy() c.is_summary = summarize c.set_source_expressions( [ expr.resolve_expression(query, allow_joins, reuse, summarize) if expr else None for expr in c.get_source_expressions() ] ) return c @property def conditional(self): return isinstance(self.output_field, fields.BooleanField) @property def field(self): return self.output_field @cached_property def output_field(self): """Return the output type of this expressions.""" output_field = self._resolve_output_field() if output_field is None: self._output_field_resolved_to_none = True raise FieldError("Cannot resolve expression type, unknown output_field") return output_field @cached_property def _output_field_or_none(self): """ Return the output field of this expression, or None if _resolve_output_field() didn't return an output type. """ try: return self.output_field except FieldError: if not self._output_field_resolved_to_none: raise def _resolve_output_field(self): """ Attempt to infer the output type of the expression. As a guess, if the output fields of all source fields match then simply infer the same type here. If a source's output field resolves to None, exclude it from this check. If all sources are None, then an error is raised higher up the stack in the output_field property. """ # This guess is mostly a bad idea, but there is quite a lot of code # (especially 3rd party Func subclasses) that depend on it, we'd need a # deprecation path to fix it. sources_iter = ( source for source in self.get_source_fields() if source is not None ) for output_field in sources_iter: for source in sources_iter: if not isinstance(output_field, source.__class__): raise FieldError( "Expression contains mixed types: %s, %s. You must " "set output_field." % ( output_field.__class__.__name__, source.__class__.__name__, ) ) return output_field @staticmethod def _convert_value_noop(value, expression, connection): return value @cached_property def convert_value(self): """ Expressions provide their own converters because users have the option of manually specifying the output_field which may be a different type from the one the database returns. 
""" field = self.output_field internal_type = field.get_internal_type() if internal_type == "FloatField": return ( lambda value, expression, connection: None if value is None else float(value) ) elif internal_type.endswith("IntegerField"): return ( lambda value, expression, connection: None if value is None else int(value) ) elif internal_type == "DecimalField": return ( lambda value, expression, connection: None if value is None else Decimal(value) ) return self._convert_value_noop def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def get_transform(self, name): return self.output_field.get_transform(name) def relabeled_clone(self, change_map): clone = self.copy() clone.set_source_expressions( [ e.relabeled_clone(change_map) if e is not None else None for e in self.get_source_expressions() ] ) return clone def replace_expressions(self, replacements): if replacement := replacements.get(self): return replacement clone = self.copy() source_expressions = clone.get_source_expressions() clone.set_source_expressions( [ expr.replace_expressions(replacements) if expr else None for expr in source_expressions ] ) return clone def copy(self): return copy.copy(self) def prefix_references(self, prefix): clone = self.copy() clone.set_source_expressions( [ F(f"{prefix}{expr.name}") if isinstance(expr, F) else expr.prefix_references(prefix) for expr in self.get_source_expressions() ] ) return clone def get_group_by_cols(self): if not self.contains_aggregate: return [self] cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def get_source_fields(self): """Return the underlying field types used by this aggregate.""" return [e._output_field_or_none for e in self.get_source_expressions()] def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def reverse_ordering(self): return self def flatten(self): """ Recursively yield this expression and all subexpressions, in depth-first order. """ yield self for expr in self.get_source_expressions(): if expr: if hasattr(expr, "flatten"): yield from expr.flatten() else: yield expr def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle. """ if hasattr(self.output_field, "select_format"): return self.output_field.select_format(compiler, sql, params) return sql, params @deconstructible class Expression(BaseExpression, Combinable): """An expression that can be combined with other expressions.""" @cached_property def identity(self): constructor_signature = inspect.signature(self.__init__) args, kwargs = self._constructor_args signature = constructor_signature.bind_partial(*args, **kwargs) signature.apply_defaults() arguments = signature.arguments.items() identity = [self.__class__] for arg, value in arguments: if isinstance(value, fields.Field): if value.name and value.model: value = (value.model._meta.label, value.name) else: value = type(value) else: value = make_hashable(value) identity.append((arg, value)) return tuple(identity) def __eq__(self, other): if not isinstance(other, Expression): return NotImplemented return other.identity == self.identity def __hash__(self): return hash(self.identity) # Type inference for CombinedExpression.output_field. # Missing items will result in FieldError, by design. # # The current approach for NULL is based on lowest common denominator behavior # i.e. 
if one of the supported databases is raising an error (rather than # return NULL) for `val <op> NULL`, then Django raises FieldError. NoneType = type(None) _connector_combinations = [ # Numeric operations - operands of same type. { connector: [ (fields.IntegerField, fields.IntegerField, fields.IntegerField), (fields.FloatField, fields.FloatField, fields.FloatField), (fields.DecimalField, fields.DecimalField, fields.DecimalField), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, # Behavior for DIV with integer arguments follows Postgres/SQLite, # not MySQL/Oracle. Combinable.DIV, Combinable.MOD, Combinable.POW, ) }, # Numeric operations - operands of different type. { connector: [ (fields.IntegerField, fields.DecimalField, fields.DecimalField), (fields.DecimalField, fields.IntegerField, fields.DecimalField), (fields.IntegerField, fields.FloatField, fields.FloatField), (fields.FloatField, fields.IntegerField, fields.FloatField), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV, Combinable.MOD, ) }, # Bitwise operators. { connector: [ (fields.IntegerField, fields.IntegerField, fields.IntegerField), ] for connector in ( Combinable.BITAND, Combinable.BITOR, Combinable.BITLEFTSHIFT, Combinable.BITRIGHTSHIFT, Combinable.BITXOR, ) }, # Numeric with NULL. { connector: [ (field_type, NoneType, field_type), (NoneType, field_type, field_type), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV, Combinable.MOD, Combinable.POW, ) for field_type in (fields.IntegerField, fields.DecimalField, fields.FloatField) }, # Date/DateTimeField/DurationField/TimeField. { Combinable.ADD: [ # Date/DateTimeField. (fields.DateField, fields.DurationField, fields.DateTimeField), (fields.DateTimeField, fields.DurationField, fields.DateTimeField), (fields.DurationField, fields.DateField, fields.DateTimeField), (fields.DurationField, fields.DateTimeField, fields.DateTimeField), # DurationField. (fields.DurationField, fields.DurationField, fields.DurationField), # TimeField. (fields.TimeField, fields.DurationField, fields.TimeField), (fields.DurationField, fields.TimeField, fields.TimeField), ], }, { Combinable.SUB: [ # Date/DateTimeField. (fields.DateField, fields.DurationField, fields.DateTimeField), (fields.DateTimeField, fields.DurationField, fields.DateTimeField), (fields.DateField, fields.DateField, fields.DurationField), (fields.DateField, fields.DateTimeField, fields.DurationField), (fields.DateTimeField, fields.DateField, fields.DurationField), (fields.DateTimeField, fields.DateTimeField, fields.DurationField), # DurationField. (fields.DurationField, fields.DurationField, fields.DurationField), # TimeField. (fields.TimeField, fields.DurationField, fields.TimeField), (fields.TimeField, fields.TimeField, fields.DurationField), ], }, ] _connector_combinators = defaultdict(list) def register_combinable_fields(lhs, connector, rhs, result): """ Register combinable types: lhs <connector> rhs -> result e.g. 
register_combinable_fields( IntegerField, Combinable.ADD, FloatField, FloatField ) """ _connector_combinators[connector].append((lhs, rhs, result)) for d in _connector_combinations: for connector, field_types in d.items(): for lhs, rhs, result in field_types: register_combinable_fields(lhs, connector, rhs, result) @functools.lru_cache(maxsize=128) def _resolve_combined_type(connector, lhs_type, rhs_type): combinators = _connector_combinators.get(connector, ()) for combinator_lhs_type, combinator_rhs_type, combined_type in combinators: if issubclass(lhs_type, combinator_lhs_type) and issubclass( rhs_type, combinator_rhs_type ): return combined_type class CombinedExpression(SQLiteNumericMixin, Expression): def __init__(self, lhs, connector, rhs, output_field=None): super().__init__(output_field=output_field) self.connector = connector self.lhs = lhs self.rhs = rhs def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, self) def __str__(self): return "{} {} {}".format(self.lhs, self.connector, self.rhs) def get_source_expressions(self): return [self.lhs, self.rhs] def set_source_expressions(self, exprs): self.lhs, self.rhs = exprs def _resolve_output_field(self): # We avoid using super() here for reasons given in # Expression._resolve_output_field() combined_type = _resolve_combined_type( self.connector, type(self.lhs._output_field_or_none), type(self.rhs._output_field_or_none), ) if combined_type is None: raise FieldError( f"Cannot infer type of {self.connector!r} expression involving these " f"types: {self.lhs.output_field.__class__.__name__}, " f"{self.rhs.output_field.__class__.__name__}. You must set " f"output_field." ) return combined_type() def as_sql(self, compiler, connection): expressions = [] expression_params = [] sql, params = compiler.compile(self.lhs) expressions.append(sql) expression_params.extend(params) sql, params = compiler.compile(self.rhs) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = "(%s)" sql = connection.ops.combine_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): lhs = self.lhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) rhs = self.rhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) if not isinstance(self, (DurationExpression, TemporalSubtraction)): try: lhs_type = lhs.output_field.get_internal_type() except (AttributeError, FieldError): lhs_type = None try: rhs_type = rhs.output_field.get_internal_type() except (AttributeError, FieldError): rhs_type = None if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type: return DurationExpression( self.lhs, self.connector, self.rhs ).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) datetime_fields = {"DateField", "DateTimeField", "TimeField"} if ( self.connector == self.SUB and lhs_type in datetime_fields and lhs_type == rhs_type ): return TemporalSubtraction(self.lhs, self.rhs).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) c = self.copy() c.is_summary = summarize c.lhs = lhs c.rhs = rhs return c class DurationExpression(CombinedExpression): def compile(self, side, compiler, connection): try: output = side.output_field except FieldError: pass else: if output.get_internal_type() == "DurationField": sql, params = compiler.compile(side) return connection.ops.format_for_duration_arithmetic(sql), params return 
compiler.compile(side) def as_sql(self, compiler, connection): if connection.features.has_native_duration_field: return super().as_sql(compiler, connection) connection.ops.check_expression_support(self) expressions = [] expression_params = [] sql, params = self.compile(self.lhs, compiler, connection) expressions.append(sql) expression_params.extend(params) sql, params = self.compile(self.rhs, compiler, connection) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = "(%s)" sql = connection.ops.combine_duration_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) if self.connector in {Combinable.MUL, Combinable.DIV}: try: lhs_type = self.lhs.output_field.get_internal_type() rhs_type = self.rhs.output_field.get_internal_type() except (AttributeError, FieldError): pass else: allowed_fields = { "DecimalField", "DurationField", "FloatField", "IntegerField", } if lhs_type not in allowed_fields or rhs_type not in allowed_fields: raise DatabaseError( f"Invalid arguments for operator {self.connector}." ) return sql, params class TemporalSubtraction(CombinedExpression): output_field = fields.DurationField() def __init__(self, lhs, rhs): super().__init__(lhs, self.SUB, rhs) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) lhs = compiler.compile(self.lhs) rhs = compiler.compile(self.rhs) return connection.ops.subtract_temporals( self.lhs.output_field.get_internal_type(), lhs, rhs ) @deconstructible(path="django.db.models.F") class F(Combinable): """An object capable of resolving references to existing query objects.""" def __init__(self, name): """ Arguments: * name: the name of the field this expression references """ self.name = name def __repr__(self): return "{}({})".format(self.__class__.__name__, self.name) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): return query.resolve_ref(self.name, allow_joins, reuse, summarize) def replace_expressions(self, replacements): return replacements.get(self, self) def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def __eq__(self, other): return self.__class__ == other.__class__ and self.name == other.name def __hash__(self): return hash(self.name) class ResolvedOuterRef(F): """ An object that contains a reference to an outer query. In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery. """ contains_aggregate = False contains_over_clause = False def as_sql(self, *args, **kwargs): raise ValueError( "This queryset contains a reference to an outer query and may " "only be used in a subquery." ) def resolve_expression(self, *args, **kwargs): col = super().resolve_expression(*args, **kwargs) # FIXME: Rename possibly_multivalued to multivalued and fix detection # for non-multivalued JOINs (e.g. foreign key fields). This should take # into account only many-to-many and one-to-many relationships. 
col.possibly_multivalued = LOOKUP_SEP in self.name return col def relabeled_clone(self, relabels): return self def get_group_by_cols(self): return [] class OuterRef(F): contains_aggregate = False def resolve_expression(self, *args, **kwargs): if isinstance(self.name, self.__class__): return self.name return ResolvedOuterRef(self.name) def relabeled_clone(self, relabels): return self @deconstructible(path="django.db.models.Func") class Func(SQLiteNumericMixin, Expression): """An SQL function call.""" function = None template = "%(function)s(%(expressions)s)" arg_joiner = ", " arity = None # The number of arguments the function accepts. def __init__(self, *expressions, output_field=None, **extra): if self.arity is not None and len(expressions) != self.arity: raise TypeError( "'%s' takes exactly %s %s (%s given)" % ( self.__class__.__name__, self.arity, "argument" if self.arity == 1 else "arguments", len(expressions), ) ) super().__init__(output_field=output_field) self.source_expressions = self._parse_expressions(*expressions) self.extra = extra def __repr__(self): args = self.arg_joiner.join(str(arg) for arg in self.source_expressions) extra = {**self.extra, **self._get_repr_options()} if extra: extra = ", ".join( str(key) + "=" + str(val) for key, val in sorted(extra.items()) ) return "{}({}, {})".format(self.__class__.__name__, args, extra) return "{}({})".format(self.__class__.__name__, args) def _get_repr_options(self): """Return a dict of extra __init__() options to include in the repr.""" return {} def get_source_expressions(self): return self.source_expressions def set_source_expressions(self, exprs): self.source_expressions = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize for pos, arg in enumerate(c.source_expressions): c.source_expressions[pos] = arg.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def as_sql( self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context, ): connection.ops.check_expression_support(self) sql_parts = [] params = [] for arg in self.source_expressions: try: arg_sql, arg_params = compiler.compile(arg) except EmptyResultSet: empty_result_set_value = getattr( arg, "empty_result_set_value", NotImplemented ) if empty_result_set_value is NotImplemented: raise arg_sql, arg_params = compiler.compile(Value(empty_result_set_value)) sql_parts.append(arg_sql) params.extend(arg_params) data = {**self.extra, **extra_context} # Use the first supplied value in this order: the parameter to this # method, a value supplied in __init__()'s **extra (the value in # `data`), or the value defined on the class. if function is not None: data["function"] = function else: data.setdefault("function", self.function) template = template or data.get("template", self.template) arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner) data["expressions"] = data["field"] = arg_joiner.join(sql_parts) return template % data, params def copy(self): copy = super().copy() copy.source_expressions = self.source_expressions[:] copy.extra = self.extra.copy() return copy @deconstructible(path="django.db.models.Value") class Value(SQLiteNumericMixin, Expression): """Represent a wrapped value as a node within an expression.""" # Provide a default value for `for_save` in order to allow unresolved # instances to be compiled until a decision is taken in #25425. 
for_save = False def __init__(self, value, output_field=None): """ Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. * output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField(). """ super().__init__(output_field=output_field) self.value = value def __repr__(self): return f"{self.__class__.__name__}({self.value!r})" def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) val = self.value output_field = self._output_field_or_none if output_field is not None: if self.for_save: val = output_field.get_db_prep_save(val, connection=connection) else: val = output_field.get_db_prep_value(val, connection=connection) if hasattr(output_field, "get_placeholder"): return output_field.get_placeholder(val, compiler, connection), [val] if val is None: # cx_Oracle does not always convert None to the appropriate # NULL type (like in case expressions using numbers), so we # use a literal SQL NULL return "NULL", [] return "%s", [val] def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) c.for_save = for_save return c def get_group_by_cols(self): return [] def _resolve_output_field(self): if isinstance(self.value, str): return fields.CharField() if isinstance(self.value, bool): return fields.BooleanField() if isinstance(self.value, int): return fields.IntegerField() if isinstance(self.value, float): return fields.FloatField() if isinstance(self.value, datetime.datetime): return fields.DateTimeField() if isinstance(self.value, datetime.date): return fields.DateField() if isinstance(self.value, datetime.time): return fields.TimeField() if isinstance(self.value, datetime.timedelta): return fields.DurationField() if isinstance(self.value, Decimal): return fields.DecimalField() if isinstance(self.value, bytes): return fields.BinaryField() if isinstance(self.value, UUID): return fields.UUIDField() @property def empty_result_set_value(self): return self.value class RawSQL(Expression): def __init__(self, sql, params, output_field=None): if output_field is None: output_field = fields.Field() self.sql, self.params = sql, params super().__init__(output_field=output_field) def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params) def as_sql(self, compiler, connection): return "(%s)" % self.sql, self.params def get_group_by_cols(self): return [self] def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # Resolve parents fields used in raw SQL. 
if query.model: for parent in query.model._meta.get_parent_list(): for parent_field in parent._meta.local_fields: _, column_name = parent_field.get_attname_column() if column_name.lower() in self.sql.lower(): query.resolve_ref( parent_field.name, allow_joins, reuse, summarize ) break return super().resolve_expression( query, allow_joins, reuse, summarize, for_save ) class Star(Expression): def __repr__(self): return "'*'" def as_sql(self, compiler, connection): return "*", [] class Col(Expression): contains_column_references = True possibly_multivalued = False def __init__(self, alias, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.alias, self.target = alias, target def __repr__(self): alias, target = self.alias, self.target identifiers = (alias, str(target)) if alias else (str(target),) return "{}({})".format(self.__class__.__name__, ", ".join(identifiers)) def as_sql(self, compiler, connection): alias, column = self.alias, self.target.column identifiers = (alias, column) if alias else (column,) sql = ".".join(map(compiler.quote_name_unless_alias, identifiers)) return sql, [] def relabeled_clone(self, relabels): if self.alias is None: return self return self.__class__( relabels.get(self.alias, self.alias), self.target, self.output_field ) def get_group_by_cols(self): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return self.output_field.get_db_converters( connection ) + self.target.get_db_converters(connection) class Ref(Expression): """ Reference to column alias of the query. For example, Ref('sum_cost') in qs.annotate(sum_cost=Sum('cost')) query. """ def __init__(self, refs, source): super().__init__() self.refs, self.source = refs, source def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) def get_source_expressions(self): return [self.source] def set_source_expressions(self, exprs): (self.source,) = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # The sub-expression `source` has already been resolved, as this is # just a reference to the name of `source`. return self def relabeled_clone(self, relabels): return self def as_sql(self, compiler, connection): return connection.ops.quote_name(self.refs), [] def get_group_by_cols(self): return [self] class ExpressionList(Func): """ An expression containing multiple expressions. Can be used to provide a list of expressions as an argument to another expression, like a partition clause. """ template = "%(expressions)s" def __init__(self, *expressions, **extra): if not expressions: raise ValueError( "%s requires at least one expression." % self.__class__.__name__ ) super().__init__(*expressions, **extra) def __str__(self): return self.arg_joiner.join(str(arg) for arg in self.source_expressions) def as_sqlite(self, compiler, connection, **extra_context): # Casting to numeric is unnecessary. 
return self.as_sql(compiler, connection, **extra_context) class OrderByList(Func): template = "ORDER BY %(expressions)s" def __init__(self, *expressions, **extra): expressions = ( ( OrderBy(F(expr[1:]), descending=True) if isinstance(expr, str) and expr[0] == "-" else expr ) for expr in expressions ) super().__init__(*expressions, **extra) def as_sql(self, *args, **kwargs): if not self.source_expressions: return "", () return super().as_sql(*args, **kwargs) def get_group_by_cols(self): group_by_cols = [] for order_by in self.get_source_expressions(): group_by_cols.extend(order_by.get_group_by_cols()) return group_by_cols @deconstructible(path="django.db.models.ExpressionWrapper") class ExpressionWrapper(SQLiteNumericMixin, Expression): """ An expression that can wrap another expression so that it can provide extra context to the inner expression, such as the output_field. """ def __init__(self, expression, output_field): super().__init__(output_field=output_field) self.expression = expression def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def get_group_by_cols(self): if isinstance(self.expression, Expression): expression = self.expression.copy() expression.output_field = self.output_field return expression.get_group_by_cols() # For non-expressions e.g. an SQL WHERE clause, the entire # `expression` must be included in the GROUP BY clause. return super().get_group_by_cols() def as_sql(self, compiler, connection): return compiler.compile(self.expression) def __repr__(self): return "{}({})".format(self.__class__.__name__, self.expression) @deconstructible(path="django.db.models.When") class When(Expression): template = "WHEN %(condition)s THEN %(result)s" # This isn't a complete conditional expression, must be used in Case(). conditional = False def __init__(self, condition=None, then=None, **lookups): if lookups: if condition is None: condition, lookups = Q(**lookups), None elif getattr(condition, "conditional", False): condition, lookups = Q(condition, **lookups), None if condition is None or not getattr(condition, "conditional", False) or lookups: raise TypeError( "When() supports a Q object, a boolean expression, or lookups " "as a condition." ) if isinstance(condition, Q) and not condition: raise ValueError("An empty Q() can't be used as a When() condition.") super().__init__(output_field=None) self.condition = condition self.result = self._parse_expressions(then)[0] def __str__(self): return "WHEN %r THEN %r" % (self.condition, self.result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return [self.condition, self.result] def set_source_expressions(self, exprs): self.condition, self.result = exprs def get_source_fields(self): # We're only interested in the fields of the result expressions. 
return [self.result._output_field_or_none] def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize if hasattr(c.condition, "resolve_expression"): c.condition = c.condition.resolve_expression( query, allow_joins, reuse, summarize, False ) c.result = c.result.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = extra_context sql_params = [] condition_sql, condition_params = compiler.compile(self.condition) # Filters that match everything are handled as empty strings in the # WHERE clause, but in a CASE WHEN expression they must use a predicate # that's always True. if condition_sql == "": if connection.features.supports_boolean_expr_in_select_clause: condition_sql, condition_params = compiler.compile(Value(True)) else: condition_sql, condition_params = "1=1", () template_params["condition"] = condition_sql result_sql, result_params = compiler.compile(self.result) template_params["result"] = result_sql template = template or self.template return template % template_params, ( *sql_params, *condition_params, *result_params, ) def get_group_by_cols(self): # This is not a complete expression and cannot be used in GROUP BY. cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols @deconstructible(path="django.db.models.Case") class Case(SQLiteNumericMixin, Expression): """ An SQL searched CASE expression: CASE WHEN n > 0 THEN 'positive' WHEN n < 0 THEN 'negative' ELSE 'zero' END """ template = "CASE %(cases)s ELSE %(default)s END" case_joiner = " " def __init__(self, *cases, default=None, output_field=None, **extra): if not all(isinstance(case, When) for case in cases): raise TypeError("Positional arguments must all be When objects.") super().__init__(output_field) self.cases = list(cases) self.default = self._parse_expressions(default)[0] self.extra = extra def __str__(self): return "CASE %s, ELSE %r" % ( ", ".join(str(c) for c in self.cases), self.default, ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return self.cases + [self.default] def set_source_expressions(self, exprs): *self.cases, self.default = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize for pos, case in enumerate(c.cases): c.cases[pos] = case.resolve_expression( query, allow_joins, reuse, summarize, for_save ) c.default = c.default.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def copy(self): c = super().copy() c.cases = c.cases[:] return c def as_sql( self, compiler, connection, template=None, case_joiner=None, **extra_context ): connection.ops.check_expression_support(self) if not self.cases: return compiler.compile(self.default) template_params = {**self.extra, **extra_context} case_parts = [] sql_params = [] for case in self.cases: try: case_sql, case_params = compiler.compile(case) except EmptyResultSet: continue case_parts.append(case_sql) sql_params.extend(case_params) default_sql, default_params = compiler.compile(self.default) if not case_parts: return default_sql, default_params case_joiner = case_joiner or self.case_joiner template_params["cases"] = case_joiner.join(case_parts) template_params["default"] = default_sql 
sql_params.extend(default_params) template = template or template_params.get("template", self.template) sql = template % template_params if self._output_field_or_none is not None: sql = connection.ops.unification_cast_sql(self.output_field) % sql return sql, sql_params def get_group_by_cols(self): if not self.cases: return self.default.get_group_by_cols() return super().get_group_by_cols() class Subquery(BaseExpression, Combinable): """ An explicit subquery. It may contain OuterRef() references to the outer query which will be resolved when it is applied to that query. """ template = "(%(subquery)s)" contains_aggregate = False empty_result_set_value = None def __init__(self, queryset, output_field=None, **extra): # Allow the usage of both QuerySet and sql.Query objects. self.query = getattr(queryset, "query", queryset).clone() self.query.subquery = True self.extra = extra super().__init__(output_field) def get_source_expressions(self): return [self.query] def set_source_expressions(self, exprs): self.query = exprs[0] def _resolve_output_field(self): return self.query.output_field def copy(self): clone = super().copy() clone.query = clone.query.clone() return clone @property def external_aliases(self): return self.query.external_aliases def get_external_cols(self): return self.query.get_external_cols() def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = {**self.extra, **extra_context} subquery_sql, sql_params = self.query.as_sql(compiler, connection) template_params["subquery"] = subquery_sql[1:-1] template = template or template_params.get("template", self.template) sql = template % template_params return sql, sql_params def get_group_by_cols(self): return self.query.get_group_by_cols(wrapper=self) class Exists(Subquery): template = "EXISTS(%(subquery)s)" output_field = fields.BooleanField() def __init__(self, queryset, negated=False, **kwargs): self.negated = negated super().__init__(queryset, **kwargs) self.query = self.query.exists() def __invert__(self): clone = self.copy() clone.negated = not self.negated return clone def as_sql(self, compiler, connection, **extra_context): try: sql, params = super().as_sql( compiler, connection, **extra_context, ) except EmptyResultSet: if self.negated: features = compiler.connection.features if not features.supports_boolean_expr_in_select_clause: return "1=1", () return compiler.compile(Value(True)) raise if self.negated: sql = "NOT {}".format(sql) return sql, params def select_format(self, compiler, sql, params): # Wrap EXISTS() with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql) return sql, params @deconstructible(path="django.db.models.OrderBy") class OrderBy(Expression): template = "%(expression)s %(ordering)s" conditional = False def __init__(self, expression, descending=False, nulls_first=None, nulls_last=None): if nulls_first and nulls_last: raise ValueError("nulls_first and nulls_last are mutually exclusive") if nulls_first is False or nulls_last is False: # When the deprecation ends, replace with: # raise ValueError( # "nulls_first and nulls_last values must be True or None." 
# ) warnings.warn( "Passing nulls_first=False or nulls_last=False is deprecated, use None " "instead.", RemovedInDjango50Warning, stacklevel=2, ) self.nulls_first = nulls_first self.nulls_last = nulls_last self.descending = descending if not hasattr(expression, "resolve_expression"): raise ValueError("expression must be an expression type") self.expression = expression def __repr__(self): return "{}({}, descending={})".format( self.__class__.__name__, self.expression, self.descending ) def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection, template=None, **extra_context): template = template or self.template if connection.features.supports_order_by_nulls_modifier: if self.nulls_last: template = "%s NULLS LAST" % template elif self.nulls_first: template = "%s NULLS FIRST" % template else: if self.nulls_last and not ( self.descending and connection.features.order_by_nulls_first ): template = "%%(expression)s IS NULL, %s" % template elif self.nulls_first and not ( not self.descending and connection.features.order_by_nulls_first ): template = "%%(expression)s IS NOT NULL, %s" % template connection.ops.check_expression_support(self) expression_sql, params = compiler.compile(self.expression) placeholders = { "expression": expression_sql, "ordering": "DESC" if self.descending else "ASC", **extra_context, } params *= template.count("%(expression)s") return (template % placeholders).rstrip(), params def as_oracle(self, compiler, connection): # Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped # in a CASE WHEN. if connection.ops.conditional_expression_supported_in_where_clause( self.expression ): copy = self.copy() copy.expression = Case( When(self.expression, then=True), default=False, ) return copy.as_sql(compiler, connection) return self.as_sql(compiler, connection) def get_group_by_cols(self): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def reverse_ordering(self): self.descending = not self.descending if self.nulls_first: self.nulls_last = True self.nulls_first = None elif self.nulls_last: self.nulls_first = True self.nulls_last = None return self def asc(self): self.descending = False def desc(self): self.descending = True class Window(SQLiteNumericMixin, Expression): template = "%(expression)s OVER (%(window)s)" # Although the main expression may either be an aggregate or an # expression with an aggregate function, the GROUP BY that will # be introduced in the query as a result is not desired. contains_aggregate = False contains_over_clause = True def __init__( self, expression, partition_by=None, order_by=None, frame=None, output_field=None, ): self.partition_by = partition_by self.order_by = order_by self.frame = frame if not getattr(expression, "window_compatible", False): raise ValueError( "Expression '%s' isn't compatible with OVER clauses." % expression.__class__.__name__ ) if self.partition_by is not None: if not isinstance(self.partition_by, (tuple, list)): self.partition_by = (self.partition_by,) self.partition_by = ExpressionList(*self.partition_by) if self.order_by is not None: if isinstance(self.order_by, (list, tuple)): self.order_by = OrderByList(*self.order_by) elif isinstance(self.order_by, (BaseExpression, str)): self.order_by = OrderByList(self.order_by) else: raise ValueError( "Window.order_by must be either a string reference to a " "field, an expression, or a list or tuple of them." 
) super().__init__(output_field=output_field) self.source_expression = self._parse_expressions(expression)[0] def _resolve_output_field(self): return self.source_expression.output_field def get_source_expressions(self): return [self.source_expression, self.partition_by, self.order_by, self.frame] def set_source_expressions(self, exprs): self.source_expression, self.partition_by, self.order_by, self.frame = exprs def as_sql(self, compiler, connection, template=None): connection.ops.check_expression_support(self) if not connection.features.supports_over_clause: raise NotSupportedError("This backend does not support window expressions.") expr_sql, params = compiler.compile(self.source_expression) window_sql, window_params = [], () if self.partition_by is not None: sql_expr, sql_params = self.partition_by.as_sql( compiler=compiler, connection=connection, template="PARTITION BY %(expressions)s", ) window_sql.append(sql_expr) window_params += tuple(sql_params) if self.order_by is not None: order_sql, order_params = compiler.compile(self.order_by) window_sql.append(order_sql) window_params += tuple(order_params) if self.frame: frame_sql, frame_params = compiler.compile(self.frame) window_sql.append(frame_sql) window_params += tuple(frame_params) template = template or self.template return ( template % {"expression": expr_sql, "window": " ".join(window_sql).strip()}, (*params, *window_params), ) def as_sqlite(self, compiler, connection): if isinstance(self.output_field, fields.DecimalField): # Casting to numeric must be outside of the window expression. copy = self.copy() source_expressions = copy.get_source_expressions() source_expressions[0].output_field = fields.FloatField() copy.set_source_expressions(source_expressions) return super(Window, copy).as_sqlite(compiler, connection) return self.as_sql(compiler, connection) def __str__(self): return "{} OVER ({}{}{})".format( str(self.source_expression), "PARTITION BY " + str(self.partition_by) if self.partition_by else "", str(self.order_by or ""), str(self.frame or ""), ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_group_by_cols(self): group_by_cols = [] if self.partition_by: group_by_cols.extend(self.partition_by.get_group_by_cols()) if self.order_by is not None: group_by_cols.extend(self.order_by.get_group_by_cols()) return group_by_cols class WindowFrame(Expression): """ Model the frame clause in window expressions. There are two types of frame clauses which are subclasses, however, all processing and validation (by no means intended to be complete) is done here. Thus, providing an end for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the last row in the frame). 
""" template = "%(frame_type)s BETWEEN %(start)s AND %(end)s" def __init__(self, start=None, end=None): self.start = Value(start) self.end = Value(end) def set_source_expressions(self, exprs): self.start, self.end = exprs def get_source_expressions(self): return [self.start, self.end] def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) start, end = self.window_frame_start_end( connection, self.start.value, self.end.value ) return ( self.template % { "frame_type": self.frame_type, "start": start, "end": end, }, [], ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_group_by_cols(self): return [] def __str__(self): if self.start.value is not None and self.start.value < 0: start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING) elif self.start.value is not None and self.start.value == 0: start = connection.ops.CURRENT_ROW else: start = connection.ops.UNBOUNDED_PRECEDING if self.end.value is not None and self.end.value > 0: end = "%d %s" % (self.end.value, connection.ops.FOLLOWING) elif self.end.value is not None and self.end.value == 0: end = connection.ops.CURRENT_ROW else: end = connection.ops.UNBOUNDED_FOLLOWING return self.template % { "frame_type": self.frame_type, "start": start, "end": end, } def window_frame_start_end(self, connection, start, end): raise NotImplementedError("Subclasses must implement window_frame_start_end().") class RowRange(WindowFrame): frame_type = "ROWS" def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_rows_start_end(start, end) class ValueRange(WindowFrame): frame_type = "RANGE" def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_range_start_end(start, end)
from collections import Counter, defaultdict from functools import partial, reduce from itertools import chain from operator import attrgetter, or_ from django.db import IntegrityError, connections, models, transaction from django.db.models import query_utils, signals, sql class ProtectedError(IntegrityError): def __init__(self, msg, protected_objects): self.protected_objects = protected_objects super().__init__(msg, protected_objects) class RestrictedError(IntegrityError): def __init__(self, msg, restricted_objects): self.restricted_objects = restricted_objects super().__init__(msg, restricted_objects) def CASCADE(collector, field, sub_objs, using): collector.collect( sub_objs, source=field.remote_field.model, source_attr=field.name, nullable=field.null, fail_on_restricted=False, ) if field.null and not connections[using].features.can_defer_constraint_checks: collector.add_field_update(field, None, sub_objs) def PROTECT(collector, field, sub_objs, using): raise ProtectedError( "Cannot delete some instances of model '%s' because they are " "referenced through a protected foreign key: '%s.%s'" % ( field.remote_field.model.__name__, sub_objs[0].__class__.__name__, field.name, ), sub_objs, ) def RESTRICT(collector, field, sub_objs, using): collector.add_restricted_objects(field, sub_objs) collector.add_dependency(field.remote_field.model, field.model) def SET(value): if callable(value): def set_on_delete(collector, field, sub_objs, using): collector.add_field_update(field, value(), sub_objs) else: def set_on_delete(collector, field, sub_objs, using): collector.add_field_update(field, value, sub_objs) set_on_delete.deconstruct = lambda: ("django.db.models.SET", (value,), {}) set_on_delete.lazy_sub_objs = True return set_on_delete def SET_NULL(collector, field, sub_objs, using): collector.add_field_update(field, None, sub_objs) SET_NULL.lazy_sub_objs = True def SET_DEFAULT(collector, field, sub_objs, using): collector.add_field_update(field, field.get_default(), sub_objs) SET_DEFAULT.lazy_sub_objs = True def DO_NOTHING(collector, field, sub_objs, using): pass def get_candidate_relations_to_delete(opts): # The candidate relations are the ones that come from N-1 and 1-1 relations. # N-N (i.e., many-to-many) relations aren't candidates for deletion. return ( f for f in opts.get_fields(include_hidden=True) if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many) ) class Collector: def __init__(self, using, origin=None): self.using = using # A Model or QuerySet object. self.origin = origin # Initially, {model: {instances}}, later values become lists. self.data = defaultdict(set) # {(field, value): [instances, …]} self.field_updates = defaultdict(list) # {model: {field: {instances}}} self.restricted_objects = defaultdict(partial(defaultdict, set)) # fast_deletes is a list of queryset-likes that can be deleted without # fetching the objects into memory. self.fast_deletes = [] # Tracks deletion-order dependency for databases without transactions # or ability to defer constraint checks. Only concrete model classes # should be included, as the dependencies exist only between actual # database tables; proxy models are represented here by their concrete # parent. self.dependencies = defaultdict(set) # {model: {models}} def add(self, objs, source=None, nullable=False, reverse_dependency=False): """ Add 'objs' to the collection of objects to be deleted. 
If the call is the result of a cascade, 'source' should be the model that caused it, and 'nullable' should be set to True if the relation can be null. Return a list of all objects that were not already collected. """ if not objs: return [] new_objs = [] model = objs[0].__class__ instances = self.data[model] for obj in objs: if obj not in instances: new_objs.append(obj) instances.update(new_objs) # Nullable relationships can be ignored -- they are nulled out before # deleting, and therefore do not affect the order in which objects have # to be deleted. if source is not None and not nullable: self.add_dependency(source, model, reverse_dependency=reverse_dependency) return new_objs def add_dependency(self, model, dependency, reverse_dependency=False): if reverse_dependency: model, dependency = dependency, model self.dependencies[model._meta.concrete_model].add( dependency._meta.concrete_model ) self.data.setdefault(dependency, self.data.default_factory()) def add_field_update(self, field, value, objs): """ Schedule a field update. 'objs' must be a homogeneous iterable collection of model instances (e.g. a QuerySet). """ self.field_updates[field, value].append(objs) def add_restricted_objects(self, field, objs): if objs: model = objs[0].__class__ self.restricted_objects[model][field].update(objs) def clear_restricted_objects_from_set(self, model, objs): if model in self.restricted_objects: self.restricted_objects[model] = { field: items - objs for field, items in self.restricted_objects[model].items() } def clear_restricted_objects_from_queryset(self, model, qs): if model in self.restricted_objects: objs = set( qs.filter( pk__in=[ obj.pk for objs in self.restricted_objects[model].values() for obj in objs ] ) ) self.clear_restricted_objects_from_set(model, objs) def _has_signal_listeners(self, model): return signals.pre_delete.has_listeners( model ) or signals.post_delete.has_listeners(model) def can_fast_delete(self, objs, from_field=None): """ Determine if the objects in the given queryset-like or single object can be fast-deleted. This can be done if there are no cascades, no parents and no signal listeners for the object class. The 'from_field' tells where we are coming from - we need this to determine if the objects are in fact to be deleted. Allow also skipping parent -> child -> parent chain preventing fast delete of the child. """ if from_field and from_field.remote_field.on_delete is not CASCADE: return False if hasattr(objs, "_meta"): model = objs._meta.model elif hasattr(objs, "model") and hasattr(objs, "_raw_delete"): model = objs.model else: return False if self._has_signal_listeners(model): return False # The use of from_field comes from the need to avoid cascade back to # parent when parent delete is cascading to child. opts = model._meta return ( all( link == from_field for link in opts.concrete_model._meta.parents.values() ) and # Foreign keys pointing to this model. all( related.field.remote_field.on_delete is DO_NOTHING for related in get_candidate_relations_to_delete(opts) ) and ( # Something like generic foreign key. not any( hasattr(field, "bulk_related_objects") for field in opts.private_fields ) ) ) def get_del_batches(self, objs, fields): """ Return the objs in suitably sized batches for the used connection. 
""" field_names = [field.name for field in fields] conn_batch_size = max( connections[self.using].ops.bulk_batch_size(field_names, objs), 1 ) if len(objs) > conn_batch_size: return [ objs[i : i + conn_batch_size] for i in range(0, len(objs), conn_batch_size) ] else: return [objs] def collect( self, objs, source=None, nullable=False, collect_related=True, source_attr=None, reverse_dependency=False, keep_parents=False, fail_on_restricted=True, ): """ Add 'objs' to the collection of objects to be deleted as well as all parent instances. 'objs' must be a homogeneous iterable collection of model instances (e.g. a QuerySet). If 'collect_related' is True, related objects will be handled by their respective on_delete handler. If the call is the result of a cascade, 'source' should be the model that caused it and 'nullable' should be set to True, if the relation can be null. If 'reverse_dependency' is True, 'source' will be deleted before the current model, rather than after. (Needed for cascading to parent models, the one case in which the cascade follows the forwards direction of an FK rather than the reverse direction.) If 'keep_parents' is True, data of parent model's will be not deleted. If 'fail_on_restricted' is False, error won't be raised even if it's prohibited to delete such objects due to RESTRICT, that defers restricted object checking in recursive calls where the top-level call may need to collect more objects to determine whether restricted ones can be deleted. """ if self.can_fast_delete(objs): self.fast_deletes.append(objs) return new_objs = self.add( objs, source, nullable, reverse_dependency=reverse_dependency ) if not new_objs: return model = new_objs[0].__class__ if not keep_parents: # Recursively collect concrete model's parent models, but not their # related objects. These will be found by meta.get_fields() concrete_model = model._meta.concrete_model for ptr in concrete_model._meta.parents.values(): if ptr: parent_objs = [getattr(obj, ptr.name) for obj in new_objs] self.collect( parent_objs, source=model, source_attr=ptr.remote_field.related_name, collect_related=False, reverse_dependency=True, fail_on_restricted=False, ) if not collect_related: return if keep_parents: parents = set(model._meta.get_parent_list()) model_fast_deletes = defaultdict(list) protected_objects = defaultdict(list) for related in get_candidate_relations_to_delete(model._meta): # Preserve parent reverse relationships if keep_parents=True. if keep_parents and related.model in parents: continue field = related.field on_delete = field.remote_field.on_delete if on_delete == DO_NOTHING: continue related_model = related.related_model if self.can_fast_delete(related_model, from_field=field): model_fast_deletes[related_model].append(field) continue batches = self.get_del_batches(new_objs, [field]) for batch in batches: sub_objs = self.related_objects(related_model, [field], batch) # Non-referenced fields can be deferred if no signal receivers # are connected for the related model as they'll never be # exposed to the user. Skip field deferring when some # relationships are select_related as interactions between both # features are hard to get right. This should only happen in # the rare cases where .related_objects is overridden anyway. 
if not ( sub_objs.query.select_related or self._has_signal_listeners(related_model) ): referenced_fields = set( chain.from_iterable( (rf.attname for rf in rel.field.foreign_related_fields) for rel in get_candidate_relations_to_delete( related_model._meta ) ) ) sub_objs = sub_objs.only(*tuple(referenced_fields)) if getattr(on_delete, "lazy_sub_objs", False) or sub_objs: try: on_delete(self, field, sub_objs, self.using) except ProtectedError as error: key = "'%s.%s'" % (field.model.__name__, field.name) protected_objects[key] += error.protected_objects if protected_objects: raise ProtectedError( "Cannot delete some instances of model %r because they are " "referenced through protected foreign keys: %s." % ( model.__name__, ", ".join(protected_objects), ), set(chain.from_iterable(protected_objects.values())), ) for related_model, related_fields in model_fast_deletes.items(): batches = self.get_del_batches(new_objs, related_fields) for batch in batches: sub_objs = self.related_objects(related_model, related_fields, batch) self.fast_deletes.append(sub_objs) for field in model._meta.private_fields: if hasattr(field, "bulk_related_objects"): # It's something like generic foreign key. sub_objs = field.bulk_related_objects(new_objs, self.using) self.collect( sub_objs, source=model, nullable=True, fail_on_restricted=False ) if fail_on_restricted: # Raise an error if collected restricted objects (RESTRICT) aren't # candidates for deletion also collected via CASCADE. for related_model, instances in self.data.items(): self.clear_restricted_objects_from_set(related_model, instances) for qs in self.fast_deletes: self.clear_restricted_objects_from_queryset(qs.model, qs) if self.restricted_objects.values(): restricted_objects = defaultdict(list) for related_model, fields in self.restricted_objects.items(): for field, objs in fields.items(): if objs: key = "'%s.%s'" % (related_model.__name__, field.name) restricted_objects[key] += objs if restricted_objects: raise RestrictedError( "Cannot delete some instances of model %r because " "they are referenced through restricted foreign keys: " "%s." % ( model.__name__, ", ".join(restricted_objects), ), set(chain.from_iterable(restricted_objects.values())), ) def related_objects(self, related_model, related_fields, objs): """ Get a QuerySet of the related model to objs via related fields. """ predicate = query_utils.Q.create( [(f"{related_field.name}__in", objs) for related_field in related_fields], connector=query_utils.Q.OR, ) return related_model._base_manager.using(self.using).filter(predicate) def instances_with_model(self): for model, instances in self.data.items(): for obj in instances: yield model, obj def sort(self): sorted_models = [] concrete_models = set() models = list(self.data) while len(sorted_models) < len(models): found = False for model in models: if model in sorted_models: continue dependencies = self.dependencies.get(model._meta.concrete_model) if not (dependencies and dependencies.difference(concrete_models)): sorted_models.append(model) concrete_models.add(model._meta.concrete_model) found = True if not found: return self.data = {model: self.data[model] for model in sorted_models} def delete(self): # sort instance collections for model, instances in self.data.items(): self.data[model] = sorted(instances, key=attrgetter("pk")) # if possible, bring the models in an order suitable for databases that # don't support transactions or cannot defer constraint checks until the # end of a transaction. 
self.sort() # number of objects deleted for each model label deleted_counter = Counter() # Optimize for the case with a single obj and no dependencies if len(self.data) == 1 and len(instances) == 1: instance = list(instances)[0] if self.can_fast_delete(instance): with transaction.mark_for_rollback_on_error(self.using): count = sql.DeleteQuery(model).delete_batch( [instance.pk], self.using ) setattr(instance, model._meta.pk.attname, None) return count, {model._meta.label: count} with transaction.atomic(using=self.using, savepoint=False): # send pre_delete signals for model, obj in self.instances_with_model(): if not model._meta.auto_created: signals.pre_delete.send( sender=model, instance=obj, using=self.using, origin=self.origin, ) # fast deletes for qs in self.fast_deletes: count = qs._raw_delete(using=self.using) if count: deleted_counter[qs.model._meta.label] += count # update fields for (field, value), instances_list in self.field_updates.items(): updates = [] objs = [] for instances in instances_list: if ( isinstance(instances, models.QuerySet) and instances._result_cache is None ): updates.append(instances) else: objs.extend(instances) if updates: combined_updates = reduce(or_, updates) combined_updates.update(**{field.name: value}) if objs: model = objs[0].__class__ query = sql.UpdateQuery(model) query.update_batch( list({obj.pk for obj in objs}), {field.name: value}, self.using ) # reverse instance collections for instances in self.data.values(): instances.reverse() # delete instances for model, instances in self.data.items(): query = sql.DeleteQuery(model) pk_list = [obj.pk for obj in instances] count = query.delete_batch(pk_list, self.using) if count: deleted_counter[model._meta.label] += count if not model._meta.auto_created: for obj in instances: signals.post_delete.send( sender=model, instance=obj, using=self.using, origin=self.origin, ) for model, instances in self.data.items(): for instance in instances: setattr(instance, model._meta.pk.attname, None) return sum(deleted_counter.values()), dict(deleted_counter)
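

# Illustrative usage sketch (not part of the module above). It mirrors how
# Model.delete() drives the Collector defined here; ``instance`` is assumed to
# be any saved model instance, and the function is never called by this module.
def _deletion_usage_sketch(instance, using="default"):
    collector = Collector(using=using, origin=instance)
    # Gather the instance, its concrete parents, and everything reachable
    # through cascading relations. PROTECT raises ProtectedError and RESTRICT
    # raises RestrictedError during this step if related rows block deletion.
    collector.collect([instance])
    # Returns (total_rows_deleted, {model_label: count}).
    return collector.delete()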
import itertools import math from django.core.exceptions import EmptyResultSet from django.db.models.expressions import Case, Expression, Func, Value, When from django.db.models.fields import ( BooleanField, CharField, DateTimeField, Field, IntegerField, UUIDField, ) from django.db.models.query_utils import RegisterLookupMixin from django.utils.datastructures import OrderedSet from django.utils.functional import cached_property from django.utils.hashable import make_hashable class Lookup(Expression): lookup_name = None prepare_rhs = True can_use_none_as_rhs = False def __init__(self, lhs, rhs): self.lhs, self.rhs = lhs, rhs self.rhs = self.get_prep_lookup() self.lhs = self.get_prep_lhs() if hasattr(self.lhs, "get_bilateral_transforms"): bilateral_transforms = self.lhs.get_bilateral_transforms() else: bilateral_transforms = [] if bilateral_transforms: # Warn the user as soon as possible if they are trying to apply # a bilateral transformation on a nested QuerySet: that won't work. from django.db.models.sql.query import Query # avoid circular import if isinstance(rhs, Query): raise NotImplementedError( "Bilateral transformations on nested querysets are not implemented." ) self.bilateral_transforms = bilateral_transforms def apply_bilateral_transforms(self, value): for transform in self.bilateral_transforms: value = transform(value) return value def __repr__(self): return f"{self.__class__.__name__}({self.lhs!r}, {self.rhs!r})" def batch_process_rhs(self, compiler, connection, rhs=None): if rhs is None: rhs = self.rhs if self.bilateral_transforms: sqls, sqls_params = [], [] for p in rhs: value = Value(p, output_field=self.lhs.output_field) value = self.apply_bilateral_transforms(value) value = value.resolve_expression(compiler.query) sql, sql_params = compiler.compile(value) sqls.append(sql) sqls_params.extend(sql_params) else: _, params = self.get_db_prep_lookup(rhs, connection) sqls, sqls_params = ["%s"] * len(params), params return sqls, sqls_params def get_source_expressions(self): if self.rhs_is_direct_value(): return [self.lhs] return [self.lhs, self.rhs] def set_source_expressions(self, new_exprs): if len(new_exprs) == 1: self.lhs = new_exprs[0] else: self.lhs, self.rhs = new_exprs def get_prep_lookup(self): if not self.prepare_rhs or hasattr(self.rhs, "resolve_expression"): return self.rhs if hasattr(self.lhs, "output_field"): if hasattr(self.lhs.output_field, "get_prep_value"): return self.lhs.output_field.get_prep_value(self.rhs) elif self.rhs_is_direct_value(): return Value(self.rhs) return self.rhs def get_prep_lhs(self): if hasattr(self.lhs, "resolve_expression"): return self.lhs return Value(self.lhs) def get_db_prep_lookup(self, value, connection): return ("%s", [value]) def process_lhs(self, compiler, connection, lhs=None): lhs = lhs or self.lhs if hasattr(lhs, "resolve_expression"): lhs = lhs.resolve_expression(compiler.query) sql, params = compiler.compile(lhs) if isinstance(lhs, Lookup): # Wrapped in parentheses to respect operator precedence. 
sql = f"({sql})" return sql, params def process_rhs(self, compiler, connection): value = self.rhs if self.bilateral_transforms: if self.rhs_is_direct_value(): # Do not call get_db_prep_lookup here as the value will be # transformed before being used for lookup value = Value(value, output_field=self.lhs.output_field) value = self.apply_bilateral_transforms(value) value = value.resolve_expression(compiler.query) if hasattr(value, "as_sql"): sql, params = compiler.compile(value) # Ensure expression is wrapped in parentheses to respect operator # precedence but avoid double wrapping as it can be misinterpreted # on some backends (e.g. subqueries on SQLite). if sql and sql[0] != "(": sql = "(%s)" % sql return sql, params else: return self.get_db_prep_lookup(value, connection) def rhs_is_direct_value(self): return not hasattr(self.rhs, "as_sql") def get_group_by_cols(self): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def as_oracle(self, compiler, connection): # Oracle doesn't allow EXISTS() and filters to be compared to another # expression unless they're wrapped in a CASE WHEN. wrapped = False exprs = [] for expr in (self.lhs, self.rhs): if connection.ops.conditional_expression_supported_in_where_clause(expr): expr = Case(When(expr, then=True), default=False) wrapped = True exprs.append(expr) lookup = type(self)(*exprs) if wrapped else self return lookup.as_sql(compiler, connection) @cached_property def output_field(self): return BooleanField() @property def identity(self): return self.__class__, self.lhs, self.rhs def __eq__(self, other): if not isinstance(other, Lookup): return NotImplemented return self.identity == other.identity def __hash__(self): return hash(make_hashable(self.identity)) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize c.lhs = self.lhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) if hasattr(self.rhs, "resolve_expression"): c.rhs = self.rhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def select_format(self, compiler, sql, params): # Wrap filters with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END" return sql, params class Transform(RegisterLookupMixin, Func): """ RegisterLookupMixin() is first so that get_lookup() and get_transform() first examine self and then check output_field. 
""" bilateral = False arity = 1 @property def lhs(self): return self.get_source_expressions()[0] def get_bilateral_transforms(self): if hasattr(self.lhs, "get_bilateral_transforms"): bilateral_transforms = self.lhs.get_bilateral_transforms() else: bilateral_transforms = [] if self.bilateral: bilateral_transforms.append(self.__class__) return bilateral_transforms class BuiltinLookup(Lookup): def process_lhs(self, compiler, connection, lhs=None): lhs_sql, params = super().process_lhs(compiler, connection, lhs) field_internal_type = self.lhs.output_field.get_internal_type() db_type = self.lhs.output_field.db_type(connection=connection) lhs_sql = connection.ops.field_cast_sql(db_type, field_internal_type) % lhs_sql lhs_sql = ( connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql ) return lhs_sql, list(params) def as_sql(self, compiler, connection): lhs_sql, params = self.process_lhs(compiler, connection) rhs_sql, rhs_params = self.process_rhs(compiler, connection) params.extend(rhs_params) rhs_sql = self.get_rhs_op(connection, rhs_sql) return "%s %s" % (lhs_sql, rhs_sql), params def get_rhs_op(self, connection, rhs): return connection.operators[self.lookup_name] % rhs class FieldGetDbPrepValueMixin: """ Some lookups require Field.get_db_prep_value() to be called on their inputs. """ get_db_prep_lookup_value_is_iterable = False def get_db_prep_lookup(self, value, connection): # For relational fields, use the 'target_field' attribute of the # output_field. field = getattr(self.lhs.output_field, "target_field", None) get_db_prep_value = ( getattr(field, "get_db_prep_value", None) or self.lhs.output_field.get_db_prep_value ) return ( "%s", [get_db_prep_value(v, connection, prepared=True) for v in value] if self.get_db_prep_lookup_value_is_iterable else [get_db_prep_value(value, connection, prepared=True)], ) class FieldGetDbPrepValueIterableMixin(FieldGetDbPrepValueMixin): """ Some lookups require Field.get_db_prep_value() to be called on each value in an iterable. """ get_db_prep_lookup_value_is_iterable = True def get_prep_lookup(self): if hasattr(self.rhs, "resolve_expression"): return self.rhs prepared_values = [] for rhs_value in self.rhs: if hasattr(rhs_value, "resolve_expression"): # An expression will be handled by the database but can coexist # alongside real values. pass elif self.prepare_rhs and hasattr(self.lhs.output_field, "get_prep_value"): rhs_value = self.lhs.output_field.get_prep_value(rhs_value) prepared_values.append(rhs_value) return prepared_values def process_rhs(self, compiler, connection): if self.rhs_is_direct_value(): # rhs should be an iterable of values. Use batch_process_rhs() # to prepare/transform those values. return self.batch_process_rhs(compiler, connection) else: return super().process_rhs(compiler, connection) def resolve_expression_parameter(self, compiler, connection, sql, param): params = [param] if hasattr(param, "resolve_expression"): param = param.resolve_expression(compiler.query) if hasattr(param, "as_sql"): sql, params = compiler.compile(param) return sql, params def batch_process_rhs(self, compiler, connection, rhs=None): pre_processed = super().batch_process_rhs(compiler, connection, rhs) # The params list may contain expressions which compile to a # sql/param pair. Zip them to get sql and param pairs that refer to the # same argument and attempt to replace them with the result of # compiling the param step. 
sql, params = zip( *( self.resolve_expression_parameter(compiler, connection, sql, param) for sql, param in zip(*pre_processed) ) ) params = itertools.chain.from_iterable(params) return sql, tuple(params) class PostgresOperatorLookup(FieldGetDbPrepValueMixin, Lookup): """Lookup defined by operators on PostgreSQL.""" postgres_operator = None def as_postgresql(self, compiler, connection): lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params = tuple(lhs_params) + tuple(rhs_params) return "%s %s %s" % (lhs, self.postgres_operator, rhs), params @Field.register_lookup class Exact(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = "exact" def get_prep_lookup(self): from django.db.models.sql.query import Query # avoid circular import if isinstance(self.rhs, Query): if self.rhs.has_limit_one(): if not self.rhs.has_select_fields: self.rhs.clear_select_clause() self.rhs.add_fields(["pk"]) else: raise ValueError( "The QuerySet value for an exact lookup must be limited to " "one result using slicing." ) return super().get_prep_lookup() def as_sql(self, compiler, connection): # Avoid comparison against direct rhs if lhs is a boolean value. That # turns "boolfield__exact=True" into "WHERE boolean_field" instead of # "WHERE boolean_field = True" when allowed. if ( isinstance(self.rhs, bool) and getattr(self.lhs, "conditional", False) and connection.ops.conditional_expression_supported_in_where_clause( self.lhs ) ): lhs_sql, params = self.process_lhs(compiler, connection) template = "%s" if self.rhs else "NOT %s" return template % lhs_sql, params return super().as_sql(compiler, connection) @Field.register_lookup class IExact(BuiltinLookup): lookup_name = "iexact" prepare_rhs = False def process_rhs(self, qn, connection): rhs, params = super().process_rhs(qn, connection) if params: params[0] = connection.ops.prep_for_iexact_query(params[0]) return rhs, params @Field.register_lookup class GreaterThan(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = "gt" @Field.register_lookup class GreaterThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = "gte" @Field.register_lookup class LessThan(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = "lt" @Field.register_lookup class LessThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = "lte" class IntegerFieldFloatRounding: """ Allow floats to work as query values for IntegerField. Without this, the decimal portion of the float would always be discarded. """ def get_prep_lookup(self): if isinstance(self.rhs, float): self.rhs = math.ceil(self.rhs) return super().get_prep_lookup() @IntegerField.register_lookup class IntegerGreaterThanOrEqual(IntegerFieldFloatRounding, GreaterThanOrEqual): pass @IntegerField.register_lookup class IntegerLessThan(IntegerFieldFloatRounding, LessThan): pass @Field.register_lookup class In(FieldGetDbPrepValueIterableMixin, BuiltinLookup): lookup_name = "in" def get_prep_lookup(self): from django.db.models.sql.query import Query # avoid circular import if isinstance(self.rhs, Query): self.rhs.clear_ordering(clear_default=True) if not self.rhs.has_select_fields: self.rhs.clear_select_clause() self.rhs.add_fields(["pk"]) return super().get_prep_lookup() def process_rhs(self, compiler, connection): db_rhs = getattr(self.rhs, "_db", None) if db_rhs is not None and db_rhs != connection.alias: raise ValueError( "Subqueries aren't allowed across different databases. Force " "the inner query to be evaluated using `list(inner_query)`." 
) if self.rhs_is_direct_value(): # Remove None from the list as NULL is never equal to anything. try: rhs = OrderedSet(self.rhs) rhs.discard(None) except TypeError: # Unhashable items in self.rhs rhs = [r for r in self.rhs if r is not None] if not rhs: raise EmptyResultSet # rhs should be an iterable; use batch_process_rhs() to # prepare/transform those values. sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs) placeholder = "(" + ", ".join(sqls) + ")" return (placeholder, sqls_params) return super().process_rhs(compiler, connection) def get_rhs_op(self, connection, rhs): return "IN %s" % rhs def as_sql(self, compiler, connection): max_in_list_size = connection.ops.max_in_list_size() if ( self.rhs_is_direct_value() and max_in_list_size and len(self.rhs) > max_in_list_size ): return self.split_parameter_list_as_sql(compiler, connection) return super().as_sql(compiler, connection) def split_parameter_list_as_sql(self, compiler, connection): # This is a special case for databases which limit the number of # elements which can appear in an 'IN' clause. max_in_list_size = connection.ops.max_in_list_size() lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.batch_process_rhs(compiler, connection) in_clause_elements = ["("] params = [] for offset in range(0, len(rhs_params), max_in_list_size): if offset > 0: in_clause_elements.append(" OR ") in_clause_elements.append("%s IN (" % lhs) params.extend(lhs_params) sqls = rhs[offset : offset + max_in_list_size] sqls_params = rhs_params[offset : offset + max_in_list_size] param_group = ", ".join(sqls) in_clause_elements.append(param_group) in_clause_elements.append(")") params.extend(sqls_params) in_clause_elements.append(")") return "".join(in_clause_elements), params class PatternLookup(BuiltinLookup): param_pattern = "%%%s%%" prepare_rhs = False def get_rhs_op(self, connection, rhs): # Assume we are in startswith. We need to produce SQL like: # col LIKE %s, ['thevalue%'] # For python values we can (and should) do that directly in Python, # but if the value is for example reference to other column, then # we need to add the % pattern match to the lookup by something like # col LIKE othercol || '%%' # So, for Python values we don't need any special pattern, but for # SQL reference values or SQL transformations we need the correct # pattern added. 
if hasattr(self.rhs, "as_sql") or self.bilateral_transforms: pattern = connection.pattern_ops[self.lookup_name].format( connection.pattern_esc ) return pattern.format(rhs) else: return super().get_rhs_op(connection, rhs) def process_rhs(self, qn, connection): rhs, params = super().process_rhs(qn, connection) if self.rhs_is_direct_value() and params and not self.bilateral_transforms: params[0] = self.param_pattern % connection.ops.prep_for_like_query( params[0] ) return rhs, params @Field.register_lookup class Contains(PatternLookup): lookup_name = "contains" @Field.register_lookup class IContains(Contains): lookup_name = "icontains" @Field.register_lookup class StartsWith(PatternLookup): lookup_name = "startswith" param_pattern = "%s%%" @Field.register_lookup class IStartsWith(StartsWith): lookup_name = "istartswith" @Field.register_lookup class EndsWith(PatternLookup): lookup_name = "endswith" param_pattern = "%%%s" @Field.register_lookup class IEndsWith(EndsWith): lookup_name = "iendswith" @Field.register_lookup class Range(FieldGetDbPrepValueIterableMixin, BuiltinLookup): lookup_name = "range" def get_rhs_op(self, connection, rhs): return "BETWEEN %s AND %s" % (rhs[0], rhs[1]) @Field.register_lookup class IsNull(BuiltinLookup): lookup_name = "isnull" prepare_rhs = False def as_sql(self, compiler, connection): if not isinstance(self.rhs, bool): raise ValueError( "The QuerySet value for an isnull lookup must be True or False." ) sql, params = compiler.compile(self.lhs) if self.rhs: return "%s IS NULL" % sql, params else: return "%s IS NOT NULL" % sql, params @Field.register_lookup class Regex(BuiltinLookup): lookup_name = "regex" prepare_rhs = False def as_sql(self, compiler, connection): if self.lookup_name in connection.operators: return super().as_sql(compiler, connection) else: lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) sql_template = connection.ops.regex_lookup(self.lookup_name) return sql_template % (lhs, rhs), lhs_params + rhs_params @Field.register_lookup class IRegex(Regex): lookup_name = "iregex" class YearLookup(Lookup): def year_lookup_bounds(self, connection, year): from django.db.models.functions import ExtractIsoYear iso_year = isinstance(self.lhs, ExtractIsoYear) output_field = self.lhs.lhs.output_field if isinstance(output_field, DateTimeField): bounds = connection.ops.year_lookup_bounds_for_datetime_field( year, iso_year=iso_year, ) else: bounds = connection.ops.year_lookup_bounds_for_date_field( year, iso_year=iso_year, ) return bounds def as_sql(self, compiler, connection): # Avoid the extract operation if the rhs is a direct value to allow # indexes to be used. if self.rhs_is_direct_value(): # Skip the extract part by directly using the originating field, # that is self.lhs.lhs. 
lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs) rhs_sql, _ = self.process_rhs(compiler, connection) rhs_sql = self.get_direct_rhs_sql(connection, rhs_sql) start, finish = self.year_lookup_bounds(connection, self.rhs) params.extend(self.get_bound_params(start, finish)) return "%s %s" % (lhs_sql, rhs_sql), params return super().as_sql(compiler, connection) def get_direct_rhs_sql(self, connection, rhs): return connection.operators[self.lookup_name] % rhs def get_bound_params(self, start, finish): raise NotImplementedError( "subclasses of YearLookup must provide a get_bound_params() method" ) class YearExact(YearLookup, Exact): def get_direct_rhs_sql(self, connection, rhs): return "BETWEEN %s AND %s" def get_bound_params(self, start, finish): return (start, finish) class YearGt(YearLookup, GreaterThan): def get_bound_params(self, start, finish): return (finish,) class YearGte(YearLookup, GreaterThanOrEqual): def get_bound_params(self, start, finish): return (start,) class YearLt(YearLookup, LessThan): def get_bound_params(self, start, finish): return (start,) class YearLte(YearLookup, LessThanOrEqual): def get_bound_params(self, start, finish): return (finish,) class UUIDTextMixin: """ Strip hyphens from a value when filtering a UUIDField on backends without a native datatype for UUID. """ def process_rhs(self, qn, connection): if not connection.features.has_native_uuid_field: from django.db.models.functions import Replace if self.rhs_is_direct_value(): self.rhs = Value(self.rhs) self.rhs = Replace( self.rhs, Value("-"), Value(""), output_field=CharField() ) rhs, params = super().process_rhs(qn, connection) return rhs, params @UUIDField.register_lookup class UUIDIExact(UUIDTextMixin, IExact): pass @UUIDField.register_lookup class UUIDContains(UUIDTextMixin, Contains): pass @UUIDField.register_lookup class UUIDIContains(UUIDTextMixin, IContains): pass @UUIDField.register_lookup class UUIDStartsWith(UUIDTextMixin, StartsWith): pass @UUIDField.register_lookup class UUIDIStartsWith(UUIDTextMixin, IStartsWith): pass @UUIDField.register_lookup class UUIDEndsWith(UUIDTextMixin, EndsWith): pass @UUIDField.register_lookup class UUIDIEndsWith(UUIDTextMixin, IEndsWith): pass
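

# Illustrative sketch (not part of the module above) of a custom lookup built
# on the machinery defined here. The ``ne`` lookup name is hypothetical (it is
# not shipped with Django), and registration is left commented out so the
# sketch has no side effects on the global Field lookups.
class NotEqual(Lookup):
    lookup_name = "ne"

    def as_sql(self, compiler, connection):
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        # Compile both sides and join them with the SQL inequality operator.
        return "%s <> %s" % (lhs, rhs), lhs_params + rhs_params


# Field.register_lookup(NotEqual)  # enables Model.objects.filter(age__ne=30)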
from enum import Enum from django.core.exceptions import FieldError, ValidationError from django.db import connections from django.db.models.expressions import Exists, ExpressionList, F from django.db.models.indexes import IndexExpression from django.db.models.lookups import Exact from django.db.models.query_utils import Q from django.db.models.sql.query import Query from django.db.utils import DEFAULT_DB_ALIAS from django.utils.translation import gettext_lazy as _ __all__ = ["BaseConstraint", "CheckConstraint", "Deferrable", "UniqueConstraint"] class BaseConstraint: default_violation_error_message = _("Constraint “%(name)s” is violated.") violation_error_message = None def __init__(self, name, violation_error_message=None): self.name = name if violation_error_message is not None: self.violation_error_message = violation_error_message else: self.violation_error_message = self.default_violation_error_message @property def contains_expressions(self): return False def constraint_sql(self, model, schema_editor): raise NotImplementedError("This method must be implemented by a subclass.") def create_sql(self, model, schema_editor): raise NotImplementedError("This method must be implemented by a subclass.") def remove_sql(self, model, schema_editor): raise NotImplementedError("This method must be implemented by a subclass.") def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS): raise NotImplementedError("This method must be implemented by a subclass.") def get_violation_error_message(self): return self.violation_error_message % {"name": self.name} def deconstruct(self): path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) path = path.replace("django.db.models.constraints", "django.db.models") kwargs = {"name": self.name} if ( self.violation_error_message is not None and self.violation_error_message != self.default_violation_error_message ): kwargs["violation_error_message"] = self.violation_error_message return (path, (), kwargs) def clone(self): _, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) class CheckConstraint(BaseConstraint): def __init__(self, *, check, name, violation_error_message=None): self.check = check if not getattr(check, "conditional", False): raise TypeError( "CheckConstraint.check must be a Q instance or boolean expression." 
) super().__init__(name, violation_error_message=violation_error_message) def _get_check_sql(self, model, schema_editor): query = Query(model=model, alias_cols=False) where = query.build_where(self.check) compiler = query.get_compiler(connection=schema_editor.connection) sql, params = where.as_sql(compiler, schema_editor.connection) return sql % tuple(schema_editor.quote_value(p) for p in params) def constraint_sql(self, model, schema_editor): check = self._get_check_sql(model, schema_editor) return schema_editor._check_sql(self.name, check) def create_sql(self, model, schema_editor): check = self._get_check_sql(model, schema_editor) return schema_editor._create_check_sql(model, self.name, check) def remove_sql(self, model, schema_editor): return schema_editor._delete_check_sql(model, self.name) def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS): against = instance._get_field_value_map(meta=model._meta, exclude=exclude) try: if not Q(self.check).check(against, using=using): raise ValidationError(self.get_violation_error_message()) except FieldError: pass def __repr__(self): return "<%s: check=%s name=%s>" % ( self.__class__.__qualname__, self.check, repr(self.name), ) def __eq__(self, other): if isinstance(other, CheckConstraint): return ( self.name == other.name and self.check == other.check and self.violation_error_message == other.violation_error_message ) return super().__eq__(other) def deconstruct(self): path, args, kwargs = super().deconstruct() kwargs["check"] = self.check return path, args, kwargs class Deferrable(Enum): DEFERRED = "deferred" IMMEDIATE = "immediate" # A similar format was proposed for Python 3.10. def __repr__(self): return f"{self.__class__.__qualname__}.{self._name_}" class UniqueConstraint(BaseConstraint): def __init__( self, *expressions, fields=(), name=None, condition=None, deferrable=None, include=None, opclasses=(), violation_error_message=None, ): if not name: raise ValueError("A unique constraint must be named.") if not expressions and not fields: raise ValueError( "At least one field or expression is required to define a " "unique constraint." ) if expressions and fields: raise ValueError( "UniqueConstraint.fields and expressions are mutually exclusive." ) if not isinstance(condition, (type(None), Q)): raise ValueError("UniqueConstraint.condition must be a Q instance.") if condition and deferrable: raise ValueError("UniqueConstraint with conditions cannot be deferred.") if include and deferrable: raise ValueError("UniqueConstraint with include fields cannot be deferred.") if opclasses and deferrable: raise ValueError("UniqueConstraint with opclasses cannot be deferred.") if expressions and deferrable: raise ValueError("UniqueConstraint with expressions cannot be deferred.") if expressions and opclasses: raise ValueError( "UniqueConstraint.opclasses cannot be used with expressions. " "Use django.contrib.postgres.indexes.OpClass() instead." ) if not isinstance(deferrable, (type(None), Deferrable)): raise ValueError( "UniqueConstraint.deferrable must be a Deferrable instance." ) if not isinstance(include, (type(None), list, tuple)): raise ValueError("UniqueConstraint.include must be a list or tuple.") if not isinstance(opclasses, (list, tuple)): raise ValueError("UniqueConstraint.opclasses must be a list or tuple.") if opclasses and len(fields) != len(opclasses): raise ValueError( "UniqueConstraint.fields and UniqueConstraint.opclasses must " "have the same number of elements." 
) self.fields = tuple(fields) self.condition = condition self.deferrable = deferrable self.include = tuple(include) if include else () self.opclasses = opclasses self.expressions = tuple( F(expression) if isinstance(expression, str) else expression for expression in expressions ) super().__init__(name, violation_error_message=violation_error_message) @property def contains_expressions(self): return bool(self.expressions) def _get_condition_sql(self, model, schema_editor): if self.condition is None: return None query = Query(model=model, alias_cols=False) where = query.build_where(self.condition) compiler = query.get_compiler(connection=schema_editor.connection) sql, params = where.as_sql(compiler, schema_editor.connection) return sql % tuple(schema_editor.quote_value(p) for p in params) def _get_index_expressions(self, model, schema_editor): if not self.expressions: return None index_expressions = [] for expression in self.expressions: index_expression = IndexExpression(expression) index_expression.set_wrapper_classes(schema_editor.connection) index_expressions.append(index_expression) return ExpressionList(*index_expressions).resolve_expression( Query(model, alias_cols=False), ) def constraint_sql(self, model, schema_editor): fields = [model._meta.get_field(field_name) for field_name in self.fields] include = [ model._meta.get_field(field_name).column for field_name in self.include ] condition = self._get_condition_sql(model, schema_editor) expressions = self._get_index_expressions(model, schema_editor) return schema_editor._unique_sql( model, fields, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, expressions=expressions, ) def create_sql(self, model, schema_editor): fields = [model._meta.get_field(field_name) for field_name in self.fields] include = [ model._meta.get_field(field_name).column for field_name in self.include ] condition = self._get_condition_sql(model, schema_editor) expressions = self._get_index_expressions(model, schema_editor) return schema_editor._create_unique_sql( model, fields, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, expressions=expressions, ) def remove_sql(self, model, schema_editor): condition = self._get_condition_sql(model, schema_editor) include = [ model._meta.get_field(field_name).column for field_name in self.include ] expressions = self._get_index_expressions(model, schema_editor) return schema_editor._delete_unique_sql( model, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, expressions=expressions, ) def __repr__(self): return "<%s:%s%s%s%s%s%s%s>" % ( self.__class__.__qualname__, "" if not self.fields else " fields=%s" % repr(self.fields), "" if not self.expressions else " expressions=%s" % repr(self.expressions), " name=%s" % repr(self.name), "" if self.condition is None else " condition=%s" % self.condition, "" if self.deferrable is None else " deferrable=%r" % self.deferrable, "" if not self.include else " include=%s" % repr(self.include), "" if not self.opclasses else " opclasses=%s" % repr(self.opclasses), ) def __eq__(self, other): if isinstance(other, UniqueConstraint): return ( self.name == other.name and self.fields == other.fields and self.condition == other.condition and self.deferrable == other.deferrable and self.include == other.include and self.opclasses == other.opclasses and self.expressions == other.expressions and self.violation_error_message == 
other.violation_error_message ) return super().__eq__(other) def deconstruct(self): path, args, kwargs = super().deconstruct() if self.fields: kwargs["fields"] = self.fields if self.condition: kwargs["condition"] = self.condition if self.deferrable: kwargs["deferrable"] = self.deferrable if self.include: kwargs["include"] = self.include if self.opclasses: kwargs["opclasses"] = self.opclasses return path, self.expressions, kwargs def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS): queryset = model._default_manager.using(using) if self.fields: lookup_kwargs = {} for field_name in self.fields: if exclude and field_name in exclude: return field = model._meta.get_field(field_name) lookup_value = getattr(instance, field.attname) if lookup_value is None or ( lookup_value == "" and connections[using].features.interprets_empty_strings_as_nulls ): # A composite constraint containing NULL value cannot cause # a violation since NULL != NULL in SQL. return lookup_kwargs[field.name] = lookup_value queryset = queryset.filter(**lookup_kwargs) else: # Ignore constraints with excluded fields. if exclude: for expression in self.expressions: if hasattr(expression, "flatten"): for expr in expression.flatten(): if isinstance(expr, F) and expr.name in exclude: return elif isinstance(expression, F) and expression.name in exclude: return replacements = { F(field): value for field, value in instance._get_field_value_map( meta=model._meta, exclude=exclude ).items() } expressions = [ Exact(expr, expr.replace_expressions(replacements)) for expr in self.expressions ] queryset = queryset.filter(*expressions) model_class_pk = instance._get_pk_val(model._meta) if not instance._state.adding and model_class_pk is not None: queryset = queryset.exclude(pk=model_class_pk) if not self.condition: if queryset.exists(): if self.expressions: raise ValidationError(self.get_violation_error_message()) # When fields are defined, use the unique_error_message() for # backward compatibility. for model, constraints in instance.get_constraints(): for constraint in constraints: if constraint is self: raise ValidationError( instance.unique_error_message(model, self.fields) ) else: against = instance._get_field_value_map(meta=model._meta, exclude=exclude) try: if (self.condition & Exists(queryset.filter(self.condition))).check( against, using=using ): raise ValidationError(self.get_violation_error_message()) except FieldError: pass
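# --- Illustrative usage sketch (not part of the constraints module above) ---
# A minimal example, assuming it lives in an installed app, of how the
# CheckConstraint and UniqueConstraint classes defined above are attached to a
# model via Meta.constraints. The Booking model, the app label, and the
# constraint names below are hypothetical.
from django.db import models
from django.db.models import CheckConstraint, F, Q, UniqueConstraint


class Booking(models.Model):
    room = models.CharField(max_length=50)
    start = models.DateTimeField()
    end = models.DateTimeField()
    cancelled = models.BooleanField(default=False)

    class Meta:
        app_label = "bookings"  # hypothetical app
        constraints = [
            # Enforced by the database and checked by CheckConstraint.validate()
            # during model validation.
            CheckConstraint(
                check=Q(end__gt=F("start")),
                name="booking_end_after_start",
                violation_error_message="A booking must end after it starts.",
            ),
            # Partial unique constraint: at most one non-cancelled booking per
            # room and start time (conditions require database support).
            UniqueConstraint(
                fields=["room", "start"],
                condition=Q(cancelled=False),
                name="unique_active_booking",
            ),
        ]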
6723f039c36f874980c328c7750f428e0cd1ddded65a70ebbe20527d771fbd89
from django.db import models from django.db.migrations.operations.base import Operation from django.db.migrations.state import ModelState from django.db.migrations.utils import field_references, resolve_relation from django.db.models.options import normalize_together from django.utils.functional import cached_property from .fields import AddField, AlterField, FieldOperation, RemoveField, RenameField def _check_for_duplicates(arg_name, objs): used_vals = set() for val in objs: if val in used_vals: raise ValueError( "Found duplicate value %s in CreateModel %s argument." % (val, arg_name) ) used_vals.add(val) class ModelOperation(Operation): def __init__(self, name): self.name = name @cached_property def name_lower(self): return self.name.lower() def references_model(self, name, app_label): return name.lower() == self.name_lower def reduce(self, operation, app_label): return super().reduce(operation, app_label) or self.can_reduce_through( operation, app_label ) def can_reduce_through(self, operation, app_label): return not operation.references_model(self.name, app_label) class CreateModel(ModelOperation): """Create a model's table.""" serialization_expand_args = ["fields", "options", "managers"] def __init__(self, name, fields, options=None, bases=None, managers=None): self.fields = fields self.options = options or {} self.bases = bases or (models.Model,) self.managers = managers or [] super().__init__(name) # Sanity-check that there are no duplicated field names, bases, or # manager names _check_for_duplicates("fields", (name for name, _ in self.fields)) _check_for_duplicates( "bases", ( base._meta.label_lower if hasattr(base, "_meta") else base.lower() if isinstance(base, str) else base for base in self.bases ), ) _check_for_duplicates("managers", (name for name, _ in self.managers)) def deconstruct(self): kwargs = { "name": self.name, "fields": self.fields, } if self.options: kwargs["options"] = self.options if self.bases and self.bases != (models.Model,): kwargs["bases"] = self.bases if self.managers and self.managers != [("objects", models.Manager())]: kwargs["managers"] = self.managers return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.add_model( ModelState( app_label, self.name, list(self.fields), dict(self.options), tuple(self.bases), list(self.managers), ) ) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def describe(self): return "Create %smodel %s" % ( "proxy " if self.options.get("proxy", False) else "", self.name, ) @property def migration_name_fragment(self): return self.name_lower def references_model(self, name, app_label): name_lower = name.lower() if name_lower == self.name_lower: return True # Check we didn't inherit from the model reference_model_tuple = (app_label, name_lower) for base in self.bases: if ( base is not models.Model and isinstance(base, (models.base.ModelBase, str)) and resolve_relation(base, app_label) == reference_model_tuple ): return True # Check we have no FKs/M2Ms with it for _name, field in self.fields: if field_references( (app_label, self.name_lower), field, reference_model_tuple ): 
return True return False def reduce(self, operation, app_label): if ( isinstance(operation, DeleteModel) and self.name_lower == operation.name_lower and not self.options.get("proxy", False) ): return [] elif ( isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower ): return [ CreateModel( operation.new_name, fields=self.fields, options=self.options, bases=self.bases, managers=self.managers, ), ] elif ( isinstance(operation, AlterModelOptions) and self.name_lower == operation.name_lower ): options = {**self.options, **operation.options} for key in operation.ALTER_OPTION_KEYS: if key not in operation.options: options.pop(key, None) return [ CreateModel( self.name, fields=self.fields, options=options, bases=self.bases, managers=self.managers, ), ] elif ( isinstance(operation, AlterModelManagers) and self.name_lower == operation.name_lower ): return [ CreateModel( self.name, fields=self.fields, options=self.options, bases=self.bases, managers=operation.managers, ), ] elif ( isinstance(operation, AlterTogetherOptionOperation) and self.name_lower == operation.name_lower ): return [ CreateModel( self.name, fields=self.fields, options={ **self.options, **{operation.option_name: operation.option_value}, }, bases=self.bases, managers=self.managers, ), ] elif ( isinstance(operation, AlterOrderWithRespectTo) and self.name_lower == operation.name_lower ): return [ CreateModel( self.name, fields=self.fields, options={ **self.options, "order_with_respect_to": operation.order_with_respect_to, }, bases=self.bases, managers=self.managers, ), ] elif ( isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower ): if isinstance(operation, AddField): return [ CreateModel( self.name, fields=self.fields + [(operation.name, operation.field)], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, AlterField): return [ CreateModel( self.name, fields=[ (n, operation.field if n == operation.name else v) for n, v in self.fields ], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RemoveField): options = self.options.copy() for option_name in ("unique_together", "index_together"): option = options.pop(option_name, None) if option: option = set( filter( bool, ( tuple( f for f in fields if f != operation.name_lower ) for fields in option ), ) ) if option: options[option_name] = option order_with_respect_to = options.get("order_with_respect_to") if order_with_respect_to == operation.name_lower: del options["order_with_respect_to"] return [ CreateModel( self.name, fields=[ (n, v) for n, v in self.fields if n.lower() != operation.name_lower ], options=options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RenameField): options = self.options.copy() for option_name in ("unique_together", "index_together"): option = options.get(option_name) if option: options[option_name] = { tuple( operation.new_name if f == operation.old_name else f for f in fields ) for fields in option } order_with_respect_to = options.get("order_with_respect_to") if order_with_respect_to == operation.old_name: options["order_with_respect_to"] = operation.new_name return [ CreateModel( self.name, fields=[ (operation.new_name if n == operation.old_name else n, v) for n, v in self.fields ], options=options, bases=self.bases, managers=self.managers, ), ] return super().reduce(operation, app_label) class DeleteModel(ModelOperation): """Drop a model's table.""" def deconstruct(self): kwargs 
= { "name": self.name, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.remove_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def references_model(self, name, app_label): # The deleted model could be referencing the specified model through # related fields. return True def describe(self): return "Delete model %s" % self.name @property def migration_name_fragment(self): return "delete_%s" % self.name_lower class RenameModel(ModelOperation): """Rename a model.""" def __init__(self, old_name, new_name): self.old_name = old_name self.new_name = new_name super().__init__(old_name) @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { "old_name": self.old_name, "new_name": self.new_name, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.rename_model(app_label, self.old_name, self.new_name) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.new_name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.old_name) # Move the main table schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Alter the fields pointing to us for related_object in old_model._meta.related_objects: if related_object.related_model == old_model: model = new_model related_key = (app_label, self.new_name_lower) else: model = related_object.related_model related_key = ( related_object.related_model._meta.app_label, related_object.related_model._meta.model_name, ) to_field = to_state.apps.get_model(*related_key)._meta.get_field( related_object.field.name ) schema_editor.alter_field( model, related_object.field, to_field, ) # Rename M2M fields whose name is based on this model's name. fields = zip( old_model._meta.local_many_to_many, new_model._meta.local_many_to_many ) for (old_field, new_field) in fields: # Skip self-referential fields as these are renamed above. if ( new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created ): continue # Rename the M2M table that's based on this model's name. old_m2m_model = old_field.remote_field.through new_m2m_model = new_field.remote_field.through schema_editor.alter_db_table( new_m2m_model, old_m2m_model._meta.db_table, new_m2m_model._meta.db_table, ) # Rename the column in the M2M table that's based on this # model's name. 
schema_editor.alter_field( new_m2m_model, old_m2m_model._meta.get_field(old_model._meta.model_name), new_m2m_model._meta.get_field(new_model._meta.model_name), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.new_name_lower, self.old_name_lower = ( self.old_name_lower, self.new_name_lower, ) self.new_name, self.old_name = self.old_name, self.new_name self.database_forwards(app_label, schema_editor, from_state, to_state) self.new_name_lower, self.old_name_lower = ( self.old_name_lower, self.new_name_lower, ) self.new_name, self.old_name = self.old_name, self.new_name def references_model(self, name, app_label): return ( name.lower() == self.old_name_lower or name.lower() == self.new_name_lower ) def describe(self): return "Rename model %s to %s" % (self.old_name, self.new_name) @property def migration_name_fragment(self): return "rename_%s_%s" % (self.old_name_lower, self.new_name_lower) def reduce(self, operation, app_label): if ( isinstance(operation, RenameModel) and self.new_name_lower == operation.old_name_lower ): return [ RenameModel( self.old_name, operation.new_name, ), ] # Skip `ModelOperation.reduce` as we want to run `references_model` # against self.new_name. return super(ModelOperation, self).reduce( operation, app_label ) or not operation.references_model(self.new_name, app_label) class ModelOptionOperation(ModelOperation): def reduce(self, operation, app_label): if ( isinstance(operation, (self.__class__, DeleteModel)) and self.name_lower == operation.name_lower ): return [operation] return super().reduce(operation, app_label) class AlterModelTable(ModelOptionOperation): """Rename a model's table.""" def __init__(self, name, table): self.table = table super().__init__(name) def deconstruct(self): kwargs = { "name": self.name, "table": self.table, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.alter_model_options(app_label, self.name_lower, {"db_table": self.table}) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Rename M2M fields whose name is based on this model's db_table for (old_field, new_field) in zip( old_model._meta.local_many_to_many, new_model._meta.local_many_to_many ): if new_field.remote_field.through._meta.auto_created: schema_editor.alter_db_table( new_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def describe(self): return "Rename table for %s to %s" % ( self.name, self.table if self.table is not None else "(default)", ) @property def migration_name_fragment(self): return "alter_%s_table" % self.name_lower class AlterTogetherOptionOperation(ModelOptionOperation): option_name = None def __init__(self, name, option_value): if option_value: option_value = set(normalize_together(option_value)) setattr(self, self.option_name, option_value) super().__init__(name) @cached_property def option_value(self): return getattr(self, self.option_name) def deconstruct(self): kwargs = { "name": self.name, self.option_name: self.option_value, } 
return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.alter_model_options( app_label, self.name_lower, {self.option_name: self.option_value}, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) alter_together = getattr(schema_editor, "alter_%s" % self.option_name) alter_together( new_model, getattr(old_model._meta, self.option_name, set()), getattr(new_model._meta, self.option_name, set()), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label): return self.references_model(model_name, app_label) and ( not self.option_value or any((name in fields) for fields in self.option_value) ) def describe(self): return "Alter %s for %s (%s constraint(s))" % ( self.option_name, self.name, len(self.option_value or ""), ) @property def migration_name_fragment(self): return "alter_%s_%s" % (self.name_lower, self.option_name) def can_reduce_through(self, operation, app_label): return super().can_reduce_through(operation, app_label) or ( isinstance(operation, AlterTogetherOptionOperation) and type(operation) is not type(self) ) class AlterUniqueTogether(AlterTogetherOptionOperation): """ Change the value of unique_together to the target one. Input value of unique_together must be a set of tuples. """ option_name = "unique_together" def __init__(self, name, unique_together): super().__init__(name, unique_together) class AlterIndexTogether(AlterTogetherOptionOperation): """ Change the value of index_together to the target one. Input value of index_together must be a set of tuples. 
""" option_name = "index_together" def __init__(self, name, index_together): super().__init__(name, index_together) class AlterOrderWithRespectTo(ModelOptionOperation): """Represent a change with the order_with_respect_to option.""" option_name = "order_with_respect_to" def __init__(self, name, order_with_respect_to): self.order_with_respect_to = order_with_respect_to super().__init__(name) def deconstruct(self): kwargs = { "name": self.name, "order_with_respect_to": self.order_with_respect_to, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.alter_model_options( app_label, self.name_lower, {self.option_name: self.order_with_respect_to}, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.name) # Remove a field if we need to if ( from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to ): schema_editor.remove_field( from_model, from_model._meta.get_field("_order") ) # Add a field if we need to (altering the column is untouched as # it's likely a rename) elif ( to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to ): field = to_model._meta.get_field("_order") if not field.has_default(): field.default = 0 schema_editor.add_field( from_model, field, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label): return self.references_model(model_name, app_label) and ( self.order_with_respect_to is None or name == self.order_with_respect_to ) def describe(self): return "Set order_with_respect_to on %s to %s" % ( self.name, self.order_with_respect_to, ) @property def migration_name_fragment(self): return "alter_%s_order_with_respect_to" % self.name_lower class AlterModelOptions(ModelOptionOperation): """ Set new model options that don't directly affect the database schema (like verbose_name, permissions, ordering). Python code in migrations may still need them. 
""" # Model options we want to compare and preserve in an AlterModelOptions op ALTER_OPTION_KEYS = [ "base_manager_name", "default_manager_name", "default_related_name", "get_latest_by", "managed", "ordering", "permissions", "default_permissions", "select_on_save", "verbose_name", "verbose_name_plural", ] def __init__(self, name, options): self.options = options super().__init__(name) def deconstruct(self): kwargs = { "name": self.name, "options": self.options, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.alter_model_options( app_label, self.name_lower, self.options, self.ALTER_OPTION_KEYS, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def describe(self): return "Change Meta options on %s" % self.name @property def migration_name_fragment(self): return "alter_%s_options" % self.name_lower class AlterModelManagers(ModelOptionOperation): """Alter the model's managers.""" serialization_expand_args = ["managers"] def __init__(self, name, managers): self.managers = managers super().__init__(name) def deconstruct(self): return (self.__class__.__qualname__, [self.name, self.managers], {}) def state_forwards(self, app_label, state): state.alter_model_managers(app_label, self.name_lower, self.managers) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def describe(self): return "Change managers on %s" % self.name @property def migration_name_fragment(self): return "alter_%s_managers" % self.name_lower class IndexOperation(Operation): option_name = "indexes" @cached_property def model_name_lower(self): return self.model_name.lower() class AddIndex(IndexOperation): """Add an index on a model.""" def __init__(self, model_name, index): self.model_name = model_name if not index.name: raise ValueError( "Indexes passed to AddIndex operations require a name " "argument. %r doesn't have one." 
% index ) self.index = index def state_forwards(self, app_label, state): state.add_index(app_label, self.model_name_lower, self.index) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.add_index(model, self.index) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.remove_index(model, self.index) def deconstruct(self): kwargs = { "model_name": self.model_name, "index": self.index, } return ( self.__class__.__qualname__, [], kwargs, ) def describe(self): if self.index.expressions: return "Create index %s on %s on model %s" % ( self.index.name, ", ".join([str(expression) for expression in self.index.expressions]), self.model_name, ) return "Create index %s on field(s) %s of model %s" % ( self.index.name, ", ".join(self.index.fields), self.model_name, ) @property def migration_name_fragment(self): return "%s_%s" % (self.model_name_lower, self.index.name.lower()) class RemoveIndex(IndexOperation): """Remove an index from a model.""" def __init__(self, model_name, name): self.model_name = model_name self.name = name def state_forwards(self, app_label, state): state.remove_index(app_label, self.model_name_lower, self.name) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): from_model_state = from_state.models[app_label, self.model_name_lower] index = from_model_state.get_index_by_name(self.name) schema_editor.remove_index(model, index) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): to_model_state = to_state.models[app_label, self.model_name_lower] index = to_model_state.get_index_by_name(self.name) schema_editor.add_index(model, index) def deconstruct(self): kwargs = { "model_name": self.model_name, "name": self.name, } return ( self.__class__.__qualname__, [], kwargs, ) def describe(self): return "Remove index %s from %s" % (self.name, self.model_name) @property def migration_name_fragment(self): return "remove_%s_%s" % (self.model_name_lower, self.name.lower()) class RenameIndex(IndexOperation): """Rename an index.""" def __init__(self, model_name, new_name, old_name=None, old_fields=None): if not old_name and not old_fields: raise ValueError( "RenameIndex requires one of old_name and old_fields arguments to be " "set." ) if old_name and old_fields: raise ValueError( "RenameIndex.old_name and old_fields are mutually exclusive." 
) self.model_name = model_name self.new_name = new_name self.old_name = old_name self.old_fields = old_fields @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { "model_name": self.model_name, "new_name": self.new_name, } if self.old_name: kwargs["old_name"] = self.old_name if self.old_fields: kwargs["old_fields"] = self.old_fields return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): if self.old_fields: state.add_index( app_label, self.model_name_lower, models.Index(fields=self.old_fields, name=self.new_name), ) state.remove_model_options( app_label, self.model_name_lower, AlterIndexTogether.option_name, self.old_fields, ) else: state.rename_index( app_label, self.model_name_lower, self.old_name, self.new_name ) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if not self.allow_migrate_model(schema_editor.connection.alias, model): return if self.old_fields: from_model = from_state.apps.get_model(app_label, self.model_name) columns = [ from_model._meta.get_field(field).column for field in self.old_fields ] matching_index_name = schema_editor._constraint_names( from_model, column_names=columns, index=True ) if len(matching_index_name) != 1: raise ValueError( "Found wrong number (%s) of indexes for %s(%s)." % ( len(matching_index_name), from_model._meta.db_table, ", ".join(columns), ) ) old_index = models.Index( fields=self.old_fields, name=matching_index_name[0], ) else: from_model_state = from_state.models[app_label, self.model_name_lower] old_index = from_model_state.get_index_by_name(self.old_name) # Don't alter when the index name is not changed. if old_index.name == self.new_name: return to_model_state = to_state.models[app_label, self.model_name_lower] new_index = to_model_state.get_index_by_name(self.new_name) schema_editor.rename_index(model, old_index, new_index) def database_backwards(self, app_label, schema_editor, from_state, to_state): if self.old_fields: # Backward operation with unnamed index is a no-op. 
return self.new_name_lower, self.old_name_lower = ( self.old_name_lower, self.new_name_lower, ) self.new_name, self.old_name = self.old_name, self.new_name self.database_forwards(app_label, schema_editor, from_state, to_state) self.new_name_lower, self.old_name_lower = ( self.old_name_lower, self.new_name_lower, ) self.new_name, self.old_name = self.old_name, self.new_name def describe(self): if self.old_name: return ( f"Rename index {self.old_name} on {self.model_name} to {self.new_name}" ) return ( f"Rename unnamed index for {self.old_fields} on {self.model_name} to " f"{self.new_name}" ) @property def migration_name_fragment(self): if self.old_name: return "rename_%s_%s" % (self.old_name_lower, self.new_name_lower) return "rename_%s_%s_%s" % ( self.model_name_lower, "_".join(self.old_fields), self.new_name_lower, ) def reduce(self, operation, app_label): if ( isinstance(operation, RenameIndex) and self.model_name_lower == operation.model_name_lower and operation.old_name and self.new_name_lower == operation.old_name_lower ): return [ RenameIndex( self.model_name, new_name=operation.new_name, old_name=self.old_name, old_fields=self.old_fields, ) ] return super().reduce(operation, app_label) class AddConstraint(IndexOperation): option_name = "constraints" def __init__(self, model_name, constraint): self.model_name = model_name self.constraint = constraint def state_forwards(self, app_label, state): state.add_constraint(app_label, self.model_name_lower, self.constraint) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.add_constraint(model, self.constraint) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.remove_constraint(model, self.constraint) def deconstruct(self): return ( self.__class__.__name__, [], { "model_name": self.model_name, "constraint": self.constraint, }, ) def describe(self): return "Create constraint %s on model %s" % ( self.constraint.name, self.model_name, ) @property def migration_name_fragment(self): return "%s_%s" % (self.model_name_lower, self.constraint.name.lower()) class RemoveConstraint(IndexOperation): option_name = "constraints" def __init__(self, model_name, name): self.model_name = model_name self.name = name def state_forwards(self, app_label, state): state.remove_constraint(app_label, self.model_name_lower, self.name) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): from_model_state = from_state.models[app_label, self.model_name_lower] constraint = from_model_state.get_constraint_by_name(self.name) schema_editor.remove_constraint(model, constraint) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): to_model_state = to_state.models[app_label, self.model_name_lower] constraint = to_model_state.get_constraint_by_name(self.name) schema_editor.add_constraint(model, constraint) def deconstruct(self): return ( self.__class__.__name__, [], { "model_name": self.model_name, "name": self.name, }, ) def describe(self): return 
"Remove constraint %s from model %s" % (self.name, self.model_name) @property def migration_name_fragment(self): return "remove_%s_%s" % (self.model_name_lower, self.name.lower())
e57adc31298decde6ea928ef17a7f5456e8fe9663a8fccea74935ed7b5f837b5
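# --- Illustrative usage sketch (not part of the Django source in this dump) ---
# How the model operations defined in the migration operations module above
# (CreateModel, AddIndex, AddConstraint, AlterModelTable, ...) typically appear
# in a migration file. The "catalog" app, the Product model, and the index and
# constraint names are hypothetical; such files are normally generated by
# `manage.py makemigrations`.
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ("catalog", "0001_initial"),  # hypothetical earlier migration
    ]

    operations = [
        migrations.CreateModel(
            name="Product",
            fields=[
                ("id", models.AutoField(primary_key=True, serialize=False)),
                ("sku", models.CharField(max_length=32)),
                ("price", models.DecimalField(max_digits=8, decimal_places=2)),
            ],
            options={"ordering": ["sku"]},
        ),
        migrations.AddIndex(
            model_name="product",
            index=models.Index(fields=["sku"], name="catalog_sku_idx"),
        ),
        migrations.AddConstraint(
            model_name="product",
            constraint=models.CheckConstraint(
                check=models.Q(price__gte=0),
                name="catalog_price_gte_0",
            ),
        ),
        migrations.AlterModelTable(
            name="product",
            table="catalog_products",
        ),
    ]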
import warnings from django.db.models.lookups import ( Exact, GreaterThan, GreaterThanOrEqual, In, IsNull, LessThan, LessThanOrEqual, ) from django.utils.deprecation import RemovedInDjango50Warning class MultiColSource: contains_aggregate = False contains_over_clause = False def __init__(self, alias, targets, sources, field): self.targets, self.sources, self.field, self.alias = ( targets, sources, field, alias, ) self.output_field = self.field def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.alias, self.field) def relabeled_clone(self, relabels): return self.__class__( relabels.get(self.alias, self.alias), self.targets, self.sources, self.field ) def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def resolve_expression(self, *args, **kwargs): return self def get_normalized_value(value, lhs): from django.db.models import Model if isinstance(value, Model): if value.pk is None: # When the deprecation ends, replace with: # raise ValueError( # "Model instances passed to related filters must be saved." # ) warnings.warn( "Passing unsaved model instances to related filters is deprecated.", RemovedInDjango50Warning, ) value_list = [] sources = lhs.output_field.path_infos[-1].target_fields for source in sources: while not isinstance(value, source.model) and source.remote_field: source = source.remote_field.model._meta.get_field( source.remote_field.field_name ) try: value_list.append(getattr(value, source.attname)) except AttributeError: # A case like Restaurant.objects.filter(place=restaurant_instance), # where place is a OneToOneField and the primary key of Restaurant. return (value.pk,) return tuple(value_list) if not isinstance(value, tuple): return (value,) return value class RelatedIn(In): def get_prep_lookup(self): if not isinstance(self.lhs, MultiColSource): if self.rhs_is_direct_value(): # If we get here, we are dealing with single-column relations. self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs] # We need to run the related field's get_prep_value(). Consider # case ForeignKey to IntegerField given value 'abc'. The # ForeignKey itself doesn't have validation for non-integers, # so we must run validation using the target field. if hasattr(self.lhs.output_field, "path_infos"): # Run the target field's get_prep_value. We can safely # assume there is only one as we don't get to the direct # value branch otherwise. target_field = self.lhs.output_field.path_infos[-1].target_fields[ -1 ] self.rhs = [target_field.get_prep_value(v) for v in self.rhs] elif not getattr(self.rhs, "has_select_fields", True) and not getattr( self.lhs.field.target_field, "primary_key", False ): if ( getattr(self.lhs.output_field, "primary_key", False) and self.lhs.output_field.model == self.rhs.model ): # A case like # Restaurant.objects.filter(place__in=restaurant_qs), where # place is a OneToOneField and the primary key of # Restaurant. target_field = self.lhs.field.name else: target_field = self.lhs.field.target_field.name self.rhs.set_values([target_field]) return super().get_prep_lookup() def as_sql(self, compiler, connection): if isinstance(self.lhs, MultiColSource): # For multicolumn lookups we need to build a multicolumn where clause. # This clause is either a SubqueryConstraint (for values that need # to be compiled to SQL) or an OR-combined list of # (col1 = val1 AND col2 = val2 AND ...) clauses. 
from django.db.models.sql.where import ( AND, OR, SubqueryConstraint, WhereNode, ) root_constraint = WhereNode(connector=OR) if self.rhs_is_direct_value(): values = [get_normalized_value(value, self.lhs) for value in self.rhs] for value in values: value_constraint = WhereNode() for source, target, val in zip( self.lhs.sources, self.lhs.targets, value ): lookup_class = target.get_lookup("exact") lookup = lookup_class( target.get_col(self.lhs.alias, source), val ) value_constraint.add(lookup, AND) root_constraint.add(value_constraint, OR) else: root_constraint.add( SubqueryConstraint( self.lhs.alias, [target.column for target in self.lhs.targets], [source.name for source in self.lhs.sources], self.rhs, ), AND, ) return root_constraint.as_sql(compiler, connection) return super().as_sql(compiler, connection) class RelatedLookupMixin: def get_prep_lookup(self): if not isinstance(self.lhs, MultiColSource) and not hasattr( self.rhs, "resolve_expression" ): # If we get here, we are dealing with single-column relations. self.rhs = get_normalized_value(self.rhs, self.lhs)[0] # We need to run the related field's get_prep_value(). Consider case # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself # doesn't have validation for non-integers, so we must run validation # using the target field. if self.prepare_rhs and hasattr(self.lhs.output_field, "path_infos"): # Get the target field. We can safely assume there is only one # as we don't get to the direct value branch otherwise. target_field = self.lhs.output_field.path_infos[-1].target_fields[-1] self.rhs = target_field.get_prep_value(self.rhs) return super().get_prep_lookup() def as_sql(self, compiler, connection): if isinstance(self.lhs, MultiColSource): assert self.rhs_is_direct_value() self.rhs = get_normalized_value(self.rhs, self.lhs) from django.db.models.sql.where import AND, WhereNode root_constraint = WhereNode() for target, source, val in zip( self.lhs.targets, self.lhs.sources, self.rhs ): lookup_class = target.get_lookup(self.lookup_name) root_constraint.add( lookup_class(target.get_col(self.lhs.alias, source), val), AND ) return root_constraint.as_sql(compiler, connection) return super().as_sql(compiler, connection) class RelatedExact(RelatedLookupMixin, Exact): pass class RelatedLessThan(RelatedLookupMixin, LessThan): pass class RelatedGreaterThan(RelatedLookupMixin, GreaterThan): pass class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual): pass class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual): pass class RelatedIsNull(RelatedLookupMixin, IsNull): pass
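# --- Illustrative usage sketch (not part of the related_lookups module above) ---
# The lookup classes above are not used directly; they back ordinary ORM
# filters on relational fields. The Place/Restaurant models below are
# hypothetical and assume an installed app.
from django.db import models


class Place(models.Model):
    name = models.CharField(max_length=50)

    class Meta:
        app_label = "example"  # hypothetical app


class Restaurant(models.Model):
    # A OneToOneField used as the primary key, mirroring the comments above.
    place = models.OneToOneField(Place, models.CASCADE, primary_key=True)

    class Meta:
        app_label = "example"


def related_filter_examples(place):
    # Filtering on a relation with a model instance goes through RelatedExact;
    # get_normalized_value() reduces `place` to its primary key value. Passing
    # an unsaved instance currently emits RemovedInDjango50Warning, as handled
    # above.
    by_instance = Restaurant.objects.filter(place=place)

    # Filtering with a queryset goes through RelatedIn and is compiled as a
    # subquery against the related field's target column.
    by_subquery = Restaurant.objects.filter(
        place__in=Place.objects.filter(name__startswith="A")
    )
    return by_instance, by_subquery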
b3305eaa3a18c5c42ef38899d040c27c153f3bf4b847246a51cf297522c662bd
import collections.abc import copy import datetime import decimal import operator import uuid import warnings from base64 import b64decode, b64encode from functools import partialmethod, total_ordering from django import forms from django.apps import apps from django.conf import settings from django.core import checks, exceptions, validators from django.db import connection, connections, router from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin from django.utils import timezone from django.utils.datastructures import DictWrapper from django.utils.dateparse import ( parse_date, parse_datetime, parse_duration, parse_time, ) from django.utils.duration import duration_microseconds, duration_string from django.utils.functional import Promise, cached_property from django.utils.ipv6 import clean_ipv6_address from django.utils.itercompat import is_iterable from django.utils.text import capfirst from django.utils.translation import gettext_lazy as _ __all__ = [ "AutoField", "BLANK_CHOICE_DASH", "BigAutoField", "BigIntegerField", "BinaryField", "BooleanField", "CharField", "CommaSeparatedIntegerField", "DateField", "DateTimeField", "DecimalField", "DurationField", "EmailField", "Empty", "Field", "FilePathField", "FloatField", "GenericIPAddressField", "IPAddressField", "IntegerField", "NOT_PROVIDED", "NullBooleanField", "PositiveBigIntegerField", "PositiveIntegerField", "PositiveSmallIntegerField", "SlugField", "SmallAutoField", "SmallIntegerField", "TextField", "TimeField", "URLField", "UUIDField", ] class Empty: pass class NOT_PROVIDED: pass # The values to use for "blank" in SelectFields. Will be appended to the start # of most "choices" lists. BLANK_CHOICE_DASH = [("", "---------")] def _load_field(app_label, model_name, field_name): return apps.get_model(app_label, model_name)._meta.get_field(field_name) # A guide to Field parameters: # # * name: The name of the field specified in the model. # * attname: The attribute to use on the model object. This is the same as # "name", except in the case of ForeignKeys, where "_id" is # appended. # * db_column: The db_column specified in the model (or None). # * column: The database column for this field. This is the same as # "attname", except if db_column is specified. # # Code that introspects values, or does other dynamic things, should use # attname. For example, this gets the primary key value of object "obj": # # getattr(obj, opts.pk.attname) def _empty(of_cls): new = Empty() new.__class__ = of_cls return new def return_None(): return None @total_ordering class Field(RegisterLookupMixin): """Base class for all field types""" # Designates whether empty strings fundamentally are allowed at the # database level. empty_strings_allowed = True empty_values = list(validators.EMPTY_VALUES) # These track each time a Field instance is created. Used to retain order. # The auto_creation_counter is used for fields that Django implicitly # creates, creation_counter is used for all user-specified fields. creation_counter = 0 auto_creation_counter = -1 default_validators = [] # Default set of validators default_error_messages = { "invalid_choice": _("Value %(value)r is not a valid choice."), "null": _("This field cannot be null."), "blank": _("This field cannot be blank."), "unique": _("%(model_name)s with this %(field_label)s already exists."), "unique_for_date": _( # Translators: The 'lookup_type' is one of 'date', 'year' or # 'month'. 
Eg: "Title must be unique for pub_date year" "%(field_label)s must be unique for " "%(date_field_label)s %(lookup_type)s." ), } system_check_deprecated_details = None system_check_removed_details = None # Attributes that don't affect a column definition. # These attributes are ignored when altering the field. non_db_attrs = ( "blank", "choices", "db_column", "editable", "error_messages", "help_text", "limit_choices_to", # Database-level options are not supported, see #21961. "on_delete", "related_name", "related_query_name", "validators", "verbose_name", ) # Field flags hidden = False many_to_many = None many_to_one = None one_to_many = None one_to_one = None related_model = None descriptor_class = DeferredAttribute # Generic field type description, usually overridden by subclasses def _description(self): return _("Field of type: %(field_type)s") % { "field_type": self.__class__.__name__ } description = property(_description) def __init__( self, verbose_name=None, name=None, primary_key=False, max_length=None, unique=False, blank=False, null=False, db_index=False, rel=None, default=NOT_PROVIDED, editable=True, serialize=True, unique_for_date=None, unique_for_month=None, unique_for_year=None, choices=None, help_text="", db_column=None, db_tablespace=None, auto_created=False, validators=(), error_messages=None, ): self.name = name self.verbose_name = verbose_name # May be set by set_attributes_from_name self._verbose_name = verbose_name # Store original for deconstruction self.primary_key = primary_key self.max_length, self._unique = max_length, unique self.blank, self.null = blank, null self.remote_field = rel self.is_relation = self.remote_field is not None self.default = default self.editable = editable self.serialize = serialize self.unique_for_date = unique_for_date self.unique_for_month = unique_for_month self.unique_for_year = unique_for_year if isinstance(choices, collections.abc.Iterator): choices = list(choices) self.choices = choices self.help_text = help_text self.db_index = db_index self.db_column = db_column self._db_tablespace = db_tablespace self.auto_created = auto_created # Adjust the appropriate creation counter, and save our local copy. if auto_created: self.creation_counter = Field.auto_creation_counter Field.auto_creation_counter -= 1 else: self.creation_counter = Field.creation_counter Field.creation_counter += 1 self._validators = list(validators) # Store for deconstruction later self._error_messages = error_messages # Store for deconstruction later def __str__(self): """ Return "app_label.model_label.field_name" for fields attached to models. """ if not hasattr(self, "model"): return super().__str__() model = self.model return "%s.%s" % (model._meta.label, self.name) def __repr__(self): """Display the module, class, and name of the field.""" path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) name = getattr(self, "name", None) if name is not None: return "<%s: %s>" % (path, name) return "<%s>" % path def check(self, **kwargs): return [ *self._check_field_name(), *self._check_choices(), *self._check_db_index(), *self._check_null_allowed_for_primary_keys(), *self._check_backend_specific_checks(**kwargs), *self._check_validators(), *self._check_deprecation_details(), ] def _check_field_name(self): """ Check if field name is valid, i.e. 1) does not end with an underscore, 2) does not contain "__" and 3) is not "pk". 
""" if self.name.endswith("_"): return [ checks.Error( "Field names must not end with an underscore.", obj=self, id="fields.E001", ) ] elif LOOKUP_SEP in self.name: return [ checks.Error( 'Field names must not contain "%s".' % LOOKUP_SEP, obj=self, id="fields.E002", ) ] elif self.name == "pk": return [ checks.Error( "'pk' is a reserved word that cannot be used as a field name.", obj=self, id="fields.E003", ) ] else: return [] @classmethod def _choices_is_value(cls, value): return isinstance(value, (str, Promise)) or not is_iterable(value) def _check_choices(self): if not self.choices: return [] if not is_iterable(self.choices) or isinstance(self.choices, str): return [ checks.Error( "'choices' must be an iterable (e.g., a list or tuple).", obj=self, id="fields.E004", ) ] choice_max_length = 0 # Expect [group_name, [value, display]] for choices_group in self.choices: try: group_name, group_choices = choices_group except (TypeError, ValueError): # Containing non-pairs break try: if not all( self._choices_is_value(value) and self._choices_is_value(human_name) for value, human_name in group_choices ): break if self.max_length is not None and group_choices: choice_max_length = max( [ choice_max_length, *( len(value) for value, _ in group_choices if isinstance(value, str) ), ] ) except (TypeError, ValueError): # No groups, choices in the form [value, display] value, human_name = group_name, group_choices if not self._choices_is_value(value) or not self._choices_is_value( human_name ): break if self.max_length is not None and isinstance(value, str): choice_max_length = max(choice_max_length, len(value)) # Special case: choices=['ab'] if isinstance(choices_group, str): break else: if self.max_length is not None and choice_max_length > self.max_length: return [ checks.Error( "'max_length' is too small to fit the longest value " "in 'choices' (%d characters)." % choice_max_length, obj=self, id="fields.E009", ), ] return [] return [ checks.Error( "'choices' must be an iterable containing " "(actual value, human readable name) tuples.", obj=self, id="fields.E005", ) ] def _check_db_index(self): if self.db_index not in (None, True, False): return [ checks.Error( "'db_index' must be None, True or False.", obj=self, id="fields.E006", ) ] else: return [] def _check_null_allowed_for_primary_keys(self): if ( self.primary_key and self.null and not connection.features.interprets_empty_strings_as_nulls ): # We cannot reliably check this for backends like Oracle which # consider NULL and '' to be equal (and thus set up # character-based fields a little differently). return [ checks.Error( "Primary keys must not have null=True.", hint=( "Set null=False on the field, or " "remove primary_key=True argument." 
), obj=self, id="fields.E007", ) ] else: return [] def _check_backend_specific_checks(self, databases=None, **kwargs): if databases is None: return [] app_label = self.model._meta.app_label errors = [] for alias in databases: if router.allow_migrate( alias, app_label, model_name=self.model._meta.model_name ): errors.extend(connections[alias].validation.check_field(self, **kwargs)) return errors def _check_validators(self): errors = [] for i, validator in enumerate(self.validators): if not callable(validator): errors.append( checks.Error( "All 'validators' must be callable.", hint=( "validators[{i}] ({repr}) isn't a function or " "instance of a validator class.".format( i=i, repr=repr(validator), ) ), obj=self, id="fields.E008", ) ) return errors def _check_deprecation_details(self): if self.system_check_removed_details is not None: return [ checks.Error( self.system_check_removed_details.get( "msg", "%s has been removed except for support in historical " "migrations." % self.__class__.__name__, ), hint=self.system_check_removed_details.get("hint"), obj=self, id=self.system_check_removed_details.get("id", "fields.EXXX"), ) ] elif self.system_check_deprecated_details is not None: return [ checks.Warning( self.system_check_deprecated_details.get( "msg", "%s has been deprecated." % self.__class__.__name__ ), hint=self.system_check_deprecated_details.get("hint"), obj=self, id=self.system_check_deprecated_details.get("id", "fields.WXXX"), ) ] return [] def get_col(self, alias, output_field=None): if alias == self.model._meta.db_table and ( output_field is None or output_field == self ): return self.cached_col from django.db.models.expressions import Col return Col(alias, self, output_field) @cached_property def cached_col(self): from django.db.models.expressions import Col return Col(self.model._meta.db_table, self) def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, GIS columns need to be selected as AsText(table.col) on MySQL as the table.col data can't be used by Django. """ return sql, params def deconstruct(self): """ Return enough information to recreate the field as a 4-tuple: * The name of the field on the model, if contribute_to_class() has been run. * The import path of the field, including the class, e.g. django.db.models.IntegerField. This should be the most portable version, so less specific may be better. * A list of positional arguments. * A dict of keyword arguments. Note that the positional or keyword arguments must contain values of the following types (including inner values of collection types): * None, bool, str, int, float, complex, set, frozenset, list, tuple, dict * UUID * datetime.datetime (naive), datetime.date * top-level classes, top-level functions - will be referenced by their full import path * Storage instances - these have their own deconstruct() method This is because the values here must be serialized into a text format (possibly new Python code, possibly JSON) and these are the only types with encoding handlers defined. There's no need to return the exact way the field was instantiated this time, just ensure that the resulting field is the same - prefer keyword arguments over positional ones, and omit parameters with their default values. 
""" # Short-form way of fetching all the default parameters keywords = {} possibles = { "verbose_name": None, "primary_key": False, "max_length": None, "unique": False, "blank": False, "null": False, "db_index": False, "default": NOT_PROVIDED, "editable": True, "serialize": True, "unique_for_date": None, "unique_for_month": None, "unique_for_year": None, "choices": None, "help_text": "", "db_column": None, "db_tablespace": None, "auto_created": False, "validators": [], "error_messages": None, } attr_overrides = { "unique": "_unique", "error_messages": "_error_messages", "validators": "_validators", "verbose_name": "_verbose_name", "db_tablespace": "_db_tablespace", } equals_comparison = {"choices", "validators"} for name, default in possibles.items(): value = getattr(self, attr_overrides.get(name, name)) # Unroll anything iterable for choices into a concrete list if name == "choices" and isinstance(value, collections.abc.Iterable): value = list(value) # Do correct kind of comparison if name in equals_comparison: if value != default: keywords[name] = value else: if value is not default: keywords[name] = value # Work out path - we shorten it for known Django core fields path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) if path.startswith("django.db.models.fields.related"): path = path.replace("django.db.models.fields.related", "django.db.models") elif path.startswith("django.db.models.fields.files"): path = path.replace("django.db.models.fields.files", "django.db.models") elif path.startswith("django.db.models.fields.json"): path = path.replace("django.db.models.fields.json", "django.db.models") elif path.startswith("django.db.models.fields.proxy"): path = path.replace("django.db.models.fields.proxy", "django.db.models") elif path.startswith("django.db.models.fields"): path = path.replace("django.db.models.fields", "django.db.models") # Return basic info - other fields should override this. return (self.name, path, [], keywords) def clone(self): """ Uses deconstruct() to clone a new copy of this Field. Will not preserve any class attachments/attribute names. """ name, path, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) def __eq__(self, other): # Needed for @total_ordering if isinstance(other, Field): return self.creation_counter == other.creation_counter and getattr( self, "model", None ) == getattr(other, "model", None) return NotImplemented def __lt__(self, other): # This is needed because bisect does not take a comparison function. # Order by creation_counter first for backward compatibility. if isinstance(other, Field): if ( self.creation_counter != other.creation_counter or not hasattr(self, "model") and not hasattr(other, "model") ): return self.creation_counter < other.creation_counter elif hasattr(self, "model") != hasattr(other, "model"): return not hasattr(self, "model") # Order no-model fields first else: # creation_counter's are equal, compare only models. return (self.model._meta.app_label, self.model._meta.model_name) < ( other.model._meta.app_label, other.model._meta.model_name, ) return NotImplemented def __hash__(self): return hash(self.creation_counter) def __deepcopy__(self, memodict): # We don't have to deepcopy very much here, since most things are not # intended to be altered after initial creation. 
obj = copy.copy(self) if self.remote_field: obj.remote_field = copy.copy(self.remote_field) if hasattr(self.remote_field, "field") and self.remote_field.field is self: obj.remote_field.field = obj memodict[id(self)] = obj return obj def __copy__(self): # We need to avoid hitting __reduce__, so define this # slightly weird copy construct. obj = Empty() obj.__class__ = self.__class__ obj.__dict__ = self.__dict__.copy() return obj def __reduce__(self): """ Pickling should return the model._meta.fields instance of the field, not a new copy of that field. So, use the app registry to load the model and then the field back. """ if not hasattr(self, "model"): # Fields are sometimes used without attaching them to models (for # example in aggregation). In this case give back a plain field # instance. The code below will create a new empty instance of # class self.__class__, then update its dict with self.__dict__ # values - so, this is very close to normal pickle. state = self.__dict__.copy() # The _get_default cached_property can't be pickled due to lambda # usage. state.pop("_get_default", None) return _empty, (self.__class__,), state return _load_field, ( self.model._meta.app_label, self.model._meta.object_name, self.name, ) def get_pk_value_on_save(self, instance): """ Hook to generate new PK values on save. This method is called when saving instances with no primary key value set. If this method returns something else than None, then the returned value is used when saving the new instance. """ if self.default: return self.get_default() return None def to_python(self, value): """ Convert the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Return the converted value. Subclasses should override this. """ return value @cached_property def error_messages(self): messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, "default_error_messages", {})) messages.update(self._error_messages or {}) return messages @cached_property def validators(self): """ Some validators can't be created at field initialization time. This method provides a way to delay their creation until required. """ return [*self.default_validators, *self._validators] def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except exceptions.ValidationError as e: if hasattr(e, "code") and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise exceptions.ValidationError(errors) def validate(self, value, model_instance): """ Validate value and raise ValidationError if necessary. Subclasses should override this to provide validation logic. """ if not self.editable: # Skip validation for non-editable fields. return if self.choices is not None and value not in self.empty_values: for option_key, option_value in self.choices: if isinstance(option_value, (list, tuple)): # This is an optgroup, so look inside the group for # options. 
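                    # Illustrative grouped-choices shape (hypothetical values):
                    #   [("Audio", [("vinyl", "Vinyl"), ("cd", "CD")]),
                    #    ("unknown", "Unknown")]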
for optgroup_key, optgroup_value in option_value: if value == optgroup_key: return elif value == option_key: return raise exceptions.ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": value}, ) if value is None and not self.null: raise exceptions.ValidationError(self.error_messages["null"], code="null") if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages["blank"], code="blank") def clean(self, value, model_instance): """ Convert the value's type and run validation. Validation errors from to_python() and validate() are propagated. Return the correct value if no error is raised. """ value = self.to_python(value) self.validate(value, model_instance) self.run_validators(value) return value def db_type_parameters(self, connection): return DictWrapper(self.__dict__, connection.ops.quote_name, "qn_") def db_check(self, connection): """ Return the database column check constraint for this field, for the provided connection. Works the same way as db_type() for the case that get_internal_type() does not map to a preexisting model field. """ data = self.db_type_parameters(connection) try: return ( connection.data_type_check_constraints[self.get_internal_type()] % data ) except KeyError: return None def db_type(self, connection): """ Return the database column data type for this field, for the provided connection. """ # The default implementation of this method looks at the # backend-specific data_types dictionary, looking up the field by its # "internal type". # # A Field class can implement the get_internal_type() method to specify # which *preexisting* Django Field class it's most similar to -- i.e., # a custom field might be represented by a TEXT column type, which is # the same as the TextField Django field type, which means the custom # field's get_internal_type() returns 'TextField'. # # But the limitation of the get_internal_type() / data_types approach # is that it cannot handle database column types that aren't already # mapped to one of the built-in Django field types. In this case, you # can implement db_type() instead of get_internal_type() to specify # exactly which wacky database column type you want to use. data = self.db_type_parameters(connection) try: return connection.data_types[self.get_internal_type()] % data except KeyError: return None def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. For example, this method is called by ForeignKey and OneToOneField to determine its data type. """ return self.db_type(connection) def cast_db_type(self, connection): """Return the data type to use in the Cast() function.""" db_type = connection.ops.cast_data_types.get(self.get_internal_type()) if db_type: return db_type % self.db_type_parameters(connection) return self.db_type(connection) def db_parameters(self, connection): """ Extension of db_type(), providing a range of different return values (type, checks). This will look at db_type(), allowing custom model fields to override it. 
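
        Subclasses may add further keys; for example, CharField and TextField
        add a "collation" entry in their db_parameters() overrides below.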
""" type_string = self.db_type(connection) check_string = self.db_check(connection) return { "type": type_string, "check": check_string, } def db_type_suffix(self, connection): return connection.data_types_suffix.get(self.get_internal_type()) def get_db_converters(self, connection): if hasattr(self, "from_db_value"): return [self.from_db_value] return [] @property def unique(self): return self._unique or self.primary_key @property def db_tablespace(self): return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE @property def db_returning(self): """ Private API intended only to be used by Django itself. Currently only the PostgreSQL backend supports returning multiple fields on a model. """ return False def set_attributes_from_name(self, name): self.name = self.name or name self.attname, self.column = self.get_attname_column() self.concrete = self.column is not None if self.verbose_name is None and self.name: self.verbose_name = self.name.replace("_", " ") def contribute_to_class(self, cls, name, private_only=False): """ Register the field with the model class it belongs to. If private_only is True, create a separate instance of this field for every subclass of cls, even if cls is not an abstract model. """ self.set_attributes_from_name(name) self.model = cls cls._meta.add_field(self, private=private_only) if self.column: setattr(cls, self.attname, self.descriptor_class(self)) if self.choices is not None: # Don't override a get_FOO_display() method defined explicitly on # this class, but don't check methods derived from inheritance, to # allow overriding inherited choices. For more complex inheritance # structures users should override contribute_to_class(). if "get_%s_display" % self.name not in cls.__dict__: setattr( cls, "get_%s_display" % self.name, partialmethod(cls._get_FIELD_display, field=self), ) def get_filter_kwargs_for_object(self, obj): """ Return a dict that when passed as kwargs to self.model.filter(), would yield all instances having the same value for this field as obj has. """ return {self.name: getattr(obj, self.attname)} def get_attname(self): return self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_internal_type(self): return self.__class__.__name__ def pre_save(self, model_instance, add): """Return field's value just before saving.""" return getattr(model_instance, self.attname) def get_prep_value(self, value): """Perform preliminary non-db specific value checks and conversions.""" if isinstance(value, Promise): value = value._proxy____cast() return value def get_db_prep_value(self, value, connection, prepared=False): """ Return field's value prepared for interacting with the database backend. Used by the default implementations of get_db_prep_save(). 
""" if not prepared: value = self.get_prep_value(value) return value def get_db_prep_save(self, value, connection): """Return field's value prepared for saving into a database.""" return self.get_db_prep_value(value, connection=connection, prepared=False) def has_default(self): """Return a boolean of whether this field has a default value.""" return self.default is not NOT_PROVIDED def get_default(self): """Return the default value for this field.""" return self._get_default() @cached_property def _get_default(self): if self.has_default(): if callable(self.default): return self.default return lambda: self.default if ( not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls ): return return_None return str # return empty string def get_choices( self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=(), ): """ Return choices with a default blank choices included, for use as <select> choices for this field. """ if self.choices is not None: choices = list(self.choices) if include_blank: blank_defined = any( choice in ("", None) for choice, _ in self.flatchoices ) if not blank_defined: choices = blank_choice + choices return choices rel_model = self.remote_field.model limit_choices_to = limit_choices_to or self.get_limit_choices_to() choice_func = operator.attrgetter( self.remote_field.get_related_field().attname if hasattr(self.remote_field, "get_related_field") else "pk" ) qs = rel_model._default_manager.complex_filter(limit_choices_to) if ordering: qs = qs.order_by(*ordering) return (blank_choice if include_blank else []) + [ (choice_func(x), str(x)) for x in qs ] def value_to_string(self, obj): """ Return a string value of this field from the passed obj. This is used by the serialization framework. """ return str(self.value_from_object(obj)) def _get_flatchoices(self): """Flattened version of choices tuple.""" if self.choices is None: return [] flat = [] for choice, value in self.choices: if isinstance(value, (list, tuple)): flat.extend(value) else: flat.append((choice, value)) return flat flatchoices = property(_get_flatchoices) def save_form_data(self, instance, data): setattr(instance, self.name, data) def formfield(self, form_class=None, choices_form_class=None, **kwargs): """Return a django.forms.Field instance for this field.""" defaults = { "required": not self.blank, "label": capfirst(self.verbose_name), "help_text": self.help_text, } if self.has_default(): if callable(self.default): defaults["initial"] = self.default defaults["show_hidden_initial"] = True else: defaults["initial"] = self.get_default() if self.choices is not None: # Fields with choices get special treatment. include_blank = self.blank or not ( self.has_default() or "initial" in kwargs ) defaults["choices"] = self.get_choices(include_blank=include_blank) defaults["coerce"] = self.to_python if self.null: defaults["empty_value"] = None if choices_form_class is not None: form_class = choices_form_class else: form_class = forms.TypedChoiceField # Many of the subclass-specific formfield arguments (min_value, # max_value) don't apply for choice fields, so be sure to only pass # the values that TypedChoiceField will understand. 
for k in list(kwargs): if k not in ( "coerce", "empty_value", "choices", "required", "widget", "label", "initial", "help_text", "error_messages", "show_hidden_initial", "disabled", ): del kwargs[k] defaults.update(kwargs) if form_class is None: form_class = forms.CharField return form_class(**defaults) def value_from_object(self, obj): """Return the value of this field in the given model instance.""" return getattr(obj, self.attname) class BooleanField(Field): empty_strings_allowed = False default_error_messages = { "invalid": _("“%(value)s” value must be either True or False."), "invalid_nullable": _("“%(value)s” value must be either True, False, or None."), } description = _("Boolean (Either True or False)") def get_internal_type(self): return "BooleanField" def to_python(self, value): if self.null and value in self.empty_values: return None if value in (True, False): # 1/0 are equal to True/False. bool() converts former to latter. return bool(value) if value in ("t", "True", "1"): return True if value in ("f", "False", "0"): return False raise exceptions.ValidationError( self.error_messages["invalid_nullable" if self.null else "invalid"], code="invalid", params={"value": value}, ) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return self.to_python(value) def formfield(self, **kwargs): if self.choices is not None: include_blank = not (self.has_default() or "initial" in kwargs) defaults = {"choices": self.get_choices(include_blank=include_blank)} else: form_class = forms.NullBooleanField if self.null else forms.BooleanField # In HTML checkboxes, 'required' means "must be checked" which is # different from the choices case ("must select some value"). # required=False allows unchecked checkboxes. defaults = {"form_class": form_class, "required": False} return super().formfield(**{**defaults, **kwargs}) def select_format(self, compiler, sql, params): sql, params = super().select_format(compiler, sql, params) # Filters that match everything are handled as empty strings in the # WHERE clause, but in SELECT or GROUP BY list they must use a # predicate that's always True. if sql == "": sql = "1" return sql, params class CharField(Field): description = _("String (up to %(max_length)s)") def __init__(self, *args, db_collation=None, **kwargs): super().__init__(*args, **kwargs) self.db_collation = db_collation if self.max_length is not None: self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): databases = kwargs.get("databases") or [] return [ *super().check(**kwargs), *self._check_db_collation(databases), *self._check_max_length_attribute(**kwargs), ] def _check_max_length_attribute(self, **kwargs): if self.max_length is None: return [ checks.Error( "CharFields must define a 'max_length' attribute.", obj=self, id="fields.E120", ) ] elif ( not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or self.max_length <= 0 ): return [ checks.Error( "'max_length' must be a positive integer.", obj=self, id="fields.E121", ) ] else: return [] def _check_db_collation(self, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if not ( self.db_collation is None or "supports_collation_on_charfield" in self.model._meta.required_db_features or connection.features.supports_collation_on_charfield ): errors.append( checks.Error( "%s does not support a database collation on " "CharFields." 
% connection.display_name, obj=self, id="fields.E190", ), ) return errors def cast_db_type(self, connection): if self.max_length is None: return connection.ops.cast_char_field_without_max_length return super().cast_db_type(connection) def db_parameters(self, connection): db_params = super().db_parameters(connection) db_params["collation"] = self.db_collation return db_params def get_internal_type(self): return "CharField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). defaults = {"max_length": self.max_length} # TODO: Handle multiple backends with different feature flags. if self.null and not connection.features.interprets_empty_strings_as_nulls: defaults["empty_value"] = None defaults.update(kwargs) return super().formfield(**defaults) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.db_collation: kwargs["db_collation"] = self.db_collation return name, path, args, kwargs class CommaSeparatedIntegerField(CharField): default_validators = [validators.validate_comma_separated_integer_list] description = _("Comma-separated integers") system_check_removed_details = { "msg": ( "CommaSeparatedIntegerField is removed except for support in " "historical migrations." ), "hint": ( "Use CharField(validators=[validate_comma_separated_integer_list]) " "instead." ), "id": "fields.E901", } def _to_naive(value): if timezone.is_aware(value): value = timezone.make_naive(value, datetime.timezone.utc) return value def _get_naive_now(): return _to_naive(timezone.now()) class DateTimeCheckMixin: def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_mutually_exclusive_options(), *self._check_fix_default_value(), ] def _check_mutually_exclusive_options(self): # auto_now, auto_now_add, and default are mutually exclusive # options. The use of more than one of these options together # will trigger an Error mutually_exclusive_options = [ self.auto_now_add, self.auto_now, self.has_default(), ] enabled_options = [ option not in (None, False) for option in mutually_exclusive_options ].count(True) if enabled_options > 1: return [ checks.Error( "The options auto_now, auto_now_add, and default " "are mutually exclusive. Only one of these options " "may be present.", obj=self, id="fields.E160", ) ] else: return [] def _check_fix_default_value(self): return [] # Concrete subclasses use this in their implementations of # _check_fix_default_value(). def _check_if_value_fixed(self, value, now=None): """ Check if the given value appears to have been provided as a "fixed" time value, and include a warning in the returned list if it does. The value argument must be a date object or aware/naive datetime object. If now is provided, it must be a naive datetime object. 
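
        For example, a field declared with default=timezone.now() (evaluated
        once at class definition) is flagged with fields.W161, while passing
        the callable default=timezone.now never reaches this check.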
""" if now is None: now = _get_naive_now() offset = datetime.timedelta(seconds=10) lower = now - offset upper = now + offset if isinstance(value, datetime.datetime): value = _to_naive(value) else: assert isinstance(value, datetime.date) lower = lower.date() upper = upper.date() if lower <= value <= upper: return [ checks.Warning( "Fixed default value provided.", hint=( "It seems you set a fixed date / time / datetime " "value as default for this field. This may not be " "what you want. If you want to have the current date " "as default, use `django.utils.timezone.now`" ), obj=self, id="fields.W161", ) ] return [] class DateField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid date format. It must be " "in YYYY-MM-DD format." ), "invalid_date": _( "“%(value)s” value has the correct format (YYYY-MM-DD) " "but it is an invalid date." ), } description = _("Date (without time)") def __init__( self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs ): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs["editable"] = False kwargs["blank"] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, datetime.datetime): value = _to_naive(value).date() elif isinstance(value, datetime.date): pass else: # No explicit date / datetime value -- no checks necessary return [] # At this point, value is a date object. return self._check_if_value_fixed(value) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now: kwargs["auto_now"] = True if self.auto_now_add: kwargs["auto_now_add"] = True if self.auto_now or self.auto_now_add: del kwargs["editable"] del kwargs["blank"] return name, path, args, kwargs def get_internal_type(self): return "DateField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): if settings.USE_TZ and timezone.is_aware(value): # Convert aware datetimes to the default time zone # before casting them to dates (#17742). 
default_timezone = timezone.get_default_timezone() value = timezone.make_naive(value, default_timezone) return value.date() if isinstance(value, datetime.date): return value try: parsed = parse_date(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_date"], code="invalid_date", params={"value": value}, ) raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.date.today() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) if not self.null: setattr( cls, "get_next_by_%s" % self.name, partialmethod( cls._get_next_or_previous_by_FIELD, field=self, is_next=True ), ) setattr( cls, "get_previous_by_%s" % self.name, partialmethod( cls._get_next_or_previous_by_FIELD, field=self, is_next=False ), ) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts dates into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.DateField, **kwargs, } ) class DateTimeField(DateField): empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid format. It must be in " "YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format." ), "invalid_date": _( "“%(value)s” value has the correct format " "(YYYY-MM-DD) but it is an invalid date." ), "invalid_datetime": _( "“%(value)s” value has the correct format " "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) " "but it is an invalid date/time." ), } description = _("Date (with time)") # __init__ is inherited from DateField def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, (datetime.datetime, datetime.date)): return self._check_if_value_fixed(value) # No explicit date / datetime value -- no checks necessary. return [] def get_internal_type(self): return "DateTimeField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): return value if isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) if settings.USE_TZ: # For backwards compatibility, interpret naive datetimes in # local time. This won't work during DST change, but we can't # do much about it, so we let the exceptions percolate up the # call stack. warnings.warn( "DateTimeField %s.%s received a naive datetime " "(%s) while time zone support is active." 
% (self.model.__name__, self.name, value), RuntimeWarning, ) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value try: parsed = parse_datetime(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_datetime"], code="invalid_datetime", params={"value": value}, ) try: parsed = parse_date(value) if parsed is not None: return datetime.datetime(parsed.year, parsed.month, parsed.day) except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_date"], code="invalid_date", params={"value": value}, ) raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = timezone.now() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) # contribute_to_class is inherited from DateField, it registers # get_next_by_FOO and get_prev_by_FOO def get_prep_value(self, value): value = super().get_prep_value(value) value = self.to_python(value) if value is not None and settings.USE_TZ and timezone.is_naive(value): # For backwards compatibility, interpret naive datetimes in local # time. This won't work during DST change, but we can't do much # about it, so we let the exceptions percolate up the call stack. try: name = "%s.%s" % (self.model.__name__, self.name) except AttributeError: name = "(unbound)" warnings.warn( "DateTimeField %s received a naive datetime (%s)" " while time zone support is active." % (name, value), RuntimeWarning, ) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value def get_db_prep_value(self, value, connection, prepared=False): # Casts datetimes into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datetimefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.DateTimeField, **kwargs, } ) class DecimalField(Field): empty_strings_allowed = False default_error_messages = { "invalid": _("“%(value)s” value must be a decimal number."), } description = _("Decimal number") def __init__( self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs, ): self.max_digits, self.decimal_places = max_digits, decimal_places super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super().check(**kwargs) digits_errors = [ *self._check_decimal_places(), *self._check_max_digits(), ] if not digits_errors: errors.extend(self._check_decimal_places_and_max_digits(**kwargs)) else: errors.extend(digits_errors) return errors def _check_decimal_places(self): try: decimal_places = int(self.decimal_places) if decimal_places < 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'decimal_places' attribute.", obj=self, id="fields.E130", ) ] except ValueError: return [ checks.Error( "'decimal_places' must be a non-negative integer.", obj=self, id="fields.E131", ) ] else: return [] def _check_max_digits(self): try: max_digits = int(self.max_digits) if max_digits <= 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'max_digits' attribute.", obj=self, 
id="fields.E132", ) ] except ValueError: return [ checks.Error( "'max_digits' must be a positive integer.", obj=self, id="fields.E133", ) ] else: return [] def _check_decimal_places_and_max_digits(self, **kwargs): if int(self.decimal_places) > int(self.max_digits): return [ checks.Error( "'max_digits' must be greater or equal to 'decimal_places'.", obj=self, id="fields.E134", ) ] return [] @cached_property def validators(self): return super().validators + [ validators.DecimalValidator(self.max_digits, self.decimal_places) ] @cached_property def context(self): return decimal.Context(prec=self.max_digits) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.max_digits is not None: kwargs["max_digits"] = self.max_digits if self.decimal_places is not None: kwargs["decimal_places"] = self.decimal_places return name, path, args, kwargs def get_internal_type(self): return "DecimalField" def to_python(self, value): if value is None: return value try: if isinstance(value, float): decimal_value = self.context.create_decimal_from_float(value) else: decimal_value = decimal.Decimal(value) except (decimal.InvalidOperation, TypeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) if not decimal_value.is_finite(): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) return decimal_value def get_db_prep_save(self, value, connection): return connection.ops.adapt_decimalfield_value( self.to_python(value), self.max_digits, self.decimal_places ) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): return super().formfield( **{ "max_digits": self.max_digits, "decimal_places": self.decimal_places, "form_class": forms.DecimalField, **kwargs, } ) class DurationField(Field): """ Store timedelta objects. Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint of microseconds on other databases. """ empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid format. It must be in " "[DD] [[HH:]MM:]ss[.uuuuuu] format." 
) } description = _("Duration") def get_internal_type(self): return "DurationField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.timedelta): return value try: parsed = parse_duration(value) except ValueError: pass else: if parsed is not None: return parsed raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def get_db_prep_value(self, value, connection, prepared=False): if connection.features.has_native_duration_field: return value if value is None: return None return duration_microseconds(value) def get_db_converters(self, connection): converters = [] if not connection.features.has_native_duration_field: converters.append(connection.ops.convert_durationfield_value) return converters + super().get_db_converters(connection) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else duration_string(val) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.DurationField, **kwargs, } ) class EmailField(CharField): default_validators = [validators.validate_email] description = _("Email address") def __init__(self, *args, **kwargs): # max_length=254 to be compliant with RFCs 3696 and 5321 kwargs.setdefault("max_length", 254) super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() # We do not exclude max_length if it matches default as we want to change # the default in future. return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause email validation to be performed # twice. return super().formfield( **{ "form_class": forms.EmailField, **kwargs, } ) class FilePathField(Field): description = _("File path") def __init__( self, verbose_name=None, name=None, path="", match=None, recursive=False, allow_files=True, allow_folders=False, **kwargs, ): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders kwargs.setdefault("max_length", 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_allowing_files_or_folders(**kwargs), ] def _check_allowing_files_or_folders(self, **kwargs): if not self.allow_files and not self.allow_folders: return [ checks.Error( "FilePathFields must have either 'allow_files' or 'allow_folders' " "set to True.", obj=self, id="fields.E140", ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.path != "": kwargs["path"] = self.path if self.match is not None: kwargs["match"] = self.match if self.recursive is not False: kwargs["recursive"] = self.recursive if self.allow_files is not True: kwargs["allow_files"] = self.allow_files if self.allow_folders is not False: kwargs["allow_folders"] = self.allow_folders if kwargs.get("max_length") == 100: del kwargs["max_length"] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def formfield(self, **kwargs): return super().formfield( **{ "path": self.path() if callable(self.path) else self.path, "match": self.match, "recursive": self.recursive, "form_class": forms.FilePathField, "allow_files": self.allow_files, "allow_folders": self.allow_folders, **kwargs, } ) def get_internal_type(self): return "FilePathField" class FloatField(Field): empty_strings_allowed = False default_error_messages = { "invalid": 
_("“%(value)s” value must be a float."), } description = _("Floating point number") def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return float(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "FloatField" def to_python(self, value): if value is None: return value try: return float(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.FloatField, **kwargs, } ) class IntegerField(Field): empty_strings_allowed = False default_error_messages = { "invalid": _("“%(value)s” value must be an integer."), } description = _("Integer") def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_max_length_warning(), ] def _check_max_length_warning(self): if self.max_length is not None: return [ checks.Warning( "'max_length' is ignored when used with %s." % self.__class__.__name__, hint="Remove 'max_length' from field", obj=self, id="fields.W122", ) ] return [] @cached_property def validators(self): # These validators can't be added at field initialization time since # they're based on values retrieved from `connection`. validators_ = super().validators internal_type = self.get_internal_type() min_value, max_value = connection.ops.integer_field_range(internal_type) if min_value is not None and not any( ( isinstance(validator, validators.MinValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) >= min_value ) for validator in validators_ ): validators_.append(validators.MinValueValidator(min_value)) if max_value is not None and not any( ( isinstance(validator, validators.MaxValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) <= max_value ) for validator in validators_ ): validators_.append(validators.MaxValueValidator(max_value)) return validators_ def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return int(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "IntegerField" def to_python(self, value): if value is None: return value try: return int(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.IntegerField, **kwargs, } ) class BigIntegerField(IntegerField): description = _("Big (8 byte) integer") MAX_BIGINT = 9223372036854775807 def get_internal_type(self): return "BigIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": -BigIntegerField.MAX_BIGINT - 1, "max_value": BigIntegerField.MAX_BIGINT, **kwargs, } ) class SmallIntegerField(IntegerField): description = _("Small integer") def get_internal_type(self): return "SmallIntegerField" class IPAddressField(Field): empty_strings_allowed = False description = _("IPv4 address") system_check_removed_details = { "msg": ( "IPAddressField has been removed except for support in " "historical migrations." 
), "hint": "Use GenericIPAddressField instead.", "id": "fields.E900", } def __init__(self, *args, **kwargs): kwargs["max_length"] = 15 super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["max_length"] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def get_internal_type(self): return "IPAddressField" class GenericIPAddressField(Field): empty_strings_allowed = False description = _("IP address") default_error_messages = {} def __init__( self, verbose_name=None, name=None, protocol="both", unpack_ipv4=False, *args, **kwargs, ): self.unpack_ipv4 = unpack_ipv4 self.protocol = protocol ( self.default_validators, invalid_error_message, ) = validators.ip_address_validators(protocol, unpack_ipv4) self.default_error_messages["invalid"] = invalid_error_message kwargs["max_length"] = 39 super().__init__(verbose_name, name, *args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_blank_and_null_values(**kwargs), ] def _check_blank_and_null_values(self, **kwargs): if not getattr(self, "null", False) and getattr(self, "blank", False): return [ checks.Error( "GenericIPAddressFields cannot have blank=True if null=False, " "as blank values are stored as nulls.", obj=self, id="fields.E150", ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.unpack_ipv4 is not False: kwargs["unpack_ipv4"] = self.unpack_ipv4 if self.protocol != "both": kwargs["protocol"] = self.protocol if kwargs.get("max_length") == 39: del kwargs["max_length"] return name, path, args, kwargs def get_internal_type(self): return "GenericIPAddressField" def to_python(self, value): if value is None: return None if not isinstance(value, str): value = str(value) value = value.strip() if ":" in value: return clean_ipv6_address( value, self.unpack_ipv4, self.error_messages["invalid"] ) return value def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_ipaddressfield_value(value) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None if value and ":" in value: try: return clean_ipv6_address(value, self.unpack_ipv4) except exceptions.ValidationError: pass return str(value) def formfield(self, **kwargs): return super().formfield( **{ "protocol": self.protocol, "form_class": forms.GenericIPAddressField, **kwargs, } ) class NullBooleanField(BooleanField): default_error_messages = { "invalid": _("“%(value)s” value must be either None, True or False."), "invalid_nullable": _("“%(value)s” value must be either None, True or False."), } description = _("Boolean (Either True, False or None)") system_check_removed_details = { "msg": ( "NullBooleanField is removed except for support in historical " "migrations." 
), "hint": "Use BooleanField(null=True) instead.", "id": "fields.E903", } def __init__(self, *args, **kwargs): kwargs["null"] = True kwargs["blank"] = True super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["null"] del kwargs["blank"] return name, path, args, kwargs class PositiveIntegerRelDbTypeMixin: def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if not hasattr(cls, "integer_field_class"): cls.integer_field_class = next( ( parent for parent in cls.__mro__[1:] if issubclass(parent, IntegerField) ), None, ) def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. In most cases, a foreign key pointing to a positive integer primary key will have an integer column data type but some databases (e.g. MySQL) have an unsigned integer type. In that case (related_fields_match_type=True), the primary key should return its db_type. """ if connection.features.related_fields_match_type: return self.db_type(connection) else: return self.integer_field_class().db_type(connection=connection) class PositiveBigIntegerField(PositiveIntegerRelDbTypeMixin, BigIntegerField): description = _("Positive big integer") def get_internal_type(self): return "PositiveBigIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": 0, **kwargs, } ) class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive integer") def get_internal_type(self): return "PositiveIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": 0, **kwargs, } ) class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, SmallIntegerField): description = _("Positive small integer") def get_internal_type(self): return "PositiveSmallIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": 0, **kwargs, } ) class SlugField(CharField): default_validators = [validators.validate_slug] description = _("Slug (up to %(max_length)s)") def __init__( self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs ): self.allow_unicode = allow_unicode if self.allow_unicode: self.default_validators = [validators.validate_unicode_slug] super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 50: del kwargs["max_length"] if self.db_index is False: kwargs["db_index"] = False else: del kwargs["db_index"] if self.allow_unicode is not False: kwargs["allow_unicode"] = self.allow_unicode return name, path, args, kwargs def get_internal_type(self): return "SlugField" def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.SlugField, "allow_unicode": self.allow_unicode, **kwargs, } ) class TextField(Field): description = _("Text") def __init__(self, *args, db_collation=None, **kwargs): super().__init__(*args, **kwargs) self.db_collation = db_collation def check(self, **kwargs): databases = kwargs.get("databases") or [] return [ *super().check(**kwargs), *self._check_db_collation(databases), ] def _check_db_collation(self, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if not ( self.db_collation is None or "supports_collation_on_textfield" in self.model._meta.required_db_features or connection.features.supports_collation_on_textfield ): errors.append( 
checks.Error( "%s does not support a database collation on " "TextFields." % connection.display_name, obj=self, id="fields.E190", ), ) return errors def db_parameters(self, connection): db_params = super().db_parameters(connection) db_params["collation"] = self.db_collation return db_params def get_internal_type(self): return "TextField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). return super().formfield( **{ "max_length": self.max_length, **({} if self.choices is not None else {"widget": forms.Textarea}), **kwargs, } ) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.db_collation: kwargs["db_collation"] = self.db_collation return name, path, args, kwargs class TimeField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid format. It must be in " "HH:MM[:ss[.uuuuuu]] format." ), "invalid_time": _( "“%(value)s” value has the correct format " "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time." ), } description = _("Time") def __init__( self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs ): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs["editable"] = False kwargs["blank"] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, datetime.datetime): now = None elif isinstance(value, datetime.time): now = _get_naive_now() # This will not use the right date in the race condition where now # is just before the date change and value is just past 0:00. value = datetime.datetime.combine(now.date(), value) else: # No explicit time / datetime value -- no checks necessary return [] # At this point, value is a datetime object. return self._check_if_value_fixed(value, now=now) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now is not False: kwargs["auto_now"] = self.auto_now if self.auto_now_add is not False: kwargs["auto_now_add"] = self.auto_now_add if self.auto_now or self.auto_now_add: del kwargs["blank"] del kwargs["editable"] return name, path, args, kwargs def get_internal_type(self): return "TimeField" def to_python(self, value): if value is None: return None if isinstance(value, datetime.time): return value if isinstance(value, datetime.datetime): # Not usually a good idea to pass in a datetime here (it loses # information), but this can be a side-effect of interacting with a # database backend (e.g. Oracle), so we'll be accommodating. 
return value.time() try: parsed = parse_time(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_time"], code="invalid_time", params={"value": value}, ) raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.datetime.now().time() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts times into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_timefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.TimeField, **kwargs, } ) class URLField(CharField): default_validators = [validators.URLValidator()] description = _("URL") def __init__(self, verbose_name=None, name=None, **kwargs): kwargs.setdefault("max_length", 200) super().__init__(verbose_name, name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 200: del kwargs["max_length"] return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause URL validation to be performed # twice. return super().formfield( **{ "form_class": forms.URLField, **kwargs, } ) class BinaryField(Field): description = _("Raw binary data") empty_values = [None, b""] def __init__(self, *args, **kwargs): kwargs.setdefault("editable", False) super().__init__(*args, **kwargs) if self.max_length is not None: self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): return [*super().check(**kwargs), *self._check_str_default_value()] def _check_str_default_value(self): if self.has_default() and isinstance(self.default, str): return [ checks.Error( "BinaryField's default cannot be a string. 
Use bytes " "content instead.", obj=self, id="fields.E170", ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.editable: kwargs["editable"] = True else: del kwargs["editable"] return name, path, args, kwargs def get_internal_type(self): return "BinaryField" def get_placeholder(self, value, compiler, connection): return connection.ops.binary_placeholder_sql(value) def get_default(self): if self.has_default() and not callable(self.default): return self.default default = super().get_default() if default == "": return b"" return default def get_db_prep_value(self, value, connection, prepared=False): value = super().get_db_prep_value(value, connection, prepared) if value is not None: return connection.Database.Binary(value) return value def value_to_string(self, obj): """Binary data is serialized as base64""" return b64encode(self.value_from_object(obj)).decode("ascii") def to_python(self, value): # If it's a string, it should be base64-encoded data if isinstance(value, str): return memoryview(b64decode(value.encode("ascii"))) return value class UUIDField(Field): default_error_messages = { "invalid": _("“%(value)s” is not a valid UUID."), } description = _("Universally unique identifier") empty_strings_allowed = False def __init__(self, verbose_name=None, **kwargs): kwargs["max_length"] = 32 super().__init__(verbose_name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["max_length"] return name, path, args, kwargs def get_internal_type(self): return "UUIDField" def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): if value is None: return None if not isinstance(value, uuid.UUID): value = self.to_python(value) if connection.features.has_native_uuid_field: return value return value.hex def to_python(self, value): if value is not None and not isinstance(value, uuid.UUID): input_form = "int" if isinstance(value, int) else "hex" try: return uuid.UUID(**{input_form: value}) except (AttributeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) return value def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.UUIDField, **kwargs, } ) class AutoFieldMixin: db_returning = True def __init__(self, *args, **kwargs): kwargs["blank"] = True super().__init__(*args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), ] def _check_primary_key(self): if not self.primary_key: return [ checks.Error( "AutoFields must set primary_key=True.", obj=self, id="fields.E100", ), ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["blank"] kwargs["primary_key"] = True return name, path, args, kwargs def validate(self, value, model_instance): pass def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) value = connection.ops.validate_autopk_value(value) return value def contribute_to_class(self, cls, name, **kwargs): if cls._meta.auto_field: raise ValueError( "Model %s can't have more than one auto-generated field." % cls._meta.label ) super().contribute_to_class(cls, name, **kwargs) cls._meta.auto_field = self def formfield(self, **kwargs): return None class AutoFieldMeta(type): """ Metaclass to maintain backward inheritance compatibility for AutoField. 
It is intended that AutoFieldMixin become public API when it is possible to create a non-integer automatically-generated field using column defaults stored in the database. In many areas Django also relies on using isinstance() to check for an automatically-generated field as a subclass of AutoField. A new flag needs to be implemented on Field to be used instead. When these issues have been addressed, this metaclass could be used to deprecate inheritance from AutoField and use of isinstance() with AutoField for detecting automatically-generated fields. """ @property def _subclasses(self): return (BigAutoField, SmallAutoField) def __instancecheck__(self, instance): return isinstance(instance, self._subclasses) or super().__instancecheck__( instance ) def __subclasscheck__(self, subclass): return issubclass(subclass, self._subclasses) or super().__subclasscheck__( subclass ) class AutoField(AutoFieldMixin, IntegerField, metaclass=AutoFieldMeta): def get_internal_type(self): return "AutoField" def rel_db_type(self, connection): return IntegerField().db_type(connection=connection) class BigAutoField(AutoFieldMixin, BigIntegerField): def get_internal_type(self): return "BigAutoField" def rel_db_type(self, connection): return BigIntegerField().db_type(connection=connection) class SmallAutoField(AutoFieldMixin, SmallIntegerField): def get_internal_type(self): return "SmallAutoField" def rel_db_type(self, connection): return SmallIntegerField().db_type(connection=connection)
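

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Django itself): a minimal custom field
# built on the machinery above, showing the usual deconstruct() override so
# that makemigrations can serialize an extra keyword argument. The class name
# and the "separator" argument are hypothetical.
# ---------------------------------------------------------------------------
class ExampleSeparatedField(CharField):
    """A CharField that remembers a custom separator (illustration only)."""

    def __init__(self, separator=",", **kwargs):
        self.separator = separator
        super().__init__(**kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super().deconstruct()
        # Only record the keyword when it differs from its default, mirroring
        # the "omit parameters with their default values" convention above.
        if self.separator != ",":
            kwargs["separator"] = self.separator
        return name, path, args, kwargs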
716673d07eec9280975910a1a78c4b5973e5169d1bdb9d193d4e39a2b21accd0
import functools import inspect from functools import partial from django import forms from django.apps import apps from django.conf import SettingsReference, settings from django.core import checks, exceptions from django.db import connection, router from django.db.backends import utils from django.db.models import Q from django.db.models.constants import LOOKUP_SEP from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL from django.db.models.query_utils import PathInfo from django.db.models.utils import make_model_tuple from django.utils.functional import cached_property from django.utils.translation import gettext_lazy as _ from . import Field from .mixins import FieldCacheMixin from .related_descriptors import ( ForeignKeyDeferredAttribute, ForwardManyToOneDescriptor, ForwardOneToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from .related_lookups import ( RelatedExact, RelatedGreaterThan, RelatedGreaterThanOrEqual, RelatedIn, RelatedIsNull, RelatedLessThan, RelatedLessThanOrEqual, ) from .reverse_related import ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel RECURSIVE_RELATIONSHIP_CONSTANT = "self" def resolve_relation(scope_model, relation): """ Transform relation into a model or fully-qualified model string of the form "app_label.ModelName", relative to scope_model. The relation argument can be: * RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case the model argument will be returned. * A bare model name without an app_label, in which case scope_model's app_label will be prepended. * An "app_label.ModelName" string. * A model class, which will be returned unchanged. """ # Check for recursive relations if relation == RECURSIVE_RELATIONSHIP_CONSTANT: relation = scope_model # Look for an "app.Model" relation if isinstance(relation, str): if "." not in relation: relation = "%s.%s" % (scope_model._meta.app_label, relation) return relation def lazy_related_operation(function, model, *related_models, **kwargs): """ Schedule `function` to be called once `model` and all `related_models` have been imported and registered with the app registry. `function` will be called with the newly-loaded model classes as its positional arguments, plus any optional keyword arguments. The `model` argument must be a model class. Each subsequent positional argument is another model, or a reference to another model - see `resolve_relation()` for the various forms these may take. Any relative references will be resolved relative to `model`. This is a convenience wrapper for `Apps.lazy_model_operation` - the app registry model used is the one found in `model._meta.apps`. """ models = [model] + [resolve_relation(model, rel) for rel in related_models] model_keys = (make_model_tuple(m) for m in models) apps = model._meta.apps return apps.lazy_model_operation(partial(function, **kwargs), *model_keys) class RelatedField(FieldCacheMixin, Field): """Base class that all relational fields inherit from.""" # Field flags one_to_many = False one_to_one = False many_to_many = False many_to_one = False def __init__( self, related_name=None, related_query_name=None, limit_choices_to=None, **kwargs, ): self._related_name = related_name self._related_query_name = related_query_name self._limit_choices_to = limit_choices_to super().__init__(**kwargs) @cached_property def related_model(self): # Can't cache this property until all the models are loaded. 
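        # check_models_ready() raises AppRegistryNotReady if models haven't
        # finished loading, so a partially-resolved model is never cached.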
apps.check_models_ready() return self.remote_field.model def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_related_name_is_valid(), *self._check_related_query_name_is_valid(), *self._check_relation_model_exists(), *self._check_referencing_to_swapped_model(), *self._check_clashes(), ] def _check_related_name_is_valid(self): import keyword related_name = self.remote_field.related_name if related_name is None: return [] is_valid_id = ( not keyword.iskeyword(related_name) and related_name.isidentifier() ) if not (is_valid_id or related_name.endswith("+")): return [ checks.Error( "The name '%s' is invalid related_name for field %s.%s" % ( self.remote_field.related_name, self.model._meta.object_name, self.name, ), hint=( "Related name must be a valid Python identifier or end with a " "'+'" ), obj=self, id="fields.E306", ) ] return [] def _check_related_query_name_is_valid(self): if self.remote_field.is_hidden(): return [] rel_query_name = self.related_query_name() errors = [] if rel_query_name.endswith("_"): errors.append( checks.Error( "Reverse query name '%s' must not end with an underscore." % rel_query_name, hint=( "Add or change a related_name or related_query_name " "argument for this field." ), obj=self, id="fields.E308", ) ) if LOOKUP_SEP in rel_query_name: errors.append( checks.Error( "Reverse query name '%s' must not contain '%s'." % (rel_query_name, LOOKUP_SEP), hint=( "Add or change a related_name or related_query_name " "argument for this field." ), obj=self, id="fields.E309", ) ) return errors def _check_relation_model_exists(self): rel_is_missing = self.remote_field.model not in self.opts.apps.get_models() rel_is_string = isinstance(self.remote_field.model, str) model_name = ( self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name ) if rel_is_missing and ( rel_is_string or not self.remote_field.model._meta.swapped ): return [ checks.Error( "Field defines a relation with model '%s', which is either " "not installed, or is abstract." % model_name, obj=self, id="fields.E300", ) ] return [] def _check_referencing_to_swapped_model(self): if ( self.remote_field.model not in self.opts.apps.get_models() and not isinstance(self.remote_field.model, str) and self.remote_field.model._meta.swapped ): return [ checks.Error( "Field defines a relation with the model '%s', which has " "been swapped out." % self.remote_field.model._meta.label, hint="Update the relation to point at 'settings.%s'." % self.remote_field.model._meta.swappable, obj=self, id="fields.E301", ) ] return [] def _check_clashes(self): """Check accessor and reverse query name clashes.""" from django.db.models.base import ModelBase errors = [] opts = self.model._meta # f.remote_field.model may be a string instead of a model. Skip if # model name is not resolved. if not isinstance(self.remote_field.model, ModelBase): return [] # Consider that we are checking field `Model.foreign` and the models # are: # # class Target(models.Model): # model = models.IntegerField() # model_set = models.IntegerField() # # class Model(models.Model): # foreign = models.ForeignKey(Target) # m2m = models.ManyToManyField(Target) # rel_opts.object_name == "Target" rel_opts = self.remote_field.model._meta # If the field doesn't install a backward relation on the target model # (so `is_hidden` returns True), then there are no clashes to check # and we can skip these fields. rel_is_hidden = self.remote_field.is_hidden() rel_name = self.remote_field.get_accessor_name() # i. e. 
"model_set" rel_query_name = self.related_query_name() # i. e. "model" # i.e. "app_label.Model.field". field_name = "%s.%s" % (opts.label, self.name) # Check clashes between accessor or reverse query name of `field` # and any other field name -- i.e. accessor for Model.foreign is # model_set and it clashes with Target.model_set. potential_clashes = rel_opts.fields + rel_opts.many_to_many for clash_field in potential_clashes: # i.e. "app_label.Target.model_set". clash_name = "%s.%s" % (rel_opts.label, clash_field.name) if not rel_is_hidden and clash_field.name == rel_name: errors.append( checks.Error( f"Reverse accessor '{rel_opts.object_name}.{rel_name}' " f"for '{field_name}' clashes with field name " f"'{clash_name}'.", hint=( "Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'." ) % (clash_name, field_name), obj=self, id="fields.E302", ) ) if clash_field.name == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name), hint=( "Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'." ) % (clash_name, field_name), obj=self, id="fields.E303", ) ) # Check clashes between accessors/reverse query names of `field` and # any other field accessor -- i. e. Model.foreign accessor clashes with # Model.m2m accessor. potential_clashes = (r for r in rel_opts.related_objects if r.field is not self) for clash_field in potential_clashes: # i.e. "app_label.Model.m2m". clash_name = "%s.%s" % ( clash_field.related_model._meta.label, clash_field.field.name, ) if not rel_is_hidden and clash_field.get_accessor_name() == rel_name: errors.append( checks.Error( f"Reverse accessor '{rel_opts.object_name}.{rel_name}' " f"for '{field_name}' clashes with reverse accessor for " f"'{clash_name}'.", hint=( "Add or change a related_name argument " "to the definition for '%s' or '%s'." ) % (field_name, clash_name), obj=self, id="fields.E304", ) ) if clash_field.get_accessor_name() == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with reverse query name " "for '%s'." % (field_name, clash_name), hint=( "Add or change a related_name argument " "to the definition for '%s' or '%s'." ) % (field_name, clash_name), obj=self, id="fields.E305", ) ) return errors def db_type(self, connection): # By default related field will not have a column as it relates to # columns from another table. 
return None def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) self.opts = cls._meta if not cls._meta.abstract: if self.remote_field.related_name: related_name = self.remote_field.related_name else: related_name = self.opts.default_related_name if related_name: related_name = related_name % { "class": cls.__name__.lower(), "model_name": cls._meta.model_name.lower(), "app_label": cls._meta.app_label.lower(), } self.remote_field.related_name = related_name if self.remote_field.related_query_name: related_query_name = self.remote_field.related_query_name % { "class": cls.__name__.lower(), "app_label": cls._meta.app_label.lower(), } self.remote_field.related_query_name = related_query_name def resolve_related_class(model, related, field): field.remote_field.model = related field.do_related_class(related, model) lazy_related_operation( resolve_related_class, cls, self.remote_field.model, field=self ) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self._limit_choices_to: kwargs["limit_choices_to"] = self._limit_choices_to if self._related_name is not None: kwargs["related_name"] = self._related_name if self._related_query_name is not None: kwargs["related_query_name"] = self._related_query_name return name, path, args, kwargs def get_forward_related_filter(self, obj): """ Return the keyword arguments that when supplied to self.model.object.filter(), would select all instances related through this field to the remote obj. This is used to build the querysets returned by related descriptors. obj is an instance of self.related_field.model. """ return { "%s__%s" % (self.name, rh_field.name): getattr(obj, rh_field.attname) for _, rh_field in self.related_fields } def get_reverse_related_filter(self, obj): """ Complement to get_forward_related_filter(). Return the keyword arguments that when passed to self.related_field.model.object.filter() select all instances of self.related_field.model related through this field to obj. obj is an instance of self.model. """ base_q = Q.create( [ (rh_field.attname, getattr(obj, lh_field.attname)) for lh_field, rh_field in self.related_fields ] ) descriptor_filter = self.get_extra_descriptor_filter(obj) if isinstance(descriptor_filter, dict): return base_q & Q(**descriptor_filter) elif descriptor_filter: return base_q & descriptor_filter return base_q @property def swappable_setting(self): """ Get the setting that this is powered from for swapping, or None if it's not swapped in / marked with swappable=False. """ if self.swappable: # Work out string form of "to" if isinstance(self.remote_field.model, str): to_string = self.remote_field.model else: to_string = self.remote_field.model._meta.label return apps.get_swappable_settings_name(to_string) return None def set_attributes_from_rel(self): self.name = self.name or ( self.remote_field.model._meta.model_name + "_" + self.remote_field.model._meta.pk.name ) if self.verbose_name is None: self.verbose_name = self.remote_field.model._meta.verbose_name self.remote_field.set_field_name() def do_related_class(self, other, cls): self.set_attributes_from_rel() self.contribute_to_related_class(other, self.remote_field) def get_limit_choices_to(self): """ Return ``limit_choices_to`` for this model field. If it is a callable, it will be invoked and the result will be returned. 
""" if callable(self.remote_field.limit_choices_to): return self.remote_field.limit_choices_to() return self.remote_field.limit_choices_to def formfield(self, **kwargs): """ Pass ``limit_choices_to`` to the field being constructed. Only passes it if there is a type that supports related fields. This is a similar strategy used to pass the ``queryset`` to the field being constructed. """ defaults = {} if hasattr(self.remote_field, "get_related_field"): # If this is a callable, do not invoke it here. Just pass # it in the defaults for when the form class will later be # instantiated. limit_choices_to = self.remote_field.limit_choices_to defaults.update( { "limit_choices_to": limit_choices_to, } ) defaults.update(kwargs) return super().formfield(**defaults) def related_query_name(self): """ Define the name that can be used to identify this related object in a table-spanning query. """ return ( self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name ) @property def target_field(self): """ When filtering against this relation, return the field on the remote model against which the filtering should happen. """ target_fields = self.path_infos[-1].target_fields if len(target_fields) > 1: raise exceptions.FieldError( "The relation has multiple target fields, but only single target field " "was asked for" ) return target_fields[0] def get_cache_name(self): return self.name class ForeignObject(RelatedField): """ Abstraction of the ForeignKey relation to support multi-column relations. """ # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False requires_unique_target = True related_accessor_class = ReverseManyToOneDescriptor forward_related_accessor_class = ForwardManyToOneDescriptor rel_class = ForeignObjectRel def __init__( self, to, on_delete, from_fields, to_fields, rel=None, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, swappable=True, **kwargs, ): if rel is None: rel = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) super().__init__( rel=rel, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, **kwargs, ) self.from_fields = from_fields self.to_fields = to_fields self.swappable = swappable def __copy__(self): obj = super().__copy__() # Remove any cached PathInfo values. obj.__dict__.pop("path_infos", None) obj.__dict__.pop("reverse_path_infos", None) return obj def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_to_fields_exist(), *self._check_unique_target(), ] def _check_to_fields_exist(self): # Skip nonexistent models. if isinstance(self.remote_field.model, str): return [] errors = [] for to_field in self.to_fields: if to_field: try: self.remote_field.model._meta.get_field(to_field) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The to_field '%s' doesn't exist on the related " "model '%s'." 
% (to_field, self.remote_field.model._meta.label), obj=self, id="fields.E312", ) ) return errors def _check_unique_target(self): rel_is_string = isinstance(self.remote_field.model, str) if rel_is_string or not self.requires_unique_target: return [] try: self.foreign_related_fields except exceptions.FieldDoesNotExist: return [] if not self.foreign_related_fields: return [] unique_foreign_fields = { frozenset([f.name]) for f in self.remote_field.model._meta.get_fields() if getattr(f, "unique", False) } unique_foreign_fields.update( {frozenset(ut) for ut in self.remote_field.model._meta.unique_together} ) unique_foreign_fields.update( { frozenset(uc.fields) for uc in self.remote_field.model._meta.total_unique_constraints } ) foreign_fields = {f.name for f in self.foreign_related_fields} has_unique_constraint = any(u <= foreign_fields for u in unique_foreign_fields) if not has_unique_constraint and len(self.foreign_related_fields) > 1: field_combination = ", ".join( "'%s'" % rel_field.name for rel_field in self.foreign_related_fields ) model_name = self.remote_field.model.__name__ return [ checks.Error( "No subset of the fields %s on model '%s' is unique." % (field_combination, model_name), hint=( "Mark a single field as unique=True or add a set of " "fields to a unique constraint (via unique_together " "or a UniqueConstraint (without condition) in the " "model Meta.constraints)." ), obj=self, id="fields.E310", ) ] elif not has_unique_constraint: field_name = self.foreign_related_fields[0].name model_name = self.remote_field.model.__name__ return [ checks.Error( "'%s.%s' must be unique because it is referenced by " "a foreign key." % (model_name, field_name), hint=( "Add unique=True to this field or add a " "UniqueConstraint (without condition) in the model " "Meta.constraints." ), obj=self, id="fields.E311", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() kwargs["on_delete"] = self.remote_field.on_delete kwargs["from_fields"] = self.from_fields kwargs["to_fields"] = self.to_fields if self.remote_field.parent_link: kwargs["parent_link"] = self.remote_field.parent_link if isinstance(self.remote_field.model, str): if "." in self.remote_field.model: app_label, model_name = self.remote_field.model.split(".") kwargs["to"] = "%s.%s" % (app_label, model_name.lower()) else: kwargs["to"] = self.remote_field.model.lower() else: kwargs["to"] = self.remote_field.model._meta.label_lower # If swappable is True, then see if we're actually pointing to the target # of a swap. 
swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error if hasattr(kwargs["to"], "setting_name"): if kwargs["to"].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ForeignKey pointing to a model " "that is swapped in place of more than one model (%s and %s)" % (kwargs["to"].setting_name, swappable_setting) ) # Set it kwargs["to"] = SettingsReference( kwargs["to"], swappable_setting, ) return name, path, args, kwargs def resolve_related_fields(self): if not self.from_fields or len(self.from_fields) != len(self.to_fields): raise ValueError( "Foreign Object from and to fields must be the same non-zero length" ) if isinstance(self.remote_field.model, str): raise ValueError( "Related model %r cannot be resolved" % self.remote_field.model ) related_fields = [] for index in range(len(self.from_fields)): from_field_name = self.from_fields[index] to_field_name = self.to_fields[index] from_field = ( self if from_field_name == RECURSIVE_RELATIONSHIP_CONSTANT else self.opts.get_field(from_field_name) ) to_field = ( self.remote_field.model._meta.pk if to_field_name is None else self.remote_field.model._meta.get_field(to_field_name) ) related_fields.append((from_field, to_field)) return related_fields @cached_property def related_fields(self): return self.resolve_related_fields() @cached_property def reverse_related_fields(self): return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields] @cached_property def local_related_fields(self): return tuple(lhs_field for lhs_field, rhs_field in self.related_fields) @cached_property def foreign_related_fields(self): return tuple( rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field ) def get_local_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.local_related_fields) def get_foreign_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.foreign_related_fields) @staticmethod def get_instance_value_for_fields(instance, fields): ret = [] opts = instance._meta for field in fields: # Gotcha: in some cases (like fixture loading) a model can have # different values in parent_ptr_id and parent's id. So, use # instance.pk (that is, parent_ptr_id) when asked for instance.id. if field.primary_key: possible_parent_link = opts.get_ancestor_link(field.model) if ( not possible_parent_link or possible_parent_link.primary_key or possible_parent_link.model._meta.abstract ): ret.append(instance.pk) continue ret.append(getattr(instance, field.attname)) return tuple(ret) def get_attname_column(self): attname, column = super().get_attname_column() return attname, None def get_joining_columns(self, reverse_join=False): source = self.reverse_related_fields if reverse_join else self.related_fields return tuple( (lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source ) def get_reverse_joining_columns(self): return self.get_joining_columns(reverse_join=True) def get_extra_descriptor_filter(self, instance): """ Return an extra filter condition for related object fetching when user does 'instance.fieldname', that is the extra filter is used in the descriptor of the field. The filter should be either a dict usable in .filter(**kwargs) call or a Q-object. The condition will be ANDed together with the relation's joining columns. A parallel method is get_extra_restriction() which is used in JOIN and subquery conditions. 
""" return {} def get_extra_restriction(self, alias, related_alias): """ Return a pair condition used for joining and subquery pushdown. The condition is something that responds to as_sql(compiler, connection) method. Note that currently referring both the 'alias' and 'related_alias' will not work in some conditions, like subquery pushdown. A parallel method is get_extra_descriptor_filter() which is used in instance.fieldname related object fetching. """ return None def get_path_info(self, filtered_relation=None): """Get path from this field to the related model.""" opts = self.remote_field.model._meta from_opts = self.model._meta return [ PathInfo( from_opts=from_opts, to_opts=opts, target_fields=self.foreign_related_fields, join_field=self, m2m=False, direct=True, filtered_relation=filtered_relation, ) ] @cached_property def path_infos(self): return self.get_path_info() def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [ PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, ) ] @cached_property def reverse_path_infos(self): return self.get_reverse_path_info() @classmethod @functools.lru_cache(maxsize=None) def get_class_lookups(cls): bases = inspect.getmro(cls) bases = bases[: bases.index(ForeignObject) + 1] class_lookups = [parent.__dict__.get("class_lookups", {}) for parent in bases] return cls.merge_dicts(class_lookups) def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) setattr(cls, self.name, self.forward_related_accessor_class(self)) def contribute_to_related_class(self, cls, related): # Internal FK's - i.e., those with a related name ending with '+' - # and swapped models don't get a related descriptor. if ( not self.remote_field.is_hidden() and not related.related_model._meta.swapped ): setattr( cls._meta.concrete_model, related.get_accessor_name(), self.related_accessor_class(related), ) # While 'limit_choices_to' might be a callable, simply pass # it along for later - this is too early because it's still # model load time. if self.remote_field.limit_choices_to: cls._meta.related_fkey_lookups.append( self.remote_field.limit_choices_to ) ForeignObject.register_lookup(RelatedIn) ForeignObject.register_lookup(RelatedExact) ForeignObject.register_lookup(RelatedLessThan) ForeignObject.register_lookup(RelatedGreaterThan) ForeignObject.register_lookup(RelatedGreaterThanOrEqual) ForeignObject.register_lookup(RelatedLessThanOrEqual) ForeignObject.register_lookup(RelatedIsNull) class ForeignKey(ForeignObject): """ Provide a many-to-one relation by adding a column to the local model to hold the remote value. By default ForeignKey will target the pk of the remote model but this behavior can be changed by using the ``to_field`` argument. 
""" descriptor_class = ForeignKeyDeferredAttribute # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False rel_class = ManyToOneRel empty_strings_allowed = False default_error_messages = { "invalid": _("%(model)s instance with %(field)s %(value)r does not exist.") } description = _("Foreign Key (type determined by related field)") def __init__( self, to, on_delete, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, to_field=None, db_constraint=True, **kwargs, ): try: to._meta.model_name except AttributeError: if not isinstance(to, str): raise TypeError( "%s(%r) is invalid. First parameter to ForeignKey must be " "either a model, a model name, or the string %r" % ( self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT, ) ) else: # For backwards compatibility purposes, we need to *try* and set # the to_field during FK construction. It won't be guaranteed to # be correct until contribute_to_class is called. Refs #12190. to_field = to_field or (to._meta.pk and to._meta.pk.name) if not callable(on_delete): raise TypeError("on_delete must be callable.") kwargs["rel"] = self.rel_class( self, to, to_field, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) kwargs.setdefault("db_index", True) super().__init__( to, on_delete, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, from_fields=[RECURSIVE_RELATIONSHIP_CONSTANT], to_fields=[to_field], **kwargs, ) self.db_constraint = db_constraint def __class_getitem__(cls, *args, **kwargs): return cls def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_on_delete(), *self._check_unique(), ] def _check_on_delete(self): on_delete = getattr(self.remote_field, "on_delete", None) if on_delete == SET_NULL and not self.null: return [ checks.Error( "Field specifies on_delete=SET_NULL, but cannot be null.", hint=( "Set null=True argument on the field, or change the on_delete " "rule." ), obj=self, id="fields.E320", ) ] elif on_delete == SET_DEFAULT and not self.has_default(): return [ checks.Error( "Field specifies on_delete=SET_DEFAULT, but has no default value.", hint="Set a default value, or change the on_delete rule.", obj=self, id="fields.E321", ) ] else: return [] def _check_unique(self, **kwargs): return ( [ checks.Warning( "Setting unique=True on a ForeignKey has the same effect as using " "a OneToOneField.", hint=( "ForeignKey(unique=True) is usually better served by a " "OneToOneField." ), obj=self, id="fields.W342", ) ] if self.unique else [] ) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["to_fields"] del kwargs["from_fields"] # Handle the simpler arguments if self.db_index: del kwargs["db_index"] else: kwargs["db_index"] = False if self.db_constraint is not True: kwargs["db_constraint"] = self.db_constraint # Rel needs more work. 
to_meta = getattr(self.remote_field.model, "_meta", None) if self.remote_field.field_name and ( not to_meta or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name) ): kwargs["to_field"] = self.remote_field.field_name return name, path, args, kwargs def to_python(self, value): return self.target_field.to_python(value) @property def target_field(self): return self.foreign_related_fields[0] def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [ PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, ) ] def validate(self, value, model_instance): if self.remote_field.parent_link: return super().validate(value, model_instance) if value is None: return using = router.db_for_read(self.remote_field.model, instance=model_instance) qs = self.remote_field.model._base_manager.using(using).filter( **{self.remote_field.field_name: value} ) qs = qs.complex_filter(self.get_limit_choices_to()) if not qs.exists(): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={ "model": self.remote_field.model._meta.verbose_name, "pk": value, "field": self.remote_field.field_name, "value": value, }, # 'pk' is included for backwards compatibility ) def resolve_related_fields(self): related_fields = super().resolve_related_fields() for from_field, to_field in related_fields: if ( to_field and to_field.model != self.remote_field.model._meta.concrete_model ): raise exceptions.FieldError( "'%s.%s' refers to field '%s' which is not local to model " "'%s'." % ( self.model._meta.label, self.name, to_field.name, self.remote_field.model._meta.concrete_model._meta.label, ) ) return related_fields def get_attname(self): return "%s_id" % self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_default(self): """Return the to_field if the default value is an object.""" field_default = super().get_default() if isinstance(field_default, self.remote_field.model): return getattr(field_default, self.target_field.attname) return field_default def get_db_prep_save(self, value, connection): if value is None or ( value == "" and ( not self.target_field.empty_strings_allowed or connection.features.interprets_empty_strings_as_nulls ) ): return None else: return self.target_field.get_db_prep_save(value, connection=connection) def get_db_prep_value(self, value, connection, prepared=False): return self.target_field.get_db_prep_value(value, connection, prepared) def get_prep_value(self, value): return self.target_field.get_prep_value(value) def contribute_to_related_class(self, cls, related): super().contribute_to_related_class(cls, related) if self.remote_field.field_name is None: self.remote_field.field_name = cls._meta.pk.name def formfield(self, *, using=None, **kwargs): if isinstance(self.remote_field.model, str): raise ValueError( "Cannot create form field for %r yet, because " "its related model %r has not been loaded yet" % (self.name, self.remote_field.model) ) return super().formfield( **{ "form_class": forms.ModelChoiceField, "queryset": self.remote_field.model._default_manager.using(using), "to_field_name": self.remote_field.field_name, **kwargs, "blank": self.blank, } ) def db_check(self, connection): return None def db_type(self, connection): return 
self.target_field.rel_db_type(connection=connection) def db_parameters(self, connection): target_db_parameters = self.target_field.db_parameters(connection) return { "type": self.db_type(connection), "check": self.db_check(connection), "collation": target_db_parameters.get("collation"), } def convert_empty_strings(self, value, expression, connection): if (not value) and isinstance(value, str): return None return value def get_db_converters(self, connection): converters = super().get_db_converters(connection) if connection.features.interprets_empty_strings_as_nulls: converters += [self.convert_empty_strings] return converters def get_col(self, alias, output_field=None): if output_field is None: output_field = self.target_field while isinstance(output_field, ForeignKey): output_field = output_field.target_field if output_field is self: raise ValueError("Cannot resolve output_field.") return super().get_col(alias, output_field) class OneToOneField(ForeignKey): """ A OneToOneField is essentially the same as a ForeignKey, with the exception that it always carries a "unique" constraint with it and the reverse relation always returns the object pointed to (since there will only ever be one), rather than returning a list. """ # Field flags many_to_many = False many_to_one = False one_to_many = False one_to_one = True related_accessor_class = ReverseOneToOneDescriptor forward_related_accessor_class = ForwardOneToOneDescriptor rel_class = OneToOneRel description = _("One-to-one relationship") def __init__(self, to, on_delete, to_field=None, **kwargs): kwargs["unique"] = True super().__init__(to, on_delete, to_field=to_field, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if "unique" in kwargs: del kwargs["unique"] return name, path, args, kwargs def formfield(self, **kwargs): if self.remote_field.parent_link: return None return super().formfield(**kwargs) def save_form_data(self, instance, data): if isinstance(data, self.remote_field.model): setattr(instance, self.name, data) else: setattr(instance, self.attname, data) # Remote field object must be cleared otherwise Model.save() # will reassign attname using the related object pk. if data is None: setattr(instance, self.name, data) def _check_unique(self, **kwargs): # Override ForeignKey since check isn't applicable here. return [] def create_many_to_many_intermediary_model(field, klass): from django.db import models def set_managed(model, related, through): through._meta.managed = model._meta.managed or related._meta.managed to_model = resolve_relation(klass, field.remote_field.model) name = "%s_%s" % (klass._meta.object_name, field.name) lazy_related_operation(set_managed, klass, to_model, name) to = make_model_tuple(to_model)[1] from_ = klass._meta.model_name if to == from_: to = "to_%s" % to from_ = "from_%s" % from_ meta = type( "Meta", (), { "db_table": field._get_m2m_db_table(klass._meta), "auto_created": klass, "app_label": klass._meta.app_label, "db_tablespace": klass._meta.db_tablespace, "unique_together": (from_, to), "verbose_name": _("%(from)s-%(to)s relationship") % {"from": from_, "to": to}, "verbose_name_plural": _("%(from)s-%(to)s relationships") % {"from": from_, "to": to}, "apps": field.model._meta.apps, }, ) # Construct and return the new class. 
return type( name, (models.Model,), { "Meta": meta, "__module__": klass.__module__, from_: models.ForeignKey( klass, related_name="%s+" % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ), to: models.ForeignKey( to_model, related_name="%s+" % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ), }, ) class ManyToManyField(RelatedField): """ Provide a many-to-many relation by using an intermediary model that holds two ForeignKey fields pointed at the two sides of the relation. Unless a ``through`` model was provided, ManyToManyField will use the create_many_to_many_intermediary_model factory to automatically generate the intermediary model. """ # Field flags many_to_many = True many_to_one = False one_to_many = False one_to_one = False rel_class = ManyToManyRel description = _("Many-to-many relationship") def __init__( self, to, related_name=None, related_query_name=None, limit_choices_to=None, symmetrical=None, through=None, through_fields=None, db_constraint=True, db_table=None, swappable=True, **kwargs, ): try: to._meta except AttributeError: if not isinstance(to, str): raise TypeError( "%s(%r) is invalid. First parameter to ManyToManyField " "must be either a model, a model name, or the string %r" % ( self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT, ) ) if symmetrical is None: symmetrical = to == RECURSIVE_RELATIONSHIP_CONSTANT if through is not None and db_table is not None: raise ValueError( "Cannot specify a db_table if an intermediary model is used." ) kwargs["rel"] = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, symmetrical=symmetrical, through=through, through_fields=through_fields, db_constraint=db_constraint, ) self.has_null_arg = "null" in kwargs super().__init__( related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, **kwargs, ) self.db_table = db_table self.swappable = swappable def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_unique(**kwargs), *self._check_relationship_model(**kwargs), *self._check_ignored_options(**kwargs), *self._check_table_uniqueness(**kwargs), ] def _check_unique(self, **kwargs): if self.unique: return [ checks.Error( "ManyToManyFields cannot be unique.", obj=self, id="fields.E330", ) ] return [] def _check_ignored_options(self, **kwargs): warnings = [] if self.has_null_arg: warnings.append( checks.Warning( "null has no effect on ManyToManyField.", obj=self, id="fields.W340", ) ) if self._validators: warnings.append( checks.Warning( "ManyToManyField does not support validators.", obj=self, id="fields.W341", ) ) if self.remote_field.symmetrical and self._related_name: warnings.append( checks.Warning( "related_name has no effect on ManyToManyField " 'with a symmetrical relationship, e.g. to "self".', obj=self, id="fields.W345", ) ) return warnings def _check_relationship_model(self, from_model=None, **kwargs): if hasattr(self.remote_field.through, "_meta"): qualified_model_name = "%s.%s" % ( self.remote_field.through._meta.app_label, self.remote_field.through.__name__, ) else: qualified_model_name = self.remote_field.through errors = [] if self.remote_field.through not in self.opts.apps.get_models( include_auto_created=True ): # The relationship model is not installed. 
errors.append( checks.Error( "Field specifies a many-to-many relation through model " "'%s', which has not been installed." % qualified_model_name, obj=self, id="fields.E331", ) ) else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." ) # Set some useful local variables to_model = resolve_relation(from_model, self.remote_field.model) from_model_name = from_model._meta.object_name if isinstance(to_model, str): to_model_name = to_model else: to_model_name = to_model._meta.object_name relationship_model_name = self.remote_field.through._meta.object_name self_referential = from_model == to_model # Count foreign keys in intermediate model if self_referential: seen_self = sum( from_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) if seen_self > 2 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than two foreign keys " "to '%s', which is ambiguous. You must specify " "which two foreign keys Django should use via the " "through_fields keyword argument." % (self, from_model_name), hint=( "Use through_fields to specify which two foreign keys " "Django should use." ), obj=self.remote_field.through, id="fields.E333", ) ) else: # Count foreign keys in relationship model seen_from = sum( from_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) seen_to = sum( to_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) if seen_from > 1 and not self.remote_field.through_fields: errors.append( checks.Error( ( "The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "from '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument." ) % (self, from_model_name), hint=( "If you want to create a recursive relationship, " 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id="fields.E334", ) ) if seen_to > 1 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "to '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument." % (self, to_model_name), hint=( "If you want to create a recursive relationship, " 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id="fields.E335", ) ) if seen_from == 0 or seen_to == 0: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it does not have a foreign key to '%s' or '%s'." % (self, from_model_name, to_model_name), obj=self.remote_field.through, id="fields.E336", ) ) # Validate `through_fields`. if self.remote_field.through_fields is not None: # Validate that we're given an iterable of at least two items # and that none of them is "falsy". if not ( len(self.remote_field.through_fields) >= 2 and self.remote_field.through_fields[0] and self.remote_field.through_fields[1] ): errors.append( checks.Error( "Field specifies 'through_fields' but does not provide " "the names of the two link fields that should be used " "for the relation through model '%s'." 
% qualified_model_name, hint=( "Make sure you specify 'through_fields' as " "through_fields=('field1', 'field2')" ), obj=self, id="fields.E337", ) ) # Validate the given through fields -- they should be actual # fields on the through model, and also be foreign keys to the # expected models. else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." ) source, through, target = ( from_model, self.remote_field.through, self.remote_field.model, ) source_field_name, target_field_name = self.remote_field.through_fields[ :2 ] for field_name, related_model in ( (source_field_name, source), (target_field_name, target), ): possible_field_names = [] for f in through._meta.fields: if ( hasattr(f, "remote_field") and getattr(f.remote_field, "model", None) == related_model ): possible_field_names.append(f.name) if possible_field_names: hint = ( "Did you mean one of the following foreign keys to '%s': " "%s?" % ( related_model._meta.object_name, ", ".join(possible_field_names), ) ) else: hint = None try: field = through._meta.get_field(field_name) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The intermediary model '%s' has no field '%s'." % (qualified_model_name, field_name), hint=hint, obj=self, id="fields.E338", ) ) else: if not ( hasattr(field, "remote_field") and getattr(field.remote_field, "model", None) == related_model ): errors.append( checks.Error( "'%s.%s' is not a foreign key to '%s'." % ( through._meta.object_name, field_name, related_model._meta.object_name, ), hint=hint, obj=self, id="fields.E339", ) ) return errors def _check_table_uniqueness(self, **kwargs): if ( isinstance(self.remote_field.through, str) or not self.remote_field.through._meta.managed ): return [] registered_tables = { model._meta.db_table: model for model in self.opts.apps.get_models(include_auto_created=True) if model != self.remote_field.through and model._meta.managed } m2m_db_table = self.m2m_db_table() model = registered_tables.get(m2m_db_table) # The second condition allows multiple m2m relations on a model if # some point to a through model that proxies another through model. if ( model and model._meta.concrete_model != self.remote_field.through._meta.concrete_model ): if model._meta.auto_created: def _get_field_name(model): for field in model._meta.auto_created._meta.many_to_many: if field.remote_field.through is model: return field.name opts = model._meta.auto_created._meta clashing_obj = "%s.%s" % (opts.label, _get_field_name(model)) else: clashing_obj = model._meta.label if settings.DATABASE_ROUTERS: error_class, error_id = checks.Warning, "fields.W344" error_hint = ( "You have configured settings.DATABASE_ROUTERS. Verify " "that the table of %r is correctly routed to a separate " "database." % clashing_obj ) else: error_class, error_id = checks.Error, "fields.E340" error_hint = None return [ error_class( "The field's intermediary table '%s' clashes with the " "table name of '%s'." % (m2m_db_table, clashing_obj), obj=self, hint=error_hint, id=error_id, ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() # Handle the simpler arguments. if self.db_table is not None: kwargs["db_table"] = self.db_table if self.remote_field.db_constraint is not True: kwargs["db_constraint"] = self.remote_field.db_constraint # Lowercase model names as they should be treated as case-insensitive. if isinstance(self.remote_field.model, str): if "." 
in self.remote_field.model: app_label, model_name = self.remote_field.model.split(".") kwargs["to"] = "%s.%s" % (app_label, model_name.lower()) else: kwargs["to"] = self.remote_field.model.lower() else: kwargs["to"] = self.remote_field.model._meta.label_lower if getattr(self.remote_field, "through", None) is not None: if isinstance(self.remote_field.through, str): kwargs["through"] = self.remote_field.through elif not self.remote_field.through._meta.auto_created: kwargs["through"] = self.remote_field.through._meta.label # If swappable is True, then see if we're actually pointing to the target # of a swap. swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error. if hasattr(kwargs["to"], "setting_name"): if kwargs["to"].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ManyToManyField pointing to a " "model that is swapped in place of more than one model " "(%s and %s)" % (kwargs["to"].setting_name, swappable_setting) ) kwargs["to"] = SettingsReference( kwargs["to"], swappable_setting, ) return name, path, args, kwargs def _get_path_info(self, direct=False, filtered_relation=None): """Called by both direct and indirect m2m traversal.""" int_model = self.remote_field.through linkfield1 = int_model._meta.get_field(self.m2m_field_name()) linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name()) if direct: join1infos = linkfield1.reverse_path_infos if filtered_relation: join2infos = linkfield2.get_path_info(filtered_relation) else: join2infos = linkfield2.path_infos else: join1infos = linkfield2.reverse_path_infos if filtered_relation: join2infos = linkfield1.get_path_info(filtered_relation) else: join2infos = linkfield1.path_infos # Get join infos between the last model of join 1 and the first model # of join 2. Assume the only reason these may differ is due to model # inheritance. join1_final = join1infos[-1].to_opts join2_initial = join2infos[0].from_opts if join1_final is join2_initial: intermediate_infos = [] elif issubclass(join1_final.model, join2_initial.model): intermediate_infos = join1_final.get_path_to_parent(join2_initial.model) else: intermediate_infos = join2_initial.get_path_from_parent(join1_final.model) return [*join1infos, *intermediate_infos, *join2infos] def get_path_info(self, filtered_relation=None): return self._get_path_info(direct=True, filtered_relation=filtered_relation) @cached_property def path_infos(self): return self.get_path_info() def get_reverse_path_info(self, filtered_relation=None): return self._get_path_info(direct=False, filtered_relation=filtered_relation) @cached_property def reverse_path_infos(self): return self.get_reverse_path_info() def _get_m2m_db_table(self, opts): """ Function that can be curried to provide the m2m table name for this relation. """ if self.remote_field.through is not None: return self.remote_field.through._meta.db_table elif self.db_table: return self.db_table else: m2m_table_name = "%s_%s" % (utils.strip_quotes(opts.db_table), self.name) return utils.truncate_name(m2m_table_name, connection.ops.max_name_length()) def _get_m2m_attr(self, related, attr): """ Function that can be curried to provide the source accessor or DB column name for the m2m table. 
""" cache_attr = "_m2m_%s_cache" % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[0] else: link_field_name = None for f in self.remote_field.through._meta.fields: if ( f.is_relation and f.remote_field.model == related.related_model and (link_field_name is None or link_field_name == f.name) ): setattr(self, cache_attr, getattr(f, attr)) return getattr(self, cache_attr) def _get_m2m_reverse_attr(self, related, attr): """ Function that can be curried to provide the related accessor or DB column name for the m2m table. """ cache_attr = "_m2m_reverse_%s_cache" % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) found = False if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[1] else: link_field_name = None for f in self.remote_field.through._meta.fields: if f.is_relation and f.remote_field.model == related.model: if link_field_name is None and related.related_model == related.model: # If this is an m2m-intermediate to self, # the first foreign key you find will be # the source column. Keep searching for # the second foreign key. if found: setattr(self, cache_attr, getattr(f, attr)) break else: found = True elif link_field_name is None or link_field_name == f.name: setattr(self, cache_attr, getattr(f, attr)) break return getattr(self, cache_attr) def contribute_to_class(self, cls, name, **kwargs): # To support multiple relations to self, it's useful to have a non-None # related name on symmetrical relations for internal reasons. The # concept doesn't make a lot of sense externally ("you want me to # specify *what* on my non-reversible relation?!"), so we set it up # automatically. The funky name reduces the chance of an accidental # clash. if self.remote_field.symmetrical and ( self.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT or self.remote_field.model == cls._meta.object_name ): self.remote_field.related_name = "%s_rel_+" % name elif self.remote_field.is_hidden(): # If the backwards relation is disabled, replace the original # related_name with one generated from the m2m field name. Django # still uses backwards relations internally and we need to avoid # clashes between multiple m2m fields with related_name == '+'. self.remote_field.related_name = "_%s_%s_%s_+" % ( cls._meta.app_label, cls.__name__.lower(), name, ) super().contribute_to_class(cls, name, **kwargs) # The intermediate m2m model is not auto created if: # 1) There is a manually specified intermediate, or # 2) The class owning the m2m field is abstract. # 3) The class owning the m2m field has been swapped out. if not cls._meta.abstract: if self.remote_field.through: def resolve_through_model(_, model, field): field.remote_field.through = model lazy_related_operation( resolve_through_model, cls, self.remote_field.through, field=self ) elif not cls._meta.swapped: self.remote_field.through = create_many_to_many_intermediary_model( self, cls ) # Add the descriptor for the m2m relation. setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False)) # Set up the accessor for the m2m table name for the relation. self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta) def contribute_to_related_class(self, cls, related): # Internal M2Ms (i.e., those with a related name ending with '+') # and swapped models don't get a related descriptor. 
if ( not self.remote_field.is_hidden() and not related.related_model._meta.swapped ): setattr( cls, related.get_accessor_name(), ManyToManyDescriptor(self.remote_field, reverse=True), ) # Set up the accessors for the column names on the m2m table. self.m2m_column_name = partial(self._get_m2m_attr, related, "column") self.m2m_reverse_name = partial(self._get_m2m_reverse_attr, related, "column") self.m2m_field_name = partial(self._get_m2m_attr, related, "name") self.m2m_reverse_field_name = partial( self._get_m2m_reverse_attr, related, "name" ) get_m2m_rel = partial(self._get_m2m_attr, related, "remote_field") self.m2m_target_field_name = lambda: get_m2m_rel().field_name get_m2m_reverse_rel = partial( self._get_m2m_reverse_attr, related, "remote_field" ) self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name def set_attributes_from_rel(self): pass def value_from_object(self, obj): return [] if obj.pk is None else list(getattr(obj, self.attname).all()) def save_form_data(self, instance, data): getattr(instance, self.attname).set(data) def formfield(self, *, using=None, **kwargs): defaults = { "form_class": forms.ModelMultipleChoiceField, "queryset": self.remote_field.model._default_manager.using(using), **kwargs, } # If initial is passed in, it's a list of related objects, but the # MultipleChoiceField takes a list of IDs. if defaults.get("initial") is not None: initial = defaults["initial"] if callable(initial): initial = initial() defaults["initial"] = [i.pk for i in initial] return super().formfield(**defaults) def db_check(self, connection): return None def db_type(self, connection): # A ManyToManyField is not represented by a single column, # so return None. return None def db_parameters(self, connection): return {"type": None, "check": None}
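
# A minimal sketch, kept as comments because model classes need a configured
# app registry, of two behaviours defined above: the "%(class)s" /
# "%(app_label)s" / "%(model_name)s" placeholder substitution performed by
# RelatedField.contribute_to_class(), and the hidden through model produced by
# create_many_to_many_intermediary_model(). The "library" app and the models
# below are hypothetical.
#
#   class Base(models.Model):
#       author = models.ForeignKey(
#           "library.Author",
#           on_delete=models.CASCADE,
#           related_name="%(app_label)s_%(class)s_set",
#       )
#
#       class Meta:
#           abstract = True
#           app_label = "library"
#
#   class Book(Base):
#       tags = models.ManyToManyField("library.Tag")
#
#       class Meta:
#           app_label = "library"
#
# The reverse accessor on Author resolves to "library_book_set". The
# ManyToManyField auto-creates a through model named "Book_tags" whose
# db_table defaults to "library_book_tags", with foreign keys "book" and
# "tag" and unique_together on that pair.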
234b32c397b69d4e6b1d1ef7777631f6209f2210b95087a6b0b1247512d576ae
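
# A minimal sketch, assuming a hypothetical "Dog" model with a JSONField named
# "data", of how the lookups and key transforms defined in this module are
# reached from the ORM; kept as comments since models need a configured app
# registry.
#
#   from django.db.models.fields.json import KT
#
#   Dog.objects.filter(data__has_key="breed")            # HasKey lookup
#   Dog.objects.filter(data__owner__name="Bob")          # KeyTransform chain + KeyTransformExact
#   Dog.objects.annotate(owner_name=KT("data__owner__name"))  # KeyTextTransform via KT()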
import json from django import forms from django.core import checks, exceptions from django.db import NotSupportedError, connections, router from django.db.models import lookups from django.db.models.constants import LOOKUP_SEP from django.db.models.fields import TextField from django.db.models.lookups import PostgresOperatorLookup, Transform from django.utils.translation import gettext_lazy as _ from . import Field from .mixins import CheckFieldDefaultMixin __all__ = ["JSONField"] class JSONField(CheckFieldDefaultMixin, Field): empty_strings_allowed = False description = _("A JSON object") default_error_messages = { "invalid": _("Value must be valid JSON."), } _default_hint = ("dict", "{}") def __init__( self, verbose_name=None, name=None, encoder=None, decoder=None, **kwargs, ): if encoder and not callable(encoder): raise ValueError("The encoder parameter must be a callable object.") if decoder and not callable(decoder): raise ValueError("The decoder parameter must be a callable object.") self.encoder = encoder self.decoder = decoder super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super().check(**kwargs) databases = kwargs.get("databases") or [] errors.extend(self._check_supported(databases)) return errors def _check_supported(self, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if ( self.model._meta.required_db_vendor and self.model._meta.required_db_vendor != connection.vendor ): continue if not ( "supports_json_field" in self.model._meta.required_db_features or connection.features.supports_json_field ): errors.append( checks.Error( "%s does not support JSONFields." % connection.display_name, obj=self.model, id="fields.E180", ) ) return errors def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.encoder is not None: kwargs["encoder"] = self.encoder if self.decoder is not None: kwargs["decoder"] = self.decoder return name, path, args, kwargs def from_db_value(self, value, expression, connection): if value is None: return value # Some backends (SQLite at least) extract non-string values in their # SQL datatypes. 
if isinstance(expression, KeyTransform) and not isinstance(value, str): return value try: return json.loads(value, cls=self.decoder) except json.JSONDecodeError: return value def get_internal_type(self): return "JSONField" def get_prep_value(self, value): if value is None: return value return json.dumps(value, cls=self.encoder) def get_transform(self, name): transform = super().get_transform(name) if transform: return transform return KeyTransformFactory(name) def validate(self, value, model_instance): super().validate(value, model_instance) try: json.dumps(value, cls=self.encoder) except TypeError: raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def value_to_string(self, obj): return self.value_from_object(obj) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.JSONField, "encoder": self.encoder, "decoder": self.decoder, **kwargs, } ) def compile_json_path(key_transforms, include_root=True): path = ["$"] if include_root else [] for key_transform in key_transforms: try: num = int(key_transform) except ValueError: # non-integer path.append(".") path.append(json.dumps(key_transform)) else: path.append("[%s]" % num) return "".join(path) class DataContains(PostgresOperatorLookup): lookup_name = "contains" postgres_operator = "@>" def as_sql(self, compiler, connection): if not connection.features.supports_json_field_contains: raise NotSupportedError( "contains lookup is not supported on this database backend." ) lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params = tuple(lhs_params) + tuple(rhs_params) return "JSON_CONTAINS(%s, %s)" % (lhs, rhs), params class ContainedBy(PostgresOperatorLookup): lookup_name = "contained_by" postgres_operator = "<@" def as_sql(self, compiler, connection): if not connection.features.supports_json_field_contains: raise NotSupportedError( "contained_by lookup is not supported on this database backend." ) lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params = tuple(rhs_params) + tuple(lhs_params) return "JSON_CONTAINS(%s, %s)" % (rhs, lhs), params class HasKeyLookup(PostgresOperatorLookup): logical_operator = None def compile_json_path_final_key(self, key_transform): # Compile the final key without interpreting ints as array elements. return ".%s" % json.dumps(key_transform) def as_sql(self, compiler, connection, template=None): # Process JSON path from the left-hand side. if isinstance(self.lhs, KeyTransform): lhs, lhs_params, lhs_key_transforms = self.lhs.preprocess_lhs( compiler, connection ) lhs_json_path = compile_json_path(lhs_key_transforms) else: lhs, lhs_params = self.process_lhs(compiler, connection) lhs_json_path = "$" sql = template % lhs # Process JSON path from the right-hand side. rhs = self.rhs rhs_params = [] if not isinstance(rhs, (list, tuple)): rhs = [rhs] for key in rhs: if isinstance(key, KeyTransform): *_, rhs_key_transforms = key.preprocess_lhs(compiler, connection) else: rhs_key_transforms = [key] *rhs_key_transforms, final_key = rhs_key_transforms rhs_json_path = compile_json_path(rhs_key_transforms, include_root=False) rhs_json_path += self.compile_json_path_final_key(final_key) rhs_params.append(lhs_json_path + rhs_json_path) # Add condition for each key. 
if self.logical_operator: sql = "(%s)" % self.logical_operator.join([sql] * len(rhs_params)) return sql, tuple(lhs_params) + tuple(rhs_params) def as_mysql(self, compiler, connection): return self.as_sql( compiler, connection, template="JSON_CONTAINS_PATH(%s, 'one', %%s)" ) def as_oracle(self, compiler, connection): sql, params = self.as_sql( compiler, connection, template="JSON_EXISTS(%s, '%%s')" ) # Add paths directly into SQL because path expressions cannot be passed # as bind variables on Oracle. return sql % tuple(params), [] def as_postgresql(self, compiler, connection): if isinstance(self.rhs, KeyTransform): *_, rhs_key_transforms = self.rhs.preprocess_lhs(compiler, connection) for key in rhs_key_transforms[:-1]: self.lhs = KeyTransform(key, self.lhs) self.rhs = rhs_key_transforms[-1] return super().as_postgresql(compiler, connection) def as_sqlite(self, compiler, connection): return self.as_sql( compiler, connection, template="JSON_TYPE(%s, %%s) IS NOT NULL" ) class HasKey(HasKeyLookup): lookup_name = "has_key" postgres_operator = "?" prepare_rhs = False class HasKeys(HasKeyLookup): lookup_name = "has_keys" postgres_operator = "?&" logical_operator = " AND " def get_prep_lookup(self): return [str(item) for item in self.rhs] class HasAnyKeys(HasKeys): lookup_name = "has_any_keys" postgres_operator = "?|" logical_operator = " OR " class HasKeyOrArrayIndex(HasKey): def compile_json_path_final_key(self, key_transform): return compile_json_path([key_transform], include_root=False) class CaseInsensitiveMixin: """ Mixin to allow case-insensitive comparison of JSON values on MySQL. MySQL handles strings used in JSON context using the utf8mb4_bin collation. Because utf8mb4_bin is a binary collation, comparison of JSON values is case-sensitive. """ def process_lhs(self, compiler, connection): lhs, lhs_params = super().process_lhs(compiler, connection) if connection.vendor == "mysql": return "LOWER(%s)" % lhs, lhs_params return lhs, lhs_params def process_rhs(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) if connection.vendor == "mysql": return "LOWER(%s)" % rhs, rhs_params return rhs, rhs_params class JSONExact(lookups.Exact): can_use_none_as_rhs = True def process_rhs(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) # Treat None lookup values as null. if rhs == "%s" and rhs_params == [None]: rhs_params = ["null"] if connection.vendor == "mysql": func = ["JSON_EXTRACT(%s, '$')"] * len(rhs_params) rhs = rhs % tuple(func) return rhs, rhs_params class JSONIContains(CaseInsensitiveMixin, lookups.IContains): pass JSONField.register_lookup(DataContains) JSONField.register_lookup(ContainedBy) JSONField.register_lookup(HasKey) JSONField.register_lookup(HasKeys) JSONField.register_lookup(HasAnyKeys) JSONField.register_lookup(JSONExact) JSONField.register_lookup(JSONIContains) class KeyTransform(Transform): postgres_operator = "->" postgres_nested_operator = "#>" def __init__(self, key_name, *args, **kwargs): super().__init__(*args, **kwargs) self.key_name = str(key_name) def preprocess_lhs(self, compiler, connection): key_transforms = [self.key_name] previous = self.lhs while isinstance(previous, KeyTransform): key_transforms.insert(0, previous.key_name) previous = previous.lhs lhs, params = compiler.compile(previous) if connection.vendor == "oracle": # Escape string-formatting. 
key_transforms = [key.replace("%", "%%") for key in key_transforms] return lhs, params, key_transforms def as_mysql(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) return "JSON_EXTRACT(%s, %%s)" % lhs, tuple(params) + (json_path,) def as_oracle(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) return ( "COALESCE(JSON_QUERY(%s, '%s'), JSON_VALUE(%s, '%s'))" % ((lhs, json_path) * 2) ), tuple(params) * 2 def as_postgresql(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) if len(key_transforms) > 1: sql = "(%s %s %%s)" % (lhs, self.postgres_nested_operator) return sql, tuple(params) + (key_transforms,) try: lookup = int(self.key_name) except ValueError: lookup = self.key_name return "(%s %s %%s)" % (lhs, self.postgres_operator), tuple(params) + (lookup,) def as_sqlite(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) datatype_values = ",".join( [repr(datatype) for datatype in connection.ops.jsonfield_datatype_values] ) return ( "(CASE WHEN JSON_TYPE(%s, %%s) IN (%s) " "THEN JSON_TYPE(%s, %%s) ELSE JSON_EXTRACT(%s, %%s) END)" ) % (lhs, datatype_values, lhs, lhs), (tuple(params) + (json_path,)) * 3 class KeyTextTransform(KeyTransform): postgres_operator = "->>" postgres_nested_operator = "#>>" output_field = TextField() def as_mysql(self, compiler, connection): if connection.mysql_is_mariadb: # MariaDB doesn't support -> and ->> operators (see MDEV-13594). sql, params = super().as_mysql(compiler, connection) return "JSON_UNQUOTE(%s)" % sql, params else: lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) return "(%s ->> %%s)" % lhs, tuple(params) + (json_path,) @classmethod def from_lookup(cls, lookup): transform, *keys = lookup.split(LOOKUP_SEP) if not keys: raise ValueError("Lookup must contain key or index transforms.") for key in keys: transform = cls(key, transform) return transform KT = KeyTextTransform.from_lookup class KeyTransformTextLookupMixin: """ Mixin for combining with a lookup expecting a text lhs from a JSONField key lookup. On PostgreSQL, make use of the ->> operator instead of casting key values to text and performing the lookup on the resulting representation. """ def __init__(self, key_transform, *args, **kwargs): if not isinstance(key_transform, KeyTransform): raise TypeError( "Transform should be an instance of KeyTransform in order to " "use this lookup." ) key_text_transform = KeyTextTransform( key_transform.key_name, *key_transform.source_expressions, **key_transform.extra, ) super().__init__(key_text_transform, *args, **kwargs) class KeyTransformIsNull(lookups.IsNull): # key__isnull=False is the same as has_key='key' def as_oracle(self, compiler, connection): sql, params = HasKeyOrArrayIndex( self.lhs.lhs, self.lhs.key_name, ).as_oracle(compiler, connection) if not self.rhs: return sql, params # Column doesn't have a key or IS NULL. 
lhs, lhs_params, _ = self.lhs.preprocess_lhs(compiler, connection) return "(NOT %s OR %s IS NULL)" % (sql, lhs), tuple(params) + tuple(lhs_params) def as_sqlite(self, compiler, connection): template = "JSON_TYPE(%s, %%s) IS NULL" if not self.rhs: template = "JSON_TYPE(%s, %%s) IS NOT NULL" return HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name).as_sql( compiler, connection, template=template, ) class KeyTransformIn(lookups.In): def resolve_expression_parameter(self, compiler, connection, sql, param): sql, params = super().resolve_expression_parameter( compiler, connection, sql, param, ) if ( not hasattr(param, "as_sql") and not connection.features.has_native_json_field ): if connection.vendor == "oracle": value = json.loads(param) sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')" if isinstance(value, (list, dict)): sql = sql % "JSON_QUERY" else: sql = sql % "JSON_VALUE" elif connection.vendor == "mysql" or ( connection.vendor == "sqlite" and params[0] not in connection.ops.jsonfield_datatype_values ): sql = "JSON_EXTRACT(%s, '$')" if connection.vendor == "mysql" and connection.mysql_is_mariadb: sql = "JSON_UNQUOTE(%s)" % sql return sql, params class KeyTransformExact(JSONExact): def process_rhs(self, compiler, connection): if isinstance(self.rhs, KeyTransform): return super(lookups.Exact, self).process_rhs(compiler, connection) rhs, rhs_params = super().process_rhs(compiler, connection) if connection.vendor == "oracle": func = [] sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')" for value in rhs_params: value = json.loads(value) if isinstance(value, (list, dict)): func.append(sql % "JSON_QUERY") else: func.append(sql % "JSON_VALUE") rhs = rhs % tuple(func) elif connection.vendor == "sqlite": func = [] for value in rhs_params: if value in connection.ops.jsonfield_datatype_values: func.append("%s") else: func.append("JSON_EXTRACT(%s, '$')") rhs = rhs % tuple(func) return rhs, rhs_params def as_oracle(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) if rhs_params == ["null"]: # Field has key and it's NULL. 
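            # JSON_VALUE() also returns NULL when the key is missing, so an
            # exact match against JSON null must additionally assert that the
            # key exists.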
has_key_expr = HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name) has_key_sql, has_key_params = has_key_expr.as_oracle(compiler, connection) is_null_expr = self.lhs.get_lookup("isnull")(self.lhs, True) is_null_sql, is_null_params = is_null_expr.as_sql(compiler, connection) return ( "%s AND %s" % (has_key_sql, is_null_sql), tuple(has_key_params) + tuple(is_null_params), ) return super().as_sql(compiler, connection) class KeyTransformIExact( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IExact ): pass class KeyTransformIContains( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IContains ): pass class KeyTransformStartsWith(KeyTransformTextLookupMixin, lookups.StartsWith): pass class KeyTransformIStartsWith( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IStartsWith ): pass class KeyTransformEndsWith(KeyTransformTextLookupMixin, lookups.EndsWith): pass class KeyTransformIEndsWith( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IEndsWith ): pass class KeyTransformRegex(KeyTransformTextLookupMixin, lookups.Regex): pass class KeyTransformIRegex( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IRegex ): pass class KeyTransformNumericLookupMixin: def process_rhs(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) if not connection.features.has_native_json_field: rhs_params = [json.loads(value) for value in rhs_params] return rhs, rhs_params class KeyTransformLt(KeyTransformNumericLookupMixin, lookups.LessThan): pass class KeyTransformLte(KeyTransformNumericLookupMixin, lookups.LessThanOrEqual): pass class KeyTransformGt(KeyTransformNumericLookupMixin, lookups.GreaterThan): pass class KeyTransformGte(KeyTransformNumericLookupMixin, lookups.GreaterThanOrEqual): pass KeyTransform.register_lookup(KeyTransformIn) KeyTransform.register_lookup(KeyTransformExact) KeyTransform.register_lookup(KeyTransformIExact) KeyTransform.register_lookup(KeyTransformIsNull) KeyTransform.register_lookup(KeyTransformIContains) KeyTransform.register_lookup(KeyTransformStartsWith) KeyTransform.register_lookup(KeyTransformIStartsWith) KeyTransform.register_lookup(KeyTransformEndsWith) KeyTransform.register_lookup(KeyTransformIEndsWith) KeyTransform.register_lookup(KeyTransformRegex) KeyTransform.register_lookup(KeyTransformIRegex) KeyTransform.register_lookup(KeyTransformLt) KeyTransform.register_lookup(KeyTransformLte) KeyTransform.register_lookup(KeyTransformGt) KeyTransform.register_lookup(KeyTransformGte) class KeyTransformFactory: def __init__(self, key_name): self.key_name = key_name def __call__(self, *args, **kwargs): return KeyTransform(self.key_name, *args, **kwargs)
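

# Illustrative usage sketch (not part of the module above). ``Dog`` is a
# hypothetical model assumed to declare ``data = models.JSONField(null=True)``;
# the queries show how the transforms and lookups registered above are reached
# through the ORM. The function is never called here.
def _example_jsonfield_lookups(Dog):
    # Chained key transforms compile to a JSON path such as $.owner.name
    # (KeyTransform + compile_json_path).
    by_owner = Dog.objects.filter(data__owner__name="Bob")
    # has_keys maps to the HasKeys lookup registered on JSONField above.
    with_keys = Dog.objects.filter(data__has_keys=["breed", "owner"])
    # KT() builds a KeyTextTransform chain from a lookup string, useful for
    # annotating or ordering on the text value of a key.
    annotated = Dog.objects.annotate(owner_name=KT("data__owner__name"))
    return by_owner, with_keys, annotated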
28be8a133fe1f9325e31f178a03fc80fee1ac002d4b7e5d21d3afc995478e921
import datetime import posixpath from django import forms from django.core import checks from django.core.files.base import File from django.core.files.images import ImageFile from django.core.files.storage import Storage, default_storage from django.core.files.utils import validate_file_name from django.db.models import signals from django.db.models.fields import Field from django.db.models.query_utils import DeferredAttribute from django.utils.translation import gettext_lazy as _ class FieldFile(File): def __init__(self, instance, field, name): super().__init__(None, name) self.instance = instance self.field = field self.storage = field.storage self._committed = True def __eq__(self, other): # Older code may be expecting FileField values to be simple strings. # By overriding the == operator, it can remain backwards compatibility. if hasattr(other, "name"): return self.name == other.name return self.name == other def __hash__(self): return hash(self.name) # The standard File contains most of the necessary properties, but # FieldFiles can be instantiated without a name, so that needs to # be checked for here. def _require_file(self): if not self: raise ValueError( "The '%s' attribute has no file associated with it." % self.field.name ) def _get_file(self): self._require_file() if getattr(self, "_file", None) is None: self._file = self.storage.open(self.name, "rb") return self._file def _set_file(self, file): self._file = file def _del_file(self): del self._file file = property(_get_file, _set_file, _del_file) @property def path(self): self._require_file() return self.storage.path(self.name) @property def url(self): self._require_file() return self.storage.url(self.name) @property def size(self): self._require_file() if not self._committed: return self.file.size return self.storage.size(self.name) def open(self, mode="rb"): self._require_file() if getattr(self, "_file", None) is None: self.file = self.storage.open(self.name, mode) else: self.file.open(mode) return self # open() doesn't alter the file's contents, but it does reset the pointer open.alters_data = True # In addition to the standard File API, FieldFiles have extra methods # to further manipulate the underlying file, as well as update the # associated model instance. def save(self, name, content, save=True): name = self.field.generate_filename(self.instance, name) self.name = self.storage.save(name, content, max_length=self.field.max_length) setattr(self.instance, self.field.attname, self.name) self._committed = True # Save the object because it has changed, unless save is False if save: self.instance.save() save.alters_data = True def delete(self, save=True): if not self: return # Only close the file if it's already open, which we know by the # presence of self._file if hasattr(self, "_file"): self.close() del self.file self.storage.delete(self.name) self.name = None setattr(self.instance, self.field.attname, self.name) self._committed = False if save: self.instance.save() delete.alters_data = True @property def closed(self): file = getattr(self, "_file", None) return file is None or file.closed def close(self): file = getattr(self, "_file", None) if file is not None: file.close() def __getstate__(self): # FieldFile needs access to its associated model field, an instance and # the file's name. Everything else will be restored later, by # FileDescriptor below. 
return { "name": self.name, "closed": False, "_committed": True, "_file": None, "instance": self.instance, "field": self.field, } def __setstate__(self, state): self.__dict__.update(state) self.storage = self.field.storage class FileDescriptor(DeferredAttribute): """ The descriptor for the file attribute on the model instance. Return a FieldFile when accessed so you can write code like:: >>> from myapp.models import MyModel >>> instance = MyModel.objects.get(pk=1) >>> instance.file.size Assign a file object on assignment so you can do:: >>> with open('/path/to/hello.world') as f: ... instance.file = File(f) """ def __get__(self, instance, cls=None): if instance is None: return self # This is slightly complicated, so worth an explanation. # instance.file needs to ultimately return some instance of `File`, # probably a subclass. Additionally, this returned object needs to have # the FieldFile API so that users can easily do things like # instance.file.path and have that delegated to the file storage engine. # Easy enough if we're strict about assignment in __set__, but if you # peek below you can see that we're not. So depending on the current # value of the field we have to dynamically construct some sort of # "thing" to return. # The instance dict contains whatever was originally assigned # in __set__. file = super().__get__(instance, cls) # If this value is a string (instance.file = "path/to/file") or None # then we simply wrap it with the appropriate attribute class according # to the file field. [This is FieldFile for FileFields and # ImageFieldFile for ImageFields; it's also conceivable that user # subclasses might also want to subclass the attribute class]. This # object understands how to convert a path to a file, and also how to # handle None. if isinstance(file, str) or file is None: attr = self.field.attr_class(instance, self.field, file) instance.__dict__[self.field.attname] = attr # Other types of files may be assigned as well, but they need to have # the FieldFile interface added to them. Thus, we wrap any other type of # File inside a FieldFile (well, the field's attr_class, which is # usually FieldFile). elif isinstance(file, File) and not isinstance(file, FieldFile): file_copy = self.field.attr_class(instance, self.field, file.name) file_copy.file = file file_copy._committed = False instance.__dict__[self.field.attname] = file_copy # Finally, because of the (some would say boneheaded) way pickle works, # the underlying FieldFile might not actually itself have an associated # file. So we need to reset the details of the FieldFile in those cases. elif isinstance(file, FieldFile) and not hasattr(file, "field"): file.instance = instance file.field = self.field file.storage = self.field.storage # Make sure that the instance is correct. elif isinstance(file, FieldFile) and instance is not file.instance: file.instance = instance # That was fun, wasn't it? return instance.__dict__[self.field.attname] def __set__(self, instance, value): instance.__dict__[self.field.attname] = value class FileField(Field): # The class to wrap instance attributes in. Accessing the file object off # the instance will always return an instance of attr_class. attr_class = FieldFile # The descriptor to use for accessing the attribute off of the class. 
descriptor_class = FileDescriptor description = _("File") def __init__( self, verbose_name=None, name=None, upload_to="", storage=None, **kwargs ): self._primary_key_set_explicitly = "primary_key" in kwargs self.storage = storage or default_storage if callable(self.storage): # Hold a reference to the callable for deconstruct(). self._storage_callable = self.storage self.storage = self.storage() if not isinstance(self.storage, Storage): raise TypeError( "%s.storage must be a subclass/instance of %s.%s" % ( self.__class__.__qualname__, Storage.__module__, Storage.__qualname__, ) ) self.upload_to = upload_to kwargs.setdefault("max_length", 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), *self._check_upload_to(), ] def _check_primary_key(self): if self._primary_key_set_explicitly: return [ checks.Error( "'primary_key' is not a valid argument for a %s." % self.__class__.__name__, obj=self, id="fields.E201", ) ] else: return [] def _check_upload_to(self): if isinstance(self.upload_to, str) and self.upload_to.startswith("/"): return [ checks.Error( "%s's 'upload_to' argument must be a relative path, not an " "absolute path." % self.__class__.__name__, obj=self, id="fields.E202", hint="Remove the leading slash.", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 100: del kwargs["max_length"] kwargs["upload_to"] = self.upload_to if self.storage is not default_storage: kwargs["storage"] = getattr(self, "_storage_callable", self.storage) return name, path, args, kwargs def get_internal_type(self): return "FileField" def get_prep_value(self, value): value = super().get_prep_value(value) # Need to convert File objects provided via a form to string for # database insertion. if value is None: return None return str(value) def pre_save(self, model_instance, add): file = super().pre_save(model_instance, add) if file and not file._committed: # Commit the file to storage prior to saving the model file.save(file.name, file.file, save=False) return file def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) setattr(cls, self.attname, self.descriptor_class(self)) def generate_filename(self, instance, filename): """ Apply (if callable) or prepend (if a string) upload_to to the filename, then delegate further processing of the name to the storage backend. Until the storage layer, all file paths are expected to be Unix style (with forward slashes). """ if callable(self.upload_to): filename = self.upload_to(instance, filename) else: dirname = datetime.datetime.now().strftime(str(self.upload_to)) filename = posixpath.join(dirname, filename) filename = validate_file_name(filename, allow_relative_path=True) return self.storage.generate_filename(filename) def save_form_data(self, instance, data): # Important: None means "no change", other false value means "clear" # This subtle distinction (rather than a more explicit marker) is # needed because we need to consume values that are also sane for a # regular (non Model-) Form to find in its cleaned_data dictionary. if data is not None: # This value will be converted to str and stored in the # database, so leaving False as-is is not acceptable. 
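            # Any falsy non-None value (False, '', 0) therefore clears the
            # field by storing the empty string.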
setattr(instance, self.name, data or "") def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.FileField, "max_length": self.max_length, **kwargs, } ) class ImageFileDescriptor(FileDescriptor): """ Just like the FileDescriptor, but for ImageFields. The only difference is assigning the width/height to the width_field/height_field, if appropriate. """ def __set__(self, instance, value): previous_file = instance.__dict__.get(self.field.attname) super().__set__(instance, value) # To prevent recalculating image dimensions when we are instantiating # an object from the database (bug #11084), only update dimensions if # the field had a value before this assignment. Since the default # value for FileField subclasses is an instance of field.attr_class, # previous_file will only be None when we are called from # Model.__init__(). The ImageField.update_dimension_fields method # hooked up to the post_init signal handles the Model.__init__() cases. # Assignment happening outside of Model.__init__() will trigger the # update right here. if previous_file is not None: self.field.update_dimension_fields(instance, force=True) class ImageFieldFile(ImageFile, FieldFile): def delete(self, save=True): # Clear the image dimensions cache if hasattr(self, "_dimensions_cache"): del self._dimensions_cache super().delete(save) class ImageField(FileField): attr_class = ImageFieldFile descriptor_class = ImageFileDescriptor description = _("Image") def __init__( self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs, ): self.width_field, self.height_field = width_field, height_field super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_image_library_installed(), ] def _check_image_library_installed(self): try: from PIL import Image # NOQA except ImportError: return [ checks.Error( "Cannot use ImageField because Pillow is not installed.", hint=( "Get Pillow at https://pypi.org/project/Pillow/ " 'or run command "python -m pip install Pillow".' ), obj=self, id="fields.E210", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.width_field: kwargs["width_field"] = self.width_field if self.height_field: kwargs["height_field"] = self.height_field return name, path, args, kwargs def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) # Attach update_dimension_fields so that dimension fields declared # after their corresponding image field don't stay cleared by # Model.__init__, see bug #11196. # Only run post-initialization dimension update on non-abstract models if not cls._meta.abstract: signals.post_init.connect(self.update_dimension_fields, sender=cls) def update_dimension_fields(self, instance, force=False, *args, **kwargs): """ Update field's width and height fields, if defined. This method is hooked up to model's post_init signal to update dimensions after instantiating a model instance. However, dimensions won't be updated if the dimensions fields are already populated. This avoids unnecessary recalculation when loading an object from the database. Dimensions can be forced to update with force=True, which is how ImageFileDescriptor.__set__ calls this method. """ # Nothing to update if the field doesn't have dimension fields or if # the field is deferred. 
has_dimension_fields = self.width_field or self.height_field if not has_dimension_fields or self.attname not in instance.__dict__: return # getattr will call the ImageFileDescriptor's __get__ method, which # coerces the assigned value into an instance of self.attr_class # (ImageFieldFile in this case). file = getattr(instance, self.attname) # Nothing to update if we have no file and not being forced to update. if not file and not force: return dimension_fields_filled = not ( (self.width_field and not getattr(instance, self.width_field)) or (self.height_field and not getattr(instance, self.height_field)) ) # When both dimension fields have values, we are most likely loading # data from the database or updating an image field that already had # an image stored. In the first case, we don't want to update the # dimension fields because we are already getting their values from the # database. In the second case, we do want to update the dimensions # fields and will skip this return because force will be True since we # were called from ImageFileDescriptor.__set__. if dimension_fields_filled and not force: return # file should be an instance of ImageFieldFile or should be None. if file: width = file.width height = file.height else: # No file, so clear dimensions fields. width = None height = None # Update the width and height fields. if self.width_field: setattr(instance, self.width_field, width) if self.height_field: setattr(instance, self.height_field, height) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.ImageField, **kwargs, } )
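

# Illustrative usage sketch (not part of the module above). ``Photo`` is a
# hypothetical model assumed to declare
# ``image = models.ImageField(upload_to="photos/%Y/%m", width_field="width",
# height_field="height")``; the steps show how the descriptor, storage, and
# dimension handling defined above fit together. The function is never called
# here.
def _example_imagefield_usage(Photo, fileobj):
    photo = Photo()
    # Assignment goes through ImageFileDescriptor.__set__(), which refreshes
    # the width/height fields via update_dimension_fields(force=True); on the
    # next attribute access FileDescriptor.__get__() wraps the File in the
    # field's attr_class (ImageFieldFile).
    photo.image = ImageFile(fileobj, name="cat.png")
    # FileField.pre_save() commits the still-uncommitted file to storage
    # before the row is written.
    photo.save()
    # The ImageFieldFile delegates url/path/size to the configured storage.
    return photo.image.url, photo.image.width, photo.image.height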
877abac6c5b48feaf02642296df6ed3953525261d60da64d02d1bf9b55039512
""" Accessors for related objects. When a field defines a relation between two models, each model class provides an attribute to access related instances of the other model class (unless the reverse accessor has been disabled with related_name='+'). Accessors are implemented as descriptors in order to customize access and assignment. This module defines the descriptor classes. Forward accessors follow foreign keys. Reverse accessors trace them back. For example, with the following models:: class Parent(Model): pass class Child(Model): parent = ForeignKey(Parent, related_name='children') ``child.parent`` is a forward many-to-one relation. ``parent.children`` is a reverse many-to-one relation. There are three types of relations (many-to-one, one-to-one, and many-to-many) and two directions (forward and reverse) for a total of six combinations. 1. Related instance on the forward side of a many-to-one relation: ``ForwardManyToOneDescriptor``. Uniqueness of foreign key values is irrelevant to accessing the related instance, making the many-to-one and one-to-one cases identical as far as the descriptor is concerned. The constraint is checked upstream (unicity validation in forms) or downstream (unique indexes in the database). 2. Related instance on the forward side of a one-to-one relation: ``ForwardOneToOneDescriptor``. It avoids querying the database when accessing the parent link field in a multi-table inheritance scenario. 3. Related instance on the reverse side of a one-to-one relation: ``ReverseOneToOneDescriptor``. One-to-one relations are asymmetrical, despite the apparent symmetry of the name, because they're implemented in the database with a foreign key from one table to another. As a consequence ``ReverseOneToOneDescriptor`` is slightly different from ``ForwardManyToOneDescriptor``. 4. Related objects manager for related instances on the reverse side of a many-to-one relation: ``ReverseManyToOneDescriptor``. Unlike the previous two classes, this one provides access to a collection of objects. It returns a manager rather than an instance. 5. Related objects manager for related instances on the forward or reverse sides of a many-to-many relation: ``ManyToManyDescriptor``. Many-to-many relations are symmetrical. The syntax of Django models requires declaring them on one side but that's an implementation detail. They could be declared on the other side without any change in behavior. Therefore the forward and reverse descriptors can be the same. If you're looking for ``ForwardManyToManyDescriptor`` or ``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead. 
""" from django.core.exceptions import FieldError from django.db import ( DEFAULT_DB_ALIAS, NotSupportedError, connections, router, transaction, ) from django.db.models import Q, Window, signals from django.db.models.functions import RowNumber from django.db.models.lookups import GreaterThan, LessThanOrEqual from django.db.models.query import QuerySet from django.db.models.query_utils import DeferredAttribute from django.db.models.utils import resolve_callables from django.utils.functional import cached_property class ForeignKeyDeferredAttribute(DeferredAttribute): def __set__(self, instance, value): if instance.__dict__.get(self.field.attname) != value and self.field.is_cached( instance ): self.field.delete_cached_value(instance) instance.__dict__[self.field.attname] = value def _filter_prefetch_queryset(queryset, field_name, instances): predicate = Q(**{f"{field_name}__in": instances}) db = queryset._db or DEFAULT_DB_ALIAS if queryset.query.is_sliced: if not connections[db].features.supports_over_clause: raise NotSupportedError( "Prefetching from a limited queryset is only supported on backends " "that support window functions." ) low_mark, high_mark = queryset.query.low_mark, queryset.query.high_mark order_by = [ expr for expr, _ in queryset.query.get_compiler(using=db).get_order_by() ] window = Window(RowNumber(), partition_by=field_name, order_by=order_by) predicate &= GreaterThan(window, low_mark) if high_mark is not None: predicate &= LessThanOrEqual(window, high_mark) queryset.query.clear_limits() return queryset.filter(predicate) class ForwardManyToOneDescriptor: """ Accessor to the related object on the forward side of a many-to-one or one-to-one (via ForwardOneToOneDescriptor subclass) relation. In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') ``Child.parent`` is a ``ForwardManyToOneDescriptor`` instance. """ def __init__(self, field_with_rel): self.field = field_with_rel @cached_property def RelatedObjectDoesNotExist(self): # The exception can't be created at initialization time since the # related model might not be resolved yet; `self.field.model` might # still be a string model reference. return type( "RelatedObjectDoesNotExist", (self.field.remote_field.model.DoesNotExist, AttributeError), { "__module__": self.field.model.__module__, "__qualname__": "%s.%s.RelatedObjectDoesNotExist" % ( self.field.model.__qualname__, self.field.name, ), }, ) def is_cached(self, instance): return self.field.is_cached(instance) def get_queryset(self, **hints): return self.field.remote_field.model._base_manager.db_manager(hints=hints).all() def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = self.get_queryset() queryset._add_hints(instance=instances[0]) rel_obj_attr = self.field.get_foreign_related_value instance_attr = self.field.get_local_related_value instances_dict = {instance_attr(inst): inst for inst in instances} related_field = self.field.foreign_related_fields[0] remote_field = self.field.remote_field # FIXME: This will need to be revisited when we introduce support for # composite fields. In the meantime we take this practical approach to # solve a regression on 1.6 when the reverse manager in hidden # (related_name ends with a '+'). Refs #21410. # The check for len(...) == 1 is a special case that allows the query # to be join-less and smaller. Refs #21760. 
if remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1: query = { "%s__in" % related_field.name: {instance_attr(inst)[0] for inst in instances} } else: query = {"%s__in" % self.field.related_query_name(): instances} queryset = queryset.filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. if not remote_field.multiple: for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] remote_field.set_cached_value(rel_obj, instance) return ( queryset, rel_obj_attr, instance_attr, True, self.field.get_cache_name(), False, ) def get_object(self, instance): qs = self.get_queryset(instance=instance) # Assuming the database enforces foreign keys, this won't fail. return qs.get(self.field.get_reverse_related_filter(instance)) def __get__(self, instance, cls=None): """ Get the related instance through the forward relation. With the example above, when getting ``child.parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``cls`` is the ``Child`` class (we don't need it) """ if instance is None: return self # The related instance is loaded from the database and then cached # by the field on the model instance state. It can also be pre-cached # by the reverse accessor (ReverseOneToOneDescriptor). try: rel_obj = self.field.get_cached_value(instance) except KeyError: has_value = None not in self.field.get_local_related_value(instance) ancestor_link = ( instance._meta.get_ancestor_link(self.field.model) if has_value else None ) if ancestor_link and ancestor_link.is_cached(instance): # An ancestor link will exist if this field is defined on a # multi-table inheritance parent of the instance's class. ancestor = ancestor_link.get_cached_value(instance) # The value might be cached on an ancestor if the instance # originated from walking down the inheritance chain. rel_obj = self.field.get_cached_value(ancestor, default=None) else: rel_obj = None if rel_obj is None and has_value: rel_obj = self.get_object(instance) remote_field = self.field.remote_field # If this is a one-to-one relation, set the reverse accessor # cache on the related object to the current instance to avoid # an extra SQL query if it's accessed later on. if not remote_field.multiple: remote_field.set_cached_value(rel_obj, instance) self.field.set_cached_value(instance, rel_obj) if rel_obj is None and not self.field.null: raise self.RelatedObjectDoesNotExist( "%s has no %s." % (self.field.model.__name__, self.field.name) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the forward relation. With the example above, when setting ``child.parent = parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``value`` is the ``parent`` instance on the right of the equal sign """ # An object must be an instance of the related class. if value is not None and not isinstance( value, self.field.remote_field.model._meta.concrete_model ): raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' 
% ( value, instance._meta.object_name, self.field.name, self.field.remote_field.model._meta.object_name, ) ) elif value is not None: if instance._state.db is None: instance._state.db = router.db_for_write( instance.__class__, instance=value ) if value._state.db is None: value._state.db = router.db_for_write( value.__class__, instance=instance ) if not router.allow_relation(value, instance): raise ValueError( 'Cannot assign "%r": the current database router prevents this ' "relation." % value ) remote_field = self.field.remote_field # If we're setting the value of a OneToOneField to None, we need to clear # out the cache on any old related object. Otherwise, deleting the # previously-related object will also cause this object to be deleted, # which is wrong. if value is None: # Look up the previously-related object, which may still be available # since we've not yet cleared out the related field. # Use the cache directly, instead of the accessor; if we haven't # populated the cache, then we don't care - we're only accessing # the object to invalidate the accessor cache, so there's no # need to populate the cache just to expire it again. related = self.field.get_cached_value(instance, default=None) # If we've got an old related object, we need to clear out its # cache. This cache also might not exist if the related object # hasn't been accessed yet. if related is not None: remote_field.set_cached_value(related, None) for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, None) # Set the values of the related field. else: for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, getattr(value, rh_field.attname)) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. self.field.set_cached_value(instance, value) # If this is a one-to-one relation, set the reverse accessor cache on # the related object to the current instance to avoid an extra SQL # query if it's accessed later on. if value is not None and not remote_field.multiple: remote_field.set_cached_value(value, instance) def __reduce__(self): """ Pickling should return the instance attached by self.field on the model, not a new copy of that descriptor. Use getattr() to retrieve the instance directly from the model. """ return getattr, (self.field.model, self.field.name) class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor): """ Accessor to the related object on the forward side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``Restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance. """ def get_object(self, instance): if self.field.remote_field.parent_link: deferred = instance.get_deferred_fields() # Because it's a parent link, all the data is available in the # instance, so populate the parent model with this data. rel_model = self.field.remote_field.model fields = [field.attname for field in rel_model._meta.concrete_fields] # If any of the related model's fields are deferred, fallback to # fetching all fields from the related model. This avoids a query # on the related model for every deferred field. 
if not any(field in fields for field in deferred): kwargs = {field: getattr(instance, field) for field in fields} obj = rel_model(**kwargs) obj._state.adding = instance._state.adding obj._state.db = instance._state.db return obj return super().get_object(instance) def __set__(self, instance, value): super().__set__(instance, value) # If the primary key is a link to a parent model and a parent instance # is being set, update the value of the inherited pk(s). if self.field.primary_key and self.field.remote_field.parent_link: opts = instance._meta # Inherited primary key fields from this object's base classes. inherited_pk_fields = [ field for field in opts.concrete_fields if field.primary_key and field.remote_field ] for field in inherited_pk_fields: rel_model_pk_name = field.remote_field.model._meta.pk.attname raw_value = ( getattr(value, rel_model_pk_name) if value is not None else None ) setattr(instance, rel_model_pk_name, raw_value) class ReverseOneToOneDescriptor: """ Accessor to the related object on the reverse side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``Place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance. """ def __init__(self, related): # Following the example above, `related` is an instance of OneToOneRel # which represents the reverse restaurant field (place.restaurant). self.related = related @cached_property def RelatedObjectDoesNotExist(self): # The exception isn't created at initialization time for the sake of # consistency with `ForwardManyToOneDescriptor`. return type( "RelatedObjectDoesNotExist", (self.related.related_model.DoesNotExist, AttributeError), { "__module__": self.related.model.__module__, "__qualname__": "%s.%s.RelatedObjectDoesNotExist" % ( self.related.model.__qualname__, self.related.name, ), }, ) def is_cached(self, instance): return self.related.is_cached(instance) def get_queryset(self, **hints): return self.related.related_model._base_manager.db_manager(hints=hints).all() def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = self.get_queryset() queryset._add_hints(instance=instances[0]) rel_obj_attr = self.related.field.get_local_related_value instance_attr = self.related.field.get_foreign_related_value instances_dict = {instance_attr(inst): inst for inst in instances} query = {"%s__in" % self.related.field.name: instances} queryset = queryset.filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] self.related.field.set_cached_value(rel_obj, instance) return ( queryset, rel_obj_attr, instance_attr, True, self.related.get_cache_name(), False, ) def __get__(self, instance, cls=None): """ Get the related instance through the reverse relation. With the example above, when getting ``place.restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``cls`` is the ``Place`` class (unused) Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ if instance is None: return self # The related instance is loaded from the database and then cached # by the field on the model instance state. It can also be pre-cached # by the forward accessor (ForwardManyToOneDescriptor). 
try: rel_obj = self.related.get_cached_value(instance) except KeyError: related_pk = instance.pk if related_pk is None: rel_obj = None else: filter_args = self.related.field.get_forward_related_filter(instance) try: rel_obj = self.get_queryset(instance=instance).get(**filter_args) except self.related.related_model.DoesNotExist: rel_obj = None else: # Set the forward accessor cache on the related object to # the current instance to avoid an extra SQL query if it's # accessed later on. self.related.field.set_cached_value(rel_obj, instance) self.related.set_cached_value(instance, rel_obj) if rel_obj is None: raise self.RelatedObjectDoesNotExist( "%s has no %s." % (instance.__class__.__name__, self.related.get_accessor_name()) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the reverse relation. With the example above, when setting ``place.restaurant = restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``value`` is the ``restaurant`` instance on the right of the equal sign Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ # The similarity of the code below to the code in # ForwardManyToOneDescriptor is annoying, but there's a bunch # of small differences that would make a common base class convoluted. if value is None: # Update the cached related instance (if any) & clear the cache. # Following the example above, this would be the cached # ``restaurant`` instance (if any). rel_obj = self.related.get_cached_value(instance, default=None) if rel_obj is not None: # Remove the ``restaurant`` instance from the ``place`` # instance cache. self.related.delete_cached_value(instance) # Set the ``place`` field on the ``restaurant`` # instance to None. setattr(rel_obj, self.related.field.name, None) elif not isinstance(value, self.related.related_model): # An object must be an instance of the related class. raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % ( value, instance._meta.object_name, self.related.get_accessor_name(), self.related.related_model._meta.object_name, ) ) else: if instance._state.db is None: instance._state.db = router.db_for_write( instance.__class__, instance=value ) if value._state.db is None: value._state.db = router.db_for_write( value.__class__, instance=instance ) if not router.allow_relation(value, instance): raise ValueError( 'Cannot assign "%r": the current database router prevents this ' "relation." % value ) related_pk = tuple( getattr(instance, field.attname) for field in self.related.field.foreign_related_fields ) # Set the value of the related field to the value of the related # object's related field. for index, field in enumerate(self.related.field.local_related_fields): setattr(value, field.attname, related_pk[index]) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. self.related.set_cached_value(instance, value) # Set the forward accessor cache on the related object to the current # instance to avoid an extra SQL query if it's accessed later on. self.related.field.set_cached_value(value, instance) def __reduce__(self): # Same purpose as ForwardManyToOneDescriptor.__reduce__(). return getattr, (self.related.model, self.related.name) class ReverseManyToOneDescriptor: """ Accessor to the related objects manager on the reverse side of a many-to-one relation. 
In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') ``Parent.children`` is a ``ReverseManyToOneDescriptor`` instance. Most of the implementation is delegated to a dynamically defined manager class built by ``create_forward_many_to_many_manager()`` defined below. """ def __init__(self, rel): self.rel = rel self.field = rel.field @cached_property def related_manager_cls(self): related_model = self.rel.related_model return create_reverse_many_to_one_manager( related_model._default_manager.__class__, self.rel, ) def __get__(self, instance, cls=None): """ Get the related objects through the reverse relation. With the example above, when getting ``parent.children``: - ``self`` is the descriptor managing the ``children`` attribute - ``instance`` is the ``parent`` instance - ``cls`` is the ``Parent`` class (unused) """ if instance is None: return self return self.related_manager_cls(instance) def _get_set_deprecation_msg_params(self): return ( "reverse side of a related set", self.rel.get_accessor_name(), ) def __set__(self, instance, value): raise TypeError( "Direct assignment to the %s is prohibited. Use %s.set() instead." % self._get_set_deprecation_msg_params(), ) def create_reverse_many_to_one_manager(superclass, rel): """ Create a manager for the reverse side of a many-to-one relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-one relations. """ class RelatedManager(superclass): def __init__(self, instance): super().__init__() self.instance = instance self.model = rel.related_model self.field = rel.field self.core_filters = {self.field.name: instance} def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_reverse_many_to_one_manager(manager.__class__, rel) return manager_class(self.instance) do_not_call_in_templates = True def _check_fk_val(self): for field in self.field.foreign_related_fields: if getattr(self.instance, field.attname) is None: raise ValueError( f'"{self.instance!r}" needs to have a value for field ' f'"{field.attname}" before this relationship can be used.' ) def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. """ db = self._db or router.db_for_read(self.model, instance=self.instance) empty_strings_as_null = connections[ db ].features.interprets_empty_strings_as_nulls queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset._defer_next_filter = True queryset = queryset.filter(**self.core_filters) for field in self.field.foreign_related_fields: val = getattr(self.instance, field.attname) if val is None or (val == "" and empty_strings_as_null): return queryset.none() if self.field.many_to_one: # Guard against field-like objects such as GenericRelation # that abuse create_reverse_many_to_one_manager() with reverse # one-to-many relationships instead and break known related # objects assignment. try: target_field = self.field.target_field except FieldError: # The relationship has multiple target fields. Use a tuple # for related object id. 
rel_obj_id = tuple( [ getattr(self.instance, target_field.attname) for target_field in self.field.path_infos[-1].target_fields ] ) else: rel_obj_id = getattr(self.instance, target_field.attname) queryset._known_related_objects = { self.field: {rel_obj_id: self.instance} } return queryset def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop( self.field.remote_field.get_cache_name() ) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if self.instance.pk is None: raise ValueError( f"{self.instance.__class__.__name__!r} instance needs to have a " f"primary key value before this relationship can be used." ) try: return self.instance._prefetched_objects_cache[ self.field.remote_field.get_cache_name() ] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) rel_obj_attr = self.field.get_local_related_value instance_attr = self.field.get_foreign_related_value instances_dict = {instance_attr(inst): inst for inst in instances} queryset = _filter_prefetch_queryset(queryset, self.field.name, instances) # Since we just bypassed this class' get_queryset(), we must manage # the reverse relation manually. for rel_obj in queryset: if not self.field.is_cached(rel_obj): instance = instances_dict[rel_obj_attr(rel_obj)] setattr(rel_obj, self.field.name, instance) cache_name = self.field.remote_field.get_cache_name() return queryset, rel_obj_attr, instance_attr, False, cache_name, False def add(self, *objs, bulk=True): self._check_fk_val() self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) def check_and_update_obj(obj): if not isinstance(obj, self.model): raise TypeError( "'%s' instance expected, got %r" % ( self.model._meta.object_name, obj, ) ) setattr(obj, self.field.name, self.instance) if bulk: pks = [] for obj in objs: check_and_update_obj(obj) if obj._state.adding or obj._state.db != db: raise ValueError( "%r instance isn't saved. Use bulk=False or save " "the object first." 
% obj ) pks.append(obj.pk) self.model._base_manager.using(db).filter(pk__in=pks).update( **{ self.field.name: self.instance, } ) else: with transaction.atomic(using=db, savepoint=False): for obj in objs: check_and_update_obj(obj) obj.save() add.alters_data = True def create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).create(**kwargs) create.alters_data = True def get_or_create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs) get_or_create.alters_data = True def update_or_create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs) update_or_create.alters_data = True # remove() and clear() are only provided if the ForeignKey can have a # value of null. if rel.field.null: def remove(self, *objs, bulk=True): if not objs: return self._check_fk_val() val = self.field.get_foreign_related_value(self.instance) old_ids = set() for obj in objs: if not isinstance(obj, self.model): raise TypeError( "'%s' instance expected, got %r" % ( self.model._meta.object_name, obj, ) ) # Is obj actually part of this descriptor set? if self.field.get_local_related_value(obj) == val: old_ids.add(obj.pk) else: raise self.field.remote_field.model.DoesNotExist( "%r is not related to %r." % (obj, self.instance) ) self._clear(self.filter(pk__in=old_ids), bulk) remove.alters_data = True def clear(self, *, bulk=True): self._check_fk_val() self._clear(self, bulk) clear.alters_data = True def _clear(self, queryset, bulk): self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) queryset = queryset.using(db) if bulk: # `QuerySet.update()` is intrinsically atomic. queryset.update(**{self.field.name: None}) else: with transaction.atomic(using=db, savepoint=False): for obj in queryset: setattr(obj, self.field.name, None) obj.save(update_fields=[self.field.name]) _clear.alters_data = True def set(self, objs, *, bulk=True, clear=False): self._check_fk_val() # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) if self.field.null: db = router.db_for_write(self.model, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear(bulk=bulk) self.add(*objs, bulk=bulk) else: old_objs = set(self.using(db).all()) new_objs = [] for obj in objs: if obj in old_objs: old_objs.remove(obj) else: new_objs.append(obj) self.remove(*old_objs, bulk=bulk) self.add(*new_objs, bulk=bulk) else: self.add(*objs, bulk=bulk) set.alters_data = True return RelatedManager class ManyToManyDescriptor(ReverseManyToOneDescriptor): """ Accessor to the related objects manager on the forward and reverse sides of a many-to-many relation. In the example:: class Pizza(Model): toppings = ManyToManyField(Topping, related_name='pizzas') ``Pizza.toppings`` and ``Topping.pizzas`` are ``ManyToManyDescriptor`` instances. Most of the implementation is delegated to a dynamically defined manager class built by ``create_forward_many_to_many_manager()`` defined below. 
""" def __init__(self, rel, reverse=False): super().__init__(rel) self.reverse = reverse @property def through(self): # through is provided so that you have easy access to the through # model (Book.authors.through) for inlines, etc. This is done as # a property to ensure that the fully resolved value is returned. return self.rel.through @cached_property def related_manager_cls(self): related_model = self.rel.related_model if self.reverse else self.rel.model return create_forward_many_to_many_manager( related_model._default_manager.__class__, self.rel, reverse=self.reverse, ) def _get_set_deprecation_msg_params(self): return ( "%s side of a many-to-many set" % ("reverse" if self.reverse else "forward"), self.rel.get_accessor_name() if self.reverse else self.field.name, ) def create_forward_many_to_many_manager(superclass, rel, reverse): """ Create a manager for the either side of a many-to-many relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-many relations. """ class ManyRelatedManager(superclass): def __init__(self, instance=None): super().__init__() self.instance = instance if not reverse: self.model = rel.model self.query_field_name = rel.field.related_query_name() self.prefetch_cache_name = rel.field.name self.source_field_name = rel.field.m2m_field_name() self.target_field_name = rel.field.m2m_reverse_field_name() self.symmetrical = rel.symmetrical else: self.model = rel.related_model self.query_field_name = rel.field.name self.prefetch_cache_name = rel.field.related_query_name() self.source_field_name = rel.field.m2m_reverse_field_name() self.target_field_name = rel.field.m2m_field_name() self.symmetrical = False self.through = rel.through self.reverse = reverse self.source_field = self.through._meta.get_field(self.source_field_name) self.target_field = self.through._meta.get_field(self.target_field_name) self.core_filters = {} self.pk_field_names = {} for lh_field, rh_field in self.source_field.related_fields: core_filter_key = "%s__%s" % (self.query_field_name, rh_field.name) self.core_filters[core_filter_key] = getattr(instance, rh_field.attname) self.pk_field_names[lh_field.name] = rh_field.name self.related_val = self.source_field.get_foreign_related_value(instance) if None in self.related_val: raise ValueError( '"%r" needs to have a value for field "%s" before ' "this many-to-many relationship can be used." % (instance, self.pk_field_names[self.source_field_name]) ) # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if instance.pk is None: raise ValueError( "%r instance needs to have a primary key value before " "a many-to-many relationship can be used." % instance.__class__.__name__ ) def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_forward_many_to_many_manager( manager.__class__, rel, reverse ) return manager_class(instance=self.instance) do_not_call_in_templates = True def _build_remove_filters(self, removed_vals): filters = Q.create([(self.source_field_name, self.related_val)]) # No need to add a subquery condition if removed_vals is a QuerySet without # filters. 
removed_vals_filters = ( not isinstance(removed_vals, QuerySet) or removed_vals._has_filters() ) if removed_vals_filters: filters &= Q.create([(f"{self.target_field_name}__in", removed_vals)]) if self.symmetrical: symmetrical_filters = Q.create( [(self.target_field_name, self.related_val)] ) if removed_vals_filters: symmetrical_filters &= Q.create( [(f"{self.source_field_name}__in", removed_vals)] ) filters |= symmetrical_filters return filters def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. """ queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset._defer_next_filter = True return queryset._next_is_sticky().filter(**self.core_filters) def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) queryset = _filter_prefetch_queryset( queryset._next_is_sticky(), self.query_field_name, instances ) # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. We know that # there will already be a join on the join table, so we can just add # the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. fk = self.through._meta.get_field(self.source_field_name) join_table = fk.model._meta.db_table connection = connections[queryset.db] qn = connection.ops.quote_name queryset = queryset.extra( select={ "_prefetch_related_val_%s" % f.attname: "%s.%s" % (qn(join_table), qn(f.column)) for f in fk.local_related_fields } ) return ( queryset, lambda result: tuple( getattr(result, "_prefetch_related_val_%s" % f.attname) for f in fk.local_related_fields ), lambda inst: tuple( f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields ), False, self.prefetch_cache_name, False, ) def add(self, *objs, through_defaults=None): self._remove_prefetched_objects() db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): self._add_items( self.source_field_name, self.target_field_name, *objs, through_defaults=through_defaults, ) # If this is a symmetrical m2m relation to self, add the mirror # entry in the m2m table. 
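                # (e.g. ManyToManyField("self") with symmetrical=True: adding
                # a -> b also records b -> a).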
if self.symmetrical: self._add_items( self.target_field_name, self.source_field_name, *objs, through_defaults=through_defaults, ) add.alters_data = True def remove(self, *objs): self._remove_prefetched_objects() self._remove_items(self.source_field_name, self.target_field_name, *objs) remove.alters_data = True def clear(self): db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): signals.m2m_changed.send( sender=self.through, action="pre_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) self._remove_prefetched_objects() filters = self._build_remove_filters(super().get_queryset().using(db)) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) clear.alters_data = True def set(self, objs, *, clear=False, through_defaults=None): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, through_defaults=through_defaults) else: old_ids = set( self.using(db).values_list( self.target_field.target_field.attname, flat=True ) ) new_objs = [] for obj in objs: fk_val = ( self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else self.target_field.get_prep_value(obj) ) if fk_val in old_ids: old_ids.remove(fk_val) else: new_objs.append(obj) self.remove(*old_ids) self.add(*new_objs, through_defaults=through_defaults) set.alters_data = True def create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs) self.add(new_obj, through_defaults=through_defaults) return new_obj create.alters_data = True def get_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create( **kwargs ) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created get_or_create.alters_data = True def update_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super( ManyRelatedManager, self.db_manager(db) ).update_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created update_or_create.alters_data = True def _get_target_ids(self, target_field_name, objs): """ Return the set of ids of `objs` that the target field references. 
""" from django.db.models import Model target_ids = set() target_field = self.through._meta.get_field(target_field_name) for obj in objs: if isinstance(obj, self.model): if not router.allow_relation(obj, self.instance): raise ValueError( 'Cannot add "%r": instance is on database "%s", ' 'value is on database "%s"' % (obj, self.instance._state.db, obj._state.db) ) target_id = target_field.get_foreign_related_value(obj)[0] if target_id is None: raise ValueError( 'Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name) ) target_ids.add(target_id) elif isinstance(obj, Model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) else: target_ids.add(target_field.get_prep_value(obj)) return target_ids def _get_missing_target_ids( self, source_field_name, target_field_name, db, target_ids ): """ Return the subset of ids of `objs` that aren't already assigned to this relationship. """ vals = ( self.through._default_manager.using(db) .values_list(target_field_name, flat=True) .filter( **{ source_field_name: self.related_val[0], "%s__in" % target_field_name: target_ids, } ) ) return target_ids.difference(vals) def _get_add_plan(self, db, source_field_name): """ Return a boolean triple of the way the add should be performed. The first element is whether or not bulk_create(ignore_conflicts) can be used, the second whether or not signals must be sent, and the third element is whether or not the immediate bulk insertion with conflicts ignored can be performed. """ # Conflicts can be ignored when the intermediary model is # auto-created as the only possible collision is on the # (source_id, target_id) tuple. The same assertion doesn't hold for # user-defined intermediary models as they could have other fields # causing conflicts which must be surfaced. can_ignore_conflicts = ( self.through._meta.auto_created is not False and connections[db].features.supports_ignore_conflicts ) # Don't send the signal when inserting duplicate data row # for symmetrical reverse entries. must_send_signals = ( self.reverse or source_field_name == self.source_field_name ) and (signals.m2m_changed.has_listeners(self.through)) # Fast addition through bulk insertion can only be performed # if no m2m_changed listeners are connected for self.through # as they require the added set of ids to be provided via # pk_set. return ( can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and not must_send_signals), ) def _add_items( self, source_field_name, target_field_name, *objs, through_defaults=None ): # source_field_name: the PK fieldname in join table for the source object # target_field_name: the PK fieldname in join table for the target object # *objs - objects to add. Either object instances, or primary keys # of object instances. 
if not objs: return through_defaults = dict(resolve_callables(through_defaults or {})) target_ids = self._get_target_ids(target_field_name, objs) db = router.db_for_write(self.through, instance=self.instance) can_ignore_conflicts, must_send_signals, can_fast_add = self._get_add_plan( db, source_field_name ) if can_fast_add: self.through._default_manager.using(db).bulk_create( [ self.through( **{ "%s_id" % source_field_name: self.related_val[0], "%s_id" % target_field_name: target_id, } ) for target_id in target_ids ], ignore_conflicts=True, ) return missing_target_ids = self._get_missing_target_ids( source_field_name, target_field_name, db, target_ids ) with transaction.atomic(using=db, savepoint=False): if must_send_signals: signals.m2m_changed.send( sender=self.through, action="pre_add", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) # Add the ones that aren't there already. self.through._default_manager.using(db).bulk_create( [ self.through( **through_defaults, **{ "%s_id" % source_field_name: self.related_val[0], "%s_id" % target_field_name: target_id, }, ) for target_id in missing_target_ids ], ignore_conflicts=can_ignore_conflicts, ) if must_send_signals: signals.m2m_changed.send( sender=self.through, action="post_add", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) def _remove_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK colname in join table for the source object # target_field_name: the PK colname in join table for the target object # *objs - objects to remove. Either object instances, or primary # keys of object instances. if not objs: return # Check that all the objects are of the right type old_ids = set() for obj in objs: if isinstance(obj, self.model): fk_val = self.target_field.get_foreign_related_value(obj)[0] old_ids.add(fk_val) else: old_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): # Send a signal to the other end if need be. signals.m2m_changed.send( sender=self.through, action="pre_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) target_model_qs = super().get_queryset() if target_model_qs._has_filters(): old_vals = target_model_qs.using(db).filter( **{"%s__in" % self.target_field.target_field.attname: old_ids} ) else: old_vals = old_ids filters = self._build_remove_filters(old_vals) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) return ManyRelatedManager
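# Illustrative sketch (not part of the module above): how the manager built by
# create_forward_many_to_many_manager() is exercised through the public ORM
# API. ``Publication``/``Article`` are hypothetical models and would normally
# live in an installed app of a configured Django project.
from django.db import models


class Publication(models.Model):
    title = models.CharField(max_length=100)


class Article(models.Model):
    headline = models.CharField(max_length=100)
    # ``Article.publications`` is the many-to-many descriptor defined above;
    # accessing it on an instance returns a ManyRelatedManager bound to it.
    publications = models.ManyToManyField(Publication, related_name="articles")


def demo(article, publication):
    article.publications.add(publication)     # insert a row in the through table
    article.publications.set([publication])   # add/remove so the set matches
    article.publications.remove(publication)  # delete matching through rows
    article.publications.clear()              # delete every row for ``article``
    # With a user-defined ``through`` model, add()/create()/set() also accept
    # through_defaults={...} to populate its extra columns.
    return publication.articles.all()         # reverse manager (reverse=True)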
import math from django.db.models.expressions import Func, Value from django.db.models.fields import FloatField, IntegerField from django.db.models.functions import Cast from django.db.models.functions.mixins import ( FixDecimalInputMixin, NumericOutputFieldMixin, ) from django.db.models.lookups import Transform class Abs(Transform): function = "ABS" lookup_name = "abs" class ACos(NumericOutputFieldMixin, Transform): function = "ACOS" lookup_name = "acos" class ASin(NumericOutputFieldMixin, Transform): function = "ASIN" lookup_name = "asin" class ATan(NumericOutputFieldMixin, Transform): function = "ATAN" lookup_name = "atan" class ATan2(NumericOutputFieldMixin, Func): function = "ATAN2" arity = 2 def as_sqlite(self, compiler, connection, **extra_context): if not getattr( connection.ops, "spatialite", False ) or connection.ops.spatial_version >= (5, 0, 0): return self.as_sql(compiler, connection) # This function is usually ATan2(y, x), returning the inverse tangent # of y / x, but it's ATan2(x, y) on SpatiaLite < 5.0.0. # Cast integers to float to avoid inconsistent/buggy behavior if the # arguments are mixed between integer and float or decimal. # https://www.gaia-gis.it/fossil/libspatialite/tktview?name=0f72cca3a2 clone = self.copy() clone.set_source_expressions( [ Cast(expression, FloatField()) if isinstance(expression.output_field, IntegerField) else expression for expression in self.get_source_expressions()[::-1] ] ) return clone.as_sql(compiler, connection, **extra_context) class Ceil(Transform): function = "CEILING" lookup_name = "ceil" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="CEIL", **extra_context) class Cos(NumericOutputFieldMixin, Transform): function = "COS" lookup_name = "cos" class Cot(NumericOutputFieldMixin, Transform): function = "COT" lookup_name = "cot" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="(1 / TAN(%(expressions)s))", **extra_context ) class Degrees(NumericOutputFieldMixin, Transform): function = "DEGREES" lookup_name = "degrees" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="((%%(expressions)s) * 180 / %s)" % math.pi, **extra_context, ) class Exp(NumericOutputFieldMixin, Transform): function = "EXP" lookup_name = "exp" class Floor(Transform): function = "FLOOR" lookup_name = "floor" class Ln(NumericOutputFieldMixin, Transform): function = "LN" lookup_name = "ln" class Log(FixDecimalInputMixin, NumericOutputFieldMixin, Func): function = "LOG" arity = 2 def as_sqlite(self, compiler, connection, **extra_context): if not getattr(connection.ops, "spatialite", False): return self.as_sql(compiler, connection) # This function is usually Log(b, x) returning the logarithm of x to # the base b, but on SpatiaLite it's Log(x, b). 
clone = self.copy() clone.set_source_expressions(self.get_source_expressions()[::-1]) return clone.as_sql(compiler, connection, **extra_context) class Mod(FixDecimalInputMixin, NumericOutputFieldMixin, Func): function = "MOD" arity = 2 class Pi(NumericOutputFieldMixin, Func): function = "PI" arity = 0 def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template=str(math.pi), **extra_context ) class Power(NumericOutputFieldMixin, Func): function = "POWER" arity = 2 class Radians(NumericOutputFieldMixin, Transform): function = "RADIANS" lookup_name = "radians" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="((%%(expressions)s) * %s / 180)" % math.pi, **extra_context, ) class Random(NumericOutputFieldMixin, Func): function = "RANDOM" arity = 0 def as_mysql(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="RAND", **extra_context) def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, function="DBMS_RANDOM.VALUE", **extra_context ) def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="RAND", **extra_context) def get_group_by_cols(self): return [] class Round(FixDecimalInputMixin, Transform): function = "ROUND" lookup_name = "round" arity = None # Override Transform's arity=1 to enable passing precision. def __init__(self, expression, precision=0, **extra): super().__init__(expression, precision, **extra) def as_sqlite(self, compiler, connection, **extra_context): precision = self.get_source_expressions()[1] if isinstance(precision, Value) and precision.value < 0: raise ValueError("SQLite does not support negative precision.") return super().as_sqlite(compiler, connection, **extra_context) def _resolve_output_field(self): source = self.get_source_expressions()[0] return source.output_field class Sign(Transform): function = "SIGN" lookup_name = "sign" class Sin(NumericOutputFieldMixin, Transform): function = "SIN" lookup_name = "sin" class Sqrt(NumericOutputFieldMixin, Transform): function = "SQRT" lookup_name = "sqrt" class Tan(NumericOutputFieldMixin, Transform): function = "TAN" lookup_name = "tan"
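# Illustrative sketch (not part of the module above): using these database
# functions in ORM queries. ``vectors`` stands for a hypothetical queryset of
# a model with FloatFields ``x`` and ``y``; assumes a configured project.
from django.db.models import F, FloatField
from django.db.models.functions import Abs, ATan2, Power, Round, Sqrt

# The math transforms define ``lookup_name`` but are opt-in; register them on
# the field classes where the double-underscore form should be available.
FloatField.register_lookup(Abs)


def demo(vectors):
    return (
        vectors.annotate(
            magnitude=Sqrt(Power(F("x"), 2) + Power(F("y"), 2)),
            # ATan2.as_sqlite() above swaps the arguments on SpatiaLite < 5.
            angle=ATan2(F("y"), F("x")),
            x_rounded=Round(F("x"), precision=2),
        )
        # The registered transform enables lookups such as ``abs``.
        .filter(x__abs__lt=10)
    )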
from datetime import datetime from django.conf import settings from django.db.models.expressions import Func from django.db.models.fields import ( DateField, DateTimeField, DurationField, Field, IntegerField, TimeField, ) from django.db.models.lookups import ( Transform, YearExact, YearGt, YearGte, YearLt, YearLte, ) from django.utils import timezone class TimezoneMixin: tzinfo = None def get_tzname(self): # Timezone conversions must happen to the input datetime *before* # applying a function. 2015-12-31 23:00:00 -02:00 is stored in the # database as 2016-01-01 01:00:00 +00:00. Any results should be # based on the input datetime not the stored datetime. tzname = None if settings.USE_TZ: if self.tzinfo is None: tzname = timezone.get_current_timezone_name() else: tzname = timezone._get_timezone_name(self.tzinfo) return tzname class Extract(TimezoneMixin, Transform): lookup_name = None output_field = IntegerField() def __init__(self, expression, lookup_name=None, tzinfo=None, **extra): if self.lookup_name is None: self.lookup_name = lookup_name if self.lookup_name is None: raise ValueError("lookup_name must be provided") self.tzinfo = tzinfo super().__init__(expression, **extra) def as_sql(self, compiler, connection): sql, params = compiler.compile(self.lhs) lhs_output_field = self.lhs.output_field if isinstance(lhs_output_field, DateTimeField): tzname = self.get_tzname() sql, params = connection.ops.datetime_extract_sql( self.lookup_name, sql, tuple(params), tzname ) elif self.tzinfo is not None: raise ValueError("tzinfo can only be used with DateTimeField.") elif isinstance(lhs_output_field, DateField): sql, params = connection.ops.date_extract_sql( self.lookup_name, sql, tuple(params) ) elif isinstance(lhs_output_field, TimeField): sql, params = connection.ops.time_extract_sql( self.lookup_name, sql, tuple(params) ) elif isinstance(lhs_output_field, DurationField): if not connection.features.has_native_duration_field: raise ValueError( "Extract requires native DurationField database support." ) sql, params = connection.ops.time_extract_sql( self.lookup_name, sql, tuple(params) ) else: # resolve_expression has already validated the output_field so this # assert should never be hit. assert False, "Tried to Extract from an invalid type." return sql, params def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): copy = super().resolve_expression( query, allow_joins, reuse, summarize, for_save ) field = getattr(copy.lhs, "output_field", None) if field is None: return copy if not isinstance(field, (DateField, DateTimeField, TimeField, DurationField)): raise ValueError( "Extract input expression must be DateField, DateTimeField, " "TimeField, or DurationField." ) # Passing dates to functions expecting datetimes is most likely a mistake. if type(field) == DateField and copy.lookup_name in ( "hour", "minute", "second", ): raise ValueError( "Cannot extract time component '%s' from DateField '%s'." % (copy.lookup_name, field.name) ) if isinstance(field, DurationField) and copy.lookup_name in ( "year", "iso_year", "month", "week", "week_day", "iso_week_day", "quarter", ): raise ValueError( "Cannot extract component '%s' from DurationField '%s'." 
% (copy.lookup_name, field.name) ) return copy class ExtractYear(Extract): lookup_name = "year" class ExtractIsoYear(Extract): """Return the ISO-8601 week-numbering year.""" lookup_name = "iso_year" class ExtractMonth(Extract): lookup_name = "month" class ExtractDay(Extract): lookup_name = "day" class ExtractWeek(Extract): """ Return 1-52 or 53, based on ISO-8601, i.e., Monday is the first of the week. """ lookup_name = "week" class ExtractWeekDay(Extract): """ Return Sunday=1 through Saturday=7. To replicate this in Python: (mydatetime.isoweekday() % 7) + 1 """ lookup_name = "week_day" class ExtractIsoWeekDay(Extract): """Return Monday=1 through Sunday=7, based on ISO-8601.""" lookup_name = "iso_week_day" class ExtractQuarter(Extract): lookup_name = "quarter" class ExtractHour(Extract): lookup_name = "hour" class ExtractMinute(Extract): lookup_name = "minute" class ExtractSecond(Extract): lookup_name = "second" DateField.register_lookup(ExtractYear) DateField.register_lookup(ExtractMonth) DateField.register_lookup(ExtractDay) DateField.register_lookup(ExtractWeekDay) DateField.register_lookup(ExtractIsoWeekDay) DateField.register_lookup(ExtractWeek) DateField.register_lookup(ExtractIsoYear) DateField.register_lookup(ExtractQuarter) TimeField.register_lookup(ExtractHour) TimeField.register_lookup(ExtractMinute) TimeField.register_lookup(ExtractSecond) DateTimeField.register_lookup(ExtractHour) DateTimeField.register_lookup(ExtractMinute) DateTimeField.register_lookup(ExtractSecond) ExtractYear.register_lookup(YearExact) ExtractYear.register_lookup(YearGt) ExtractYear.register_lookup(YearGte) ExtractYear.register_lookup(YearLt) ExtractYear.register_lookup(YearLte) ExtractIsoYear.register_lookup(YearExact) ExtractIsoYear.register_lookup(YearGt) ExtractIsoYear.register_lookup(YearGte) ExtractIsoYear.register_lookup(YearLt) ExtractIsoYear.register_lookup(YearLte) class Now(Func): template = "CURRENT_TIMESTAMP" output_field = DateTimeField() def as_postgresql(self, compiler, connection, **extra_context): # PostgreSQL's CURRENT_TIMESTAMP means "the time at the start of the # transaction". Use STATEMENT_TIMESTAMP to be cross-compatible with # other databases. return self.as_sql( compiler, connection, template="STATEMENT_TIMESTAMP()", **extra_context ) def as_mysql(self, compiler, connection, **extra_context): return self.as_sql( compiler, connection, template="CURRENT_TIMESTAMP(6)", **extra_context ) def as_sqlite(self, compiler, connection, **extra_context): return self.as_sql( compiler, connection, template="STRFTIME('%%Y-%%m-%%d %%H:%%M:%%f', 'NOW')", **extra_context, ) class TruncBase(TimezoneMixin, Transform): kind = None tzinfo = None # RemovedInDjango50Warning: when the deprecation ends, remove is_dst # argument. 
def __init__( self, expression, output_field=None, tzinfo=None, is_dst=timezone.NOT_PASSED, **extra, ): self.tzinfo = tzinfo self.is_dst = is_dst super().__init__(expression, output_field=output_field, **extra) def as_sql(self, compiler, connection): sql, params = compiler.compile(self.lhs) tzname = None if isinstance(self.lhs.output_field, DateTimeField): tzname = self.get_tzname() elif self.tzinfo is not None: raise ValueError("tzinfo can only be used with DateTimeField.") if isinstance(self.output_field, DateTimeField): sql, params = connection.ops.datetime_trunc_sql( self.kind, sql, tuple(params), tzname ) elif isinstance(self.output_field, DateField): sql, params = connection.ops.date_trunc_sql( self.kind, sql, tuple(params), tzname ) elif isinstance(self.output_field, TimeField): sql, params = connection.ops.time_trunc_sql( self.kind, sql, tuple(params), tzname ) else: raise ValueError( "Trunc only valid on DateField, TimeField, or DateTimeField." ) return sql, params def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): copy = super().resolve_expression( query, allow_joins, reuse, summarize, for_save ) field = copy.lhs.output_field # DateTimeField is a subclass of DateField so this works for both. if not isinstance(field, (DateField, TimeField)): raise TypeError( "%r isn't a DateField, TimeField, or DateTimeField." % field.name ) # If self.output_field was None, then accessing the field will trigger # the resolver to assign it to self.lhs.output_field. if not isinstance(copy.output_field, (DateField, DateTimeField, TimeField)): raise ValueError( "output_field must be either DateField, TimeField, or DateTimeField" ) # Passing dates or times to functions expecting datetimes is most # likely a mistake. class_output_field = ( self.__class__.output_field if isinstance(self.__class__.output_field, Field) else None ) output_field = class_output_field or copy.output_field has_explicit_output_field = ( class_output_field or field.__class__ is not copy.output_field.__class__ ) if type(field) == DateField and ( isinstance(output_field, DateTimeField) or copy.kind in ("hour", "minute", "second", "time") ): raise ValueError( "Cannot truncate DateField '%s' to %s." % ( field.name, output_field.__class__.__name__ if has_explicit_output_field else "DateTimeField", ) ) elif isinstance(field, TimeField) and ( isinstance(output_field, DateTimeField) or copy.kind in ("year", "quarter", "month", "week", "day", "date") ): raise ValueError( "Cannot truncate TimeField '%s' to %s." % ( field.name, output_field.__class__.__name__ if has_explicit_output_field else "DateTimeField", ) ) return copy def convert_value(self, value, expression, connection): if isinstance(self.output_field, DateTimeField): if not settings.USE_TZ: pass elif value is not None: value = value.replace(tzinfo=None) value = timezone.make_aware(value, self.tzinfo, is_dst=self.is_dst) elif not connection.features.has_zoneinfo_database: raise ValueError( "Database returned an invalid datetime value. Are time " "zone definitions for your database installed?" ) elif isinstance(value, datetime): if value is None: pass elif isinstance(self.output_field, DateField): value = value.date() elif isinstance(self.output_field, TimeField): value = value.time() return value class Trunc(TruncBase): # RemovedInDjango50Warning: when the deprecation ends, remove is_dst # argument. 
def __init__( self, expression, kind, output_field=None, tzinfo=None, is_dst=timezone.NOT_PASSED, **extra, ): self.kind = kind super().__init__( expression, output_field=output_field, tzinfo=tzinfo, is_dst=is_dst, **extra ) class TruncYear(TruncBase): kind = "year" class TruncQuarter(TruncBase): kind = "quarter" class TruncMonth(TruncBase): kind = "month" class TruncWeek(TruncBase): """Truncate to midnight on the Monday of the week.""" kind = "week" class TruncDay(TruncBase): kind = "day" class TruncDate(TruncBase): kind = "date" lookup_name = "date" output_field = DateField() def as_sql(self, compiler, connection): # Cast to date rather than truncate to date. sql, params = compiler.compile(self.lhs) tzname = self.get_tzname() return connection.ops.datetime_cast_date_sql(sql, tuple(params), tzname) class TruncTime(TruncBase): kind = "time" lookup_name = "time" output_field = TimeField() def as_sql(self, compiler, connection): # Cast to time rather than truncate to time. sql, params = compiler.compile(self.lhs) tzname = self.get_tzname() return connection.ops.datetime_cast_time_sql(sql, tuple(params), tzname) class TruncHour(TruncBase): kind = "hour" class TruncMinute(TruncBase): kind = "minute" class TruncSecond(TruncBase): kind = "second" DateTimeField.register_lookup(TruncDate) DateTimeField.register_lookup(TruncTime)
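# Illustrative sketch (not part of the module above): typical use of the
# Extract and Trunc expressions defined here. ``orders`` stands for a
# hypothetical queryset of a model with a DateTimeField named ``created``;
# assumes a configured project (and USE_TZ handling as described above).
from django.db.models import Count
from django.db.models.functions import ExtractYear, TruncMonth


def demo(orders):
    # Group rows by calendar month (TruncMonth cuts ``created`` to the first
    # of the month) and count them.
    per_month = (
        orders.annotate(month=TruncMonth("created"))
        .values("month")
        .annotate(total=Count("id"))
        .order_by("month")
    )
    # The register_lookup() calls above expose the extracts as lookups, so the
    # double-underscore form works as well: ISO Monday == 1.
    mondays_2023 = orders.filter(created__iso_week_day=1, created__year=2023)
    years = orders.annotate(year=ExtractYear("created"))
    return per_month, mondays_2023, years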
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import copy import difflib import functools import sys from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import FieldDoesNotExist, FieldError from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import ( BaseExpression, Col, Exists, F, OuterRef, Ref, ResolvedOuterRef, Value, ) from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode from django.utils.functional import cached_property from django.utils.regex_helper import _lazy_re_compile from django.utils.tree import Node __all__ = ["Query", "RawQuery"] # Quotation marks ('"`[]), whitespace characters, semicolons, or inline # SQL comments are forbidden in column aliases. FORBIDDEN_ALIAS_PATTERN = _lazy_re_compile(r"['`\"\]\[;\s]|--|/\*|\*/") # Inspired from # https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS EXPLAIN_OPTIONS_PATTERN = _lazy_re_compile(r"[\w\-]+") def get_field_names_from_opts(opts): if opts is None: return set() return set( chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() ) ) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( "JoinInfo", ("final_field", "targets", "opts", "joins", "path", "transform_function"), ) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=()): self.params = params self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. 
result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): if self.params is None: return None return dict if isinstance(self.params, Mapping) else tuple def __str__(self): if self.params_type is None: return self.sql return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. See #17755. params_type = self.params_type adapter = connection.ops.adapt_unknown_value if params_type is tuple: params = tuple(adapter(val) for val in self.params) elif params_type is dict: params = {key: adapter(val) for key, val in self.params.items()} elif params_type is None: params = None else: raise RuntimeError("Unexpected params type: %s" % params_type) self.cursor = connection.cursor() self.cursor.execute(self.sql, params) ExplainInfo = namedtuple("ExplainInfo", ("format", "options")) class Query(BaseExpression): """A single SQL query.""" alias_prefix = "T" empty_result_set_value = None subq_aliases = frozenset([alias_prefix]) compiler = "SQLCompiler" base_table_class = BaseTable join_class = Join default_cols = True default_ordering = True standard_ordering = True filter_is_sticky = False subquery = False # SQL-related attributes. # Select and related select clauses are expressions to use in the SELECT # clause of the query. The select is used for cases where we want to set up # the select clause to contain other than default fields (values(), # subqueries...). Note that annotations go to annotations dictionary. select = () # The group_by attribute can have one of the following forms: # - None: no group by at all in the query # - A tuple of expressions: group by (at least) those expressions. # String refs are also allowed for now. # - True: group by all select fields of the model # See compiler.get_group_by() for details. group_by = None order_by = () low_mark = 0 # Used for offset/limit. high_mark = None # Used for offset/limit. distinct = False distinct_fields = () select_for_update = False select_for_update_nowait = False select_for_update_skip_locked = False select_for_update_of = () select_for_no_key_update = False select_related = False has_select_fields = False # Arbitrary limit for select_related to prevents infinite recursion. max_depth = 5 # Holds the selects defined by a call to values() or values_list() # excluding annotation_select and extra_select. values_select = () # SQL annotation-related attributes. annotation_select_mask = None _annotation_select_cache = None # Set combination attributes. combinator = None combinator_all = False combined_queries = () # These are for extensions. The contents are more or less appended verbatim # to the appropriate clause. extra_select_mask = None _extra_select_cache = None extra_tables = () extra_order_by = () # A tuple that is a set of model field names and either True, if these are # the fields to defer, or False if these are the only fields to load. deferred_loading = (frozenset(), True) explain_info = None def __init__(self, model, alias_cols=True): self.model = model self.alias_refcount = {} # alias_map is the most important data structure regarding joins. # It's used for recording which joins exist in the query and what # types they are. 
The key is the alias of the joined table (possibly # the table name) and the value is a Join-like object (see # sql.datastructures.Join for more information). self.alias_map = {} # Whether to provide alias to columns during reference resolving. self.alias_cols = alias_cols # Sometimes the query contains references to aliases in outer queries (as # a result of split_exclude). Correct alias quoting needs to know these # aliases too. # Map external tables to whether they are aliased. self.external_aliases = {} self.table_map = {} # Maps table names to list of aliases. self.used_aliases = set() self.where = WhereNode() # Maps alias -> Annotation Expression. self.annotations = {} # These are for extensions. The contents are more or less appended # verbatim to the appropriate clause. self.extra = {} # Maps col_alias -> (col_sql, params). self._filtered_relations = {} @property def output_field(self): if len(self.select) == 1: select = self.select[0] return getattr(select, "target", None) or select.field elif len(self.annotation_select) == 1: return next(iter(self.annotation_select.values())).output_field @cached_property def base_table(self): for alias in self.alias_map: return alias def __str__(self): """ Return the query as a string of SQL with the parameter values substituted in (use sql_with_params() to see the unsubstituted string). Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time. """ sql, params = self.sql_with_params() return sql % params def sql_with_params(self): """ Return the query as an SQL string and the parameters that will be substituted into the query. """ return self.get_compiler(DEFAULT_DB_ALIAS).as_sql() def __deepcopy__(self, memo): """Limit the amount of work when a Query is deepcopied.""" result = self.clone() memo[id(self)] = result return result def get_compiler(self, using=None, connection=None, elide_empty=True): if using is None and connection is None: raise ValueError("Need either using or connection") if using: connection = connections[using] return connection.ops.compiler(self.compiler)( self, connection, using, elide_empty ) def get_meta(self): """ Return the Options instance (the model._meta) from which to start processing. Normally, this is self.model._meta, but it can be changed by subclasses. """ if self.model: return self.model._meta def clone(self): """ Return a copy of the current Query. A lightweight alternative to deepcopy(). """ obj = Empty() obj.__class__ = self.__class__ # Copy references to everything. obj.__dict__ = self.__dict__.copy() # Clone attributes that can't use shallow copy. obj.alias_refcount = self.alias_refcount.copy() obj.alias_map = self.alias_map.copy() obj.external_aliases = self.external_aliases.copy() obj.table_map = self.table_map.copy() obj.where = self.where.clone() obj.annotations = self.annotations.copy() if self.annotation_select_mask is not None: obj.annotation_select_mask = self.annotation_select_mask.copy() if self.combined_queries: obj.combined_queries = tuple( [query.clone() for query in self.combined_queries] ) # _annotation_select_cache cannot be copied, as doing so breaks the # (necessary) state in which both annotations and # _annotation_select_cache point to the same underlying objects. # It will get re-populated in the cloned queryset the next time it's # used. 
obj._annotation_select_cache = None obj.extra = self.extra.copy() if self.extra_select_mask is not None: obj.extra_select_mask = self.extra_select_mask.copy() if self._extra_select_cache is not None: obj._extra_select_cache = self._extra_select_cache.copy() if self.select_related is not False: # Use deepcopy because select_related stores fields in nested # dicts. obj.select_related = copy.deepcopy(obj.select_related) if "subq_aliases" in self.__dict__: obj.subq_aliases = self.subq_aliases.copy() obj.used_aliases = self.used_aliases.copy() obj._filtered_relations = self._filtered_relations.copy() # Clear the cached_property, if it exists. obj.__dict__.pop("base_table", None) return obj def chain(self, klass=None): """ Return a copy of the current Query that's ready for another operation. The klass argument changes the type of the Query, e.g. UpdateQuery. """ obj = self.clone() if klass and obj.__class__ != klass: obj.__class__ = klass if not obj.filter_is_sticky: obj.used_aliases = set() obj.filter_is_sticky = False if hasattr(obj, "_setup_query"): obj._setup_query() return obj def relabeled_clone(self, change_map): clone = self.clone() clone.change_aliases(change_map) return clone def _get_col(self, target, field, alias): if not self.alias_cols: alias = None return target.get_col(alias, field) def rewrite_cols(self, annotation, col_cnt): # We must make sure the inner query has the referred columns in it. # If we are aggregating over an annotation, then Django uses Ref() # instances to note this. However, if we are annotating over a column # of a related model, then it might be that column isn't part of the # SELECT clause of the inner query, and we must manually make sure # the column is selected. An example case is: # .aggregate(Sum('author__awards')) # Resolving this expression results in a join to author, but there # is no guarantee the awards column of author is in the select clause # of the query. Thus we must manually add the column to the inner # query. orig_exprs = annotation.get_source_expressions() new_exprs = [] for expr in orig_exprs: # FIXME: These conditions are fairly arbitrary. Identify a better # method of having expressions decide which code path they should # take. if isinstance(expr, Ref): # Its already a Ref to subquery (see resolve_ref() for # details) new_exprs.append(expr) elif isinstance(expr, (WhereNode, Lookup)): # Decompose the subexpressions further. The code here is # copied from the else clause, but this condition must appear # before the contains_aggregate/is_summary condition below. new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) else: # Reuse aliases of expressions already selected in subquery. for col_alias, selected_annotation in self.annotation_select.items(): if selected_annotation is expr: new_expr = Ref(col_alias, expr) break else: # An expression that is not selected the subquery. if isinstance(expr, Col) or ( expr.contains_aggregate and not expr.is_summary ): # Reference column or another aggregate. Select it # under a non-conflicting alias. col_cnt += 1 col_alias = "__col%d" % col_cnt self.annotations[col_alias] = expr self.append_annotation_mask([col_alias]) new_expr = Ref(col_alias, expr) else: # Some other expression not referencing database values # directly. Its subexpression might contain Cols. 
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt) new_exprs.append(new_expr) annotation.set_source_expressions(new_exprs) return annotation, col_cnt def get_aggregation(self, using, added_aggregate_names): """ Return the dictionary with the values of the existing aggregations. """ if not self.annotation_select: return {} existing_annotations = [ annotation for alias, annotation in self.annotations.items() if alias not in added_aggregate_names ] # Decide if we need to use a subquery. # # Existing annotations would cause incorrect results as get_aggregation() # must produce just one result and thus must not use GROUP BY. But we # aren't smart enough to remove the existing annotations from the # query, so those would force us to use GROUP BY. # # If the query has limit or distinct, or uses set operations, then # those operations must be done in a subquery so that the query # aggregates on the limit and/or distinct results instead of applying # the distinct and limit after the aggregation. if ( isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or self.distinct or self.combinator ): from django.db.models.sql.subqueries import AggregateQuery inner_query = self.clone() inner_query.subquery = True outer_query = AggregateQuery(self.model, inner_query) inner_query.select_for_update = False inner_query.select_related = False inner_query.set_annotation_mask(self.annotation_select) # Queries with distinct_fields need ordering and when a limit is # applied we must take the slice from the ordered query. Otherwise # no need for ordering. inner_query.clear_ordering(force=False) if not inner_query.distinct: # If the inner query uses default select and it has some # aggregate annotations, then we must make sure the inner # query is grouped by the main model's primary key. However, # clearing the select clause can alter results if distinct is # used. has_existing_aggregate_annotations = any( annotation for annotation in existing_annotations if getattr(annotation, "contains_aggregate", True) ) if inner_query.default_cols and has_existing_aggregate_annotations: inner_query.group_by = ( self.model._meta.pk.get_col(inner_query.get_initial_alias()), ) inner_query.default_cols = False relabels = {t: "subquery" for t in inner_query.alias_map} relabels[None] = "subquery" # Remove any aggregates marked for reduction from the subquery # and move them to the outer AggregateQuery. col_cnt = 0 for alias, expression in list(inner_query.annotation_select.items()): annotation_select_mask = inner_query.annotation_select_mask if expression.is_summary: expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt) outer_query.annotations[alias] = expression.relabeled_clone( relabels ) del inner_query.annotations[alias] annotation_select_mask.remove(alias) # Make sure the annotation_select wont use cached results. inner_query.set_annotation_mask(inner_query.annotation_select_mask) if ( inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask ): # In case of Model.objects[0:3].count(), there would be no # field selected in the inner query, yet we must use a subquery. # So, make sure at least one field is selected. 
inner_query.select = ( self.model._meta.pk.get_col(inner_query.get_initial_alias()), ) else: outer_query = self self.select = () self.default_cols = False self.extra = {} empty_set_result = [ expression.empty_result_set_value for expression in outer_query.annotation_select.values() ] elide_empty = not any(result is NotImplemented for result in empty_set_result) outer_query.clear_ordering(force=True) outer_query.clear_limits() outer_query.select_for_update = False outer_query.select_related = False compiler = outer_query.get_compiler(using, elide_empty=elide_empty) result = compiler.execute_sql(SINGLE) if result is None: result = empty_set_result converters = compiler.get_converters(outer_query.annotation_select.values()) result = next(compiler.apply_converters((result,), converters)) return dict(zip(outer_query.annotation_select, result)) def get_count(self, using): """ Perform a COUNT() query using the current filter constraints. """ obj = self.clone() obj.add_annotation(Count("*"), alias="__count", is_summary=True) return obj.get_aggregation(using, ["__count"])["__count"] def has_filters(self): return self.where def exists(self, limit=True): q = self.clone() if not (q.distinct and q.is_sliced): if q.group_by is True: q.add_fields( (f.attname for f in self.model._meta.concrete_fields), False ) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. q.set_group_by(allow_aliases=False) q.clear_select_clause() if q.combined_queries and q.combinator == "union": q.combined_queries = tuple( combined_query.exists(limit=False) for combined_query in q.combined_queries ) q.clear_ordering(force=True) if limit: q.set_limits(high=1) q.add_annotation(Value(1), "a") return q def has_results(self, using): q = self.exists(using) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() for option_name in options: if ( not EXPLAIN_OPTIONS_PATTERN.fullmatch(option_name) or "--" in option_name ): raise ValueError(f"Invalid option name: {option_name!r}.") q.explain_info = ExplainInfo(format, options) compiler = q.get_compiler(using=using) return "\n".join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query. 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ if self.model != rhs.model: raise TypeError("Cannot combine queries on two different base models.") if self.is_sliced: raise TypeError("Cannot combine queries once a slice has been taken.") if self.distinct != rhs.distinct: raise TypeError("Cannot combine a unique query with a non-unique query.") if self.distinct_fields != rhs.distinct_fields: raise TypeError("Cannot combine queries with different distinct fields.") # If lhs and rhs shares the same alias prefix, it is possible to have # conflicting alias changes like T4 -> T5, T5 -> T6, which might end up # as T4 -> T6 while combining two querysets. To prevent this, change an # alias prefix of the rhs and update current aliases accordingly, # except if the alias is the base table since it must be present in the # query on both sides. initial_alias = self.get_initial_alias() rhs.bump_prefix(self, exclude={initial_alias}) # Work out how to relabel the rhs aliases, if necessary. 
change_map = {} conjunction = connector == AND # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER ) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. self.unref_alias(new_alias) joinpromoter.add_votes(rhs_votes) joinpromoter.update_join_types(self) # Combine subqueries aliases to ensure aliases relabelling properly # handle subqueries when combining where and select clauses. self.subq_aliases |= rhs.subq_aliases # Now relabel a copy of the rhs where-clause and add it to the current # one. w = rhs.where.clone() w.relabel_aliases(change_map) self.where.add(w, connector) # Selection columns and extra extensions are those provided by 'rhs'. if rhs.select: self.set_select([col.relabeled_clone(change_map) for col in rhs.select]) else: self.select = () if connector == OR: # It would be nice to be able to handle this, but the queries don't # really make sense (or return consistent value sets). Not worth # the extra complexity when you can write a real query instead. if self.extra and rhs.extra: raise ValueError( "When merging querysets using 'or', you cannot have " "extra(select=...) on both sides." ) self.extra.update(rhs.extra) extra_select_mask = set() if self.extra_select_mask is not None: extra_select_mask.update(self.extra_select_mask) if rhs.extra_select_mask is not None: extra_select_mask.update(rhs.extra_select_mask) if extra_select_mask: self.set_extra_mask(extra_select_mask) self.extra_tables += rhs.extra_tables # Ordering uses the 'rhs' ordering, unless it has none, in which case # the current ordering is used. self.order_by = rhs.order_by or self.order_by self.extra_order_by = rhs.extra_order_by or self.extra_order_by def _get_defer_select_mask(self, opts, mask, select_mask=None): if select_mask is None: select_mask = {} select_mask[opts.pk] = {} # All concrete fields that are not part of the defer mask must be # loaded. 
If a relational field is encountered it gets added to the # mask for it be considered if `select_related` and the cycle continues # by recursively caling this function. for field in opts.concrete_fields: field_mask = mask.pop(field.name, None) if field_mask is None: select_mask.setdefault(field, {}) elif field_mask: if not field.is_relation: raise FieldError(next(iter(field_mask))) field_select_mask = select_mask.setdefault(field, {}) related_model = field.remote_field.model._meta.concrete_model self._get_defer_select_mask( related_model._meta, field_mask, field_select_mask ) # Remaining defer entries must be references to reverse relationships. # The following code is expected to raise FieldError if it encounters # a malformed defer entry. for field_name, field_mask in mask.items(): if filtered_relation := self._filtered_relations.get(field_name): relation = opts.get_field(filtered_relation.relation_name) field_select_mask = select_mask.setdefault((field_name, relation), {}) field = relation.field else: field = opts.get_field(field_name).field field_select_mask = select_mask.setdefault(field, {}) related_model = field.model._meta.concrete_model self._get_defer_select_mask( related_model._meta, field_mask, field_select_mask ) return select_mask def _get_only_select_mask(self, opts, mask, select_mask=None): if select_mask is None: select_mask = {} select_mask[opts.pk] = {} # Only include fields mentioned in the mask. for field_name, field_mask in mask.items(): field = opts.get_field(field_name) field_select_mask = select_mask.setdefault(field, {}) if field_mask: if not field.is_relation: raise FieldError(next(iter(field_mask))) related_model = field.remote_field.model._meta.concrete_model self._get_only_select_mask( related_model._meta, field_mask, field_select_mask ) return select_mask def get_select_mask(self): """ Convert the self.deferred_loading data structure to an alternate data structure, describing the field that *will* be loaded. This is used to compute the columns to select from the database and also by the QuerySet class to work out which fields are being initialized on each model. Models that have all their fields included aren't mentioned in the result, only those that have field restrictions in place. """ field_names, defer = self.deferred_loading if not field_names: return {} mask = {} for field_name in field_names: part_mask = mask for part in field_name.split(LOOKUP_SEP): part_mask = part_mask.setdefault(part, {}) opts = self.get_meta() if defer: return self._get_defer_select_mask(opts, mask) return self._get_only_select_mask(opts, mask) def table_alias(self, table_name, create=False, filtered_relation=None): """ Return a table alias for the given table_name and whether this is a new alias or not. If 'create' is true, a new alias is always created. Otherwise, the most recently created alias for the table (if one exists) is reused. """ alias_list = self.table_map.get(table_name) if not create and alias_list: alias = alias_list[0] self.alias_refcount[alias] += 1 return alias, False # Create a new alias for this table. if alias_list: alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1) alias_list.append(alias) else: # The first occurrence of a table uses the table name directly. 
alias = ( filtered_relation.alias if filtered_relation is not None else table_name ) self.table_map[table_name] = [alias] self.alias_refcount[alias] = 1 return alias, True def ref_alias(self, alias): """Increases the reference count for this alias.""" self.alias_refcount[alias] += 1 def unref_alias(self, alias, amount=1): """Decreases the reference count for this alias.""" self.alias_refcount[alias] -= amount def promote_joins(self, aliases): """ Promote recursively the join type of given aliases and its children to an outer join. If 'unconditional' is False, only promote the join if it is nullable or the parent join is an outer join. The children promotion is done to avoid join chains that contain a LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted, then we must also promote b->c automatically, or otherwise the promotion of a->b doesn't actually change anything in the query results. """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type is None: # This is the base table (first FROM entry) - this table # isn't really joined at all in the query, so we should not # alter its join type. continue # Only the first alias (skipped above) should have None join_type assert self.alias_map[alias].join_type is not None parent_alias = self.alias_map[alias].parent_alias parent_louter = ( parent_alias and self.alias_map[parent_alias].join_type == LOUTER ) already_louter = self.alias_map[alias].join_type == LOUTER if (self.alias_map[alias].nullable or parent_louter) and not already_louter: self.alias_map[alias] = self.alias_map[alias].promote() # Join type of 'alias' changed, so re-examine all aliases that # refer to this one. aliases.extend( join for join in self.alias_map if self.alias_map[join].parent_alias == alias and join not in aliases ) def demote_joins(self, aliases): """ Change join type from LOUTER to INNER for all joins in aliases. Similarly to promote_joins(), this method must ensure no join chains containing first an outer, then an inner join are generated. If we are demoting b->c join in chain a LOUTER b LOUTER c then we must demote a->b automatically, or otherwise the demotion of b->c doesn't actually change anything in the query results. . """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type == LOUTER: self.alias_map[alias] = self.alias_map[alias].demote() parent_alias = self.alias_map[alias].parent_alias if self.alias_map[parent_alias].join_type == INNER: aliases.append(parent_alias) def reset_refcounts(self, to_counts): """ Reset reference counts for aliases so that they match the value passed in `to_counts`. """ for alias, cur_refcount in self.alias_refcount.copy().items(): unref_amount = cur_refcount - to_counts.get(alias, 0) self.unref_alias(alias, unref_amount) def change_aliases(self, change_map): """ Change the aliases in change_map (which maps old-alias -> new-alias), relabelling any references to them in select columns and the where clause. """ # If keys and values of change_map were to intersect, an alias might be # updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending # on their order in change_map. assert set(change_map).isdisjoint(change_map.values()) # 1. Update references in "select" (normal columns plus aliases), # "group by" and "where". 
self.where.relabel_aliases(change_map) if isinstance(self.group_by, tuple): self.group_by = tuple( [col.relabeled_clone(change_map) for col in self.group_by] ) self.select = tuple([col.relabeled_clone(change_map) for col in self.select]) self.annotations = self.annotations and { key: col.relabeled_clone(change_map) for key, col in self.annotations.items() } # 2. Rename the alias in the internal table/alias datastructures. for old_alias, new_alias in change_map.items(): if old_alias not in self.alias_map: continue alias_data = self.alias_map[old_alias].relabeled_clone(change_map) self.alias_map[new_alias] = alias_data self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break self.external_aliases = { # Table is aliased or it's being changed and thus is aliased. change_map.get(alias, alias): (aliased or alias in change_map) for alias, aliased in self.external_aliases.items() } def bump_prefix(self, other_query, exclude=None): """ Change the alias prefix to the next letter in the alphabet in a way that the other query's aliases and this query's aliases will not conflict. Even tables that previously had no alias will get an alias after this call. To prevent changing aliases use the exclude parameter. """ def prefix_gen(): """ Generate a sequence of characters in alphabetical order: -> 'A', 'B', 'C', ... When the alphabet is finished, the sequence will continue with the Cartesian product: -> 'AA', 'AB', 'AC', ... """ alphabet = ascii_uppercase prefix = chr(ord(self.alias_prefix) + 1) yield prefix for n in count(1): seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet for s in product(seq, repeat=n): yield "".join(s) prefix = None if self.alias_prefix != other_query.alias_prefix: # No clashes between self and outer query should be possible. return # Explicitly avoid infinite loop. The constant divider is based on how # much depth recursive subquery references add to the stack. This value # might need to be adjusted when adding or removing function calls from # the code path in charge of performing these operations. local_recursion_limit = sys.getrecursionlimit() // 16 for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RecursionError( "Maximum recursion depth exceeded: too many subqueries." ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases) if exclude is None: exclude = {} self.change_aliases( { alias: "%s%d" % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) if alias not in exclude } ) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) elif self.model: alias = self.join(self.base_table_class(self.get_meta().db_table, None)) else: alias = None return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. 
""" return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None, reuse_with_filtered_relation=False): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a base_table_class or join_class. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. The 'reuse_with_filtered_relation' parameter is used when computing FilteredRelation instances. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable. """ if reuse_with_filtered_relation and reuse: reuse_aliases = [ a for a, j in self.alias_map.items() if a in reuse and j.equals(join) ] else: reuse_aliases = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join ] if reuse_aliases: if join.table_alias in reuse_aliases: reuse_alias = join.table_alias else: # Reuse the most recent alias of the joined table # (a many-to-many relation may be joined multiple times). reuse_alias = reuse_aliases[-1] self.ref_alias(reuse_alias) return reuse_alias # No reuse is possible, so we need a new alias. alias, _ = self.table_alias( join.table_name, create=True, filtered_relation=join.filtered_relation ) if join.join_type: if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable: join_type = LOUTER else: join_type = INNER join.join_type = join_type join.table_alias = alias self.alias_map[alias] = join return alias def join_parent_model(self, opts, model, alias, seen): """ Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. """ if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def check_alias(self, alias): if FORBIDDEN_ALIAS_PATTERN.search(alias): raise ValueError( "Column aliases cannot contain whitespace characters, quotation marks, " "semicolons, or SQL comments." ) def add_annotation(self, annotation, alias, is_summary=False, select=True): """Add a single annotation expression to the Query.""" self.check_alias(alias) annotation = annotation.resolve_expression( self, allow_joins=True, reuse=None, summarize=is_summary ) if select: self.append_annotation_mask([alias]) else: self.set_annotation_mask(set(self.annotation_select).difference({alias})) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. 
clone.bump_prefix(query) clone.subquery = True clone.where.resolve_expression(query, *args, **kwargs) # Resolve combined queries. if clone.combinator: clone.combined_queries = tuple( [ combined_query.resolve_expression(query, *args, **kwargs) for combined_query in clone.combined_queries ] ) for key, value in clone.annotations.items(): resolved = value.resolve_expression(query, *args, **kwargs) if hasattr(resolved, "external_aliases"): resolved.external_aliases.update(clone.external_aliases) clone.annotations[key] = resolved # Outer query's aliases are considered external. for alias, table in query.alias_map.items(): clone.external_aliases[alias] = ( isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias ) or ( isinstance(table, BaseTable) and table.table_name != table.table_alias ) return clone def get_external_cols(self): exprs = chain(self.annotations.values(), self.where.children) return [ col for col in self._gen_cols(exprs, include_external=True) if col.alias in self.external_aliases ] def get_group_by_cols(self, wrapper=None): # If wrapper is referenced by an alias for an explicit GROUP BY through # values() a reference to this expression and not the self must be # returned to ensure external column references are not grouped against # as well. external_cols = self.get_external_cols() if any(col.possibly_multivalued for col in external_cols): return [wrapper or self] return external_cols def as_sql(self, compiler, connection): # Some backends (e.g. Oracle) raise an error when a subquery contains # unnecessary ORDER BY clause. if ( self.subquery and not connection.features.ignores_unnecessary_order_by_in_subqueries ): self.clear_ordering(force=False) for query in self.combined_queries: query.clear_ordering(force=False) sql, params = self.get_compiler(connection=connection).as_sql() if self.subquery: sql = "(%s)" % sql return sql, params def resolve_lookup_value(self, value, can_reuse, allow_joins): if hasattr(value, "resolve_expression"): value = value.resolve_expression( self, reuse=can_reuse, allow_joins=allow_joins, ) elif isinstance(value, (list, tuple)): # The items of the iterable may be expressions and therefore need # to be resolved independently. values = ( self.resolve_lookup_value(sub_value, can_reuse, allow_joins) for sub_value in value ) type_ = type(value) if hasattr(type_, "_make"): # namedtuple return type_(*values) return type_(values) return value def solve_lookup_type(self, lookup): """ Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains'). """ lookup_splitted = lookup.split(LOOKUP_SEP) if self.annotations: expression, expression_lookups = refs_expression( lookup_splitted, self.annotations ) if expression: return expression_lookups, (), expression _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)] if len(lookup_parts) > 1 and not field_parts: raise FieldError( 'Invalid lookup "%s" for model %s".' % (lookup, self.get_meta().model.__name__) ) return lookup_parts, field_parts, False def check_query_object_type(self, value, opts, field): """ Check whether the object passed while querying is of the correct type. If not, raise a ValueError specifying the wrong object. """ if hasattr(value, "_meta"): if not check_rel_lookup_compatibility(value._meta.model, opts, field): raise ValueError( 'Cannot query "%s": Must be "%s" instance.' 
% (value, opts.object_name) ) def check_related_objects(self, field, value, opts): """Check the type of object passed to query relations.""" if field.is_relation: # Check that the field and the queryset use the same model in a # query like .filter(author=Author.objects.all()). For example, the # opts would be Author's (from the author field) and value.model # would be Author.objects.all() queryset's .model (Author also). # The field is the related field on the lhs side. if ( isinstance(value, Query) and not value.has_select_fields and not check_rel_lookup_compatibility(value.model, opts, field) ): raise ValueError( 'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' % (value.model._meta.object_name, opts.object_name) ) elif hasattr(value, "_meta"): self.check_query_object_type(value, opts, field) elif hasattr(value, "__iter__"): for v in value: self.check_query_object_type(v, opts, field) def check_filterable(self, expression): """Raise an error if expression cannot be used in a WHERE clause.""" if hasattr(expression, "resolve_expression") and not getattr( expression, "filterable", True ): raise NotSupportedError( expression.__class__.__name__ + " is disallowed in the filter " "clause." ) if hasattr(expression, "get_source_expressions"): for expr in expression.get_source_expressions(): self.check_filterable(expr) def build_lookup(self, lookups, lhs, rhs): """ Try to extract transforms and lookup from given lhs. The lhs value is something that works like SQLExpression. The rhs value is what the lookup is going to compare against. The lookups is a list of names to extract using get_lookup() and get_transform(). """ # __exact is the default lookup if one isn't given. *transforms, lookup_name = lookups or ["exact"] for name in transforms: lhs = self.try_transform(lhs, name) # First try get_lookup() so that the lookup takes precedence if the lhs # supports both transform and lookup for the name. lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: # A lookup wasn't found. Try to interpret the name as a transform # and do an Exact lookup against it. lhs = self.try_transform(lhs, lookup_name) lookup_name = "exact" lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: return lookup = lookup_class(lhs, rhs) # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all # uses of None as a query value unless the lookup supports it. if lookup.rhs is None and not lookup.can_use_none_as_rhs: if lookup_name not in ("exact", "iexact"): raise ValueError("Cannot use None as a query value") return lhs.get_lookup("isnull")(lhs, True) # For Oracle '' is equivalent to null. The check must be done at this # stage because join promotion can't be done in the compiler. Using # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here. # A similar thing is done in is_nullable(), too. if ( lookup_name == "exact" and lookup.rhs == "" and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls ): return lhs.get_lookup("isnull")(lhs, True) return lookup def try_transform(self, lhs, name): """ Helper method for build_lookup(). Try to fetch and initialize a transform for name parameter from lhs. """ transform_class = lhs.get_transform(name) if transform_class: return transform_class(lhs) else: output_field = lhs.output_field.__class__ suggested_lookups = difflib.get_close_matches( name, output_field.get_lookups() ) if suggested_lookups: suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups) else: suggestion = "." 
raise FieldError( "Unsupported lookup '%s' for %s or join on the field not " "permitted%s" % (name, output_field.__name__, suggestion) ) def build_filter( self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, allow_joins=True, split_subq=True, reuse_with_filtered_relation=False, check_filterable=True, ): """ Build a WhereNode for a single filter clause but don't add it to this Query. Query.add_q() will then add this filter to the where Node. The 'branch_negated' tells us if the current branch contains any negations. This will be used to determine if subqueries are needed. The 'current_negated' is used to determine if the current filter is negated or not and this will be used to determine if IS NULL filtering is needed. The difference between current_negated and branch_negated is that branch_negated is set on first negation, but current_negated is flipped for each negation. Note that add_filter will not do any negating itself, that is done upper in the code by add_q(). The 'can_reuse' is a set of reusable joins for multijoins. If 'reuse_with_filtered_relation' is True, then only joins in can_reuse will be reused. The method will create a filter clause that can be added to the current query. However, if the filter isn't added to the query then the caller is responsible for unreffing the joins used. """ if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") if isinstance(filter_expr, Q): return self._add_q( filter_expr, branch_negated=branch_negated, current_negated=current_negated, used_aliases=can_reuse, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) if hasattr(filter_expr, "resolve_expression"): if not getattr(filter_expr, "conditional", False): raise TypeError("Cannot filter against a non-conditional expression.") condition = filter_expr.resolve_expression(self, allow_joins=allow_joins) if not isinstance(condition, Lookup): condition = self.build_lookup(["exact"], condition, True) return WhereNode([condition], connector=AND), [] arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg) if check_filterable: self.check_filterable(reffed_expression) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins) used_joins = { k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0) } if check_filterable: self.check_filterable(value) if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) return WhereNode([condition], connector=AND), [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, reuse_with_filtered_relation=reuse_with_filtered_relation, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to 
INNER. used_joins.update(join_info.joins) targets, alias, join_list = self.trim_joins( join_info.targets, join_info.joins, join_info.path ) if can_reuse is not None: can_reuse.update(join_list) if join_info.final_field.is_relation: if len(targets) == 1: col = self._get_col(targets[0], join_info.final_field, alias) else: col = MultiColSource( alias, targets, join_info.targets, join_info.final_field ) else: col = self._get_col(targets[0], join_info.final_field, alias) condition = self.build_lookup(lookups, col, value) lookup_type = condition.lookup_name clause = WhereNode([condition], connector=AND) require_outer = ( lookup_type == "isnull" and condition.rhs is True and not current_negated ) if ( current_negated and (lookup_type != "isnull" or condition.rhs is False) and condition.rhs is not None ): require_outer = True if lookup_type != "isnull": # The condition added here will be SQL like this: # NOT (col IS NOT NULL), where the first NOT is added in # upper layers of code. The reason for addition is that if col # is null, then col != someval will result in SQL "unknown" # which isn't the same as in Python. The Python None handling # is wanted, and it can be gotten by # (col IS NULL OR col != someval) # <=> # NOT (col IS NOT NULL AND col = someval). if ( self.is_nullable(targets[0]) or self.alias_map[join_list[-1]].join_type == LOUTER ): lookup_class = targets[0].get_lookup("isnull") col = self._get_col(targets[0], join_info.targets[0], alias) clause.add(lookup_class(col, False), AND) # If someval is a nullable column, someval IS NOT NULL is # added. if isinstance(value, Col) and self.is_nullable(value.target): lookup_class = value.target.get_lookup("isnull") clause.add(lookup_class(value, False), AND) return clause, used_joins if not require_outer else () def add_filter(self, filter_lhs, filter_rhs): self.add_q(Q((filter_lhs, filter_rhs))) def add_q(self, q_object): """ A preprocessor for the internal _add_q(). Responsible for doing final join promotion. """ # For join promotion this case is doing an AND for the added q_object # and existing conditions. So, any existing inner join forces the join # type to remain inner. Existing outer joins can however be demoted. # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if # rel_a doesn't produce any rows, then the whole condition must fail. # So, demotion is OK. 
existing_inner = { a for a in self.alias_map if self.alias_map[a].join_type == INNER } clause, _ = self._add_q(q_object, self.used_aliases) if clause: self.where.add(clause, AND) self.demote_joins(existing_inner) def build_where(self, filter_expr): return self.build_filter(filter_expr, allow_joins=False)[0] def clear_where(self): self.where = WhereNode() def _add_q( self, q_object, used_aliases, branch_negated=False, current_negated=False, allow_joins=True, split_subq=True, check_filterable=True, ): """Add a Q-object to the current filter.""" connector = q_object.connector current_negated = current_negated ^ q_object.negated branch_negated = branch_negated or q_object.negated target_clause = WhereNode(connector=connector, negated=q_object.negated) joinpromoter = JoinPromoter( q_object.connector, len(q_object.children), current_negated ) for child in q_object.children: child_clause, needed_inner = self.build_filter( child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) joinpromoter.add_votes(needed_inner) if child_clause: target_clause.add(child_clause, connector) needed_inner = joinpromoter.update_join_types(self) return target_clause, needed_inner def build_filtered_relation_q( self, q_object, reuse, branch_negated=False, current_negated=False ): """Add a FilteredRelation object to the current filter.""" connector = q_object.connector current_negated ^= q_object.negated branch_negated = branch_negated or q_object.negated target_clause = WhereNode(connector=connector, negated=q_object.negated) for child in q_object.children: if isinstance(child, Node): child_clause = self.build_filtered_relation_q( child, reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, ) else: child_clause, _ = self.build_filter( child, can_reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, allow_joins=True, split_subq=False, reuse_with_filtered_relation=True, ) target_clause.add(child_clause, connector) return target_clause def add_filtered_relation(self, filtered_relation, alias): filtered_relation.alias = alias lookups = dict(get_children_from_q(filtered_relation.condition)) relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type( filtered_relation.relation_name ) if relation_lookup_parts: raise ValueError( "FilteredRelation's relation_name cannot contain lookups " "(got %r)." % filtered_relation.relation_name ) for lookup in chain(lookups): lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup) shift = 2 if not lookup_parts else 1 lookup_field_path = lookup_field_parts[:-shift] for idx, lookup_field_part in enumerate(lookup_field_path): if len(relation_field_parts) > idx: if relation_field_parts[idx] != lookup_field_part: raise ValueError( "FilteredRelation's condition doesn't support " "relations outside the %r (got %r)." % (filtered_relation.relation_name, lookup) ) else: raise ValueError( "FilteredRelation's condition doesn't support nested " "relations deeper than the relation_name (got %r for " "%r)." % (lookup, filtered_relation.relation_name) ) self._filtered_relations[filtered_relation.alias] = filtered_relation def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False): """ Walk the list of names and turns them into PathInfo tuples. A single name in 'names' can generate multiple PathInfos (m2m, for example). 
'names' is the path of names to travel, 'opts' is the model Options we start the name resolving from, 'allow_many' is as for setup_joins(). If fail_on_missing is set to True, then a name that can't be resolved will generate a FieldError. Return a list of PathInfo tuples. In addition return the final field (the last used join field) and target (which is a field guaranteed to contain the same value as the final field). Finally, return those names that weren't found (which are likely transforms and the final lookup). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == "pk": name = opts.pk.name field = None filtered_relation = None try: if opts is None: raise FieldDoesNotExist field = opts.get_field(name) except FieldDoesNotExist: if name in self.annotation_select: field = self.annotation_select[name].output_field elif name in self._filtered_relations and pos == 0: filtered_relation = self._filtered_relations[name] if LOOKUP_SEP in filtered_relation.relation_name: parts = filtered_relation.relation_name.split(LOOKUP_SEP) filtered_relation_path, field, _, _ = self.names_to_path( parts, opts, allow_many, fail_on_missing, ) path.extend(filtered_relation_path[:-1]) else: field = opts.get_field(filtered_relation.relation_name) if field is not None: # Fields that contain one-to-many relations with a generic # model (like a GenericForeignKey) cannot generate reverse # relations and therefore cannot be used for reverse querying. if field.is_relation and not field.related_model: raise FieldError( "Field %r does not generate an automatic reverse " "relation and therefore cannot be used for reverse " "querying. If it is a GenericForeignKey, consider " "adding a GenericRelation." % name ) try: model = field.model._meta.concrete_model except AttributeError: # QuerySet.annotate() may introduce fields that aren't # attached to a model. model = None else: # We didn't find the current field, so move position back # one step. pos -= 1 if pos == -1 or fail_on_missing: available = sorted( [ *get_field_names_from_opts(opts), *self.annotation_select, *self._filtered_relations, ] ) raise FieldError( "Cannot resolve keyword '%s' into field. " "Choices are: %s" % (name, ", ".join(available)) ) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if opts is not None and model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, "path_infos"): if filtered_relation: pathinfos = field.get_path_info(filtered_relation) else: pathinfos = field.path_infos if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. Join on '%s'" " not permitted." 
% (names[pos + 1], name) ) break return path, final_field, targets, names[pos + 1 :] def setup_joins( self, names, opts, alias, can_reuse=None, allow_many=True, reuse_with_filtered_relation=False, ): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). The 'reuse_with_filtered_relation' can be used to force 'can_reuse' parameter and force the relation on the given connections. If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function that takes a field and alias and is equivalent to `field.get_col(alias)` in the simple case but wraps field transforms if they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to that value. Final field is needed for example in some value conversions (convert 'obj' in fk__id=obj to pk val using the foreign key field for example). """ joins = [alias] # The transform can't be applied yet, as joins must be trimmed later. # To avoid making every caller of this method look up transforms # directly, compute transforms here and create a partial that converts # fields to the appropriate wrapped version. def final_transformer(field, alias): if not self.alias_cols: alias = None return field.get_col(alias) # Try resolving all the names as fields first. If there's an error, # treat trailing names as lookups until a field can be resolved. last_field_exception = None for pivot in range(len(names), 0, -1): try: path, final_field, targets, rest = self.names_to_path( names[:pivot], opts, allow_many, fail_on_missing=True, ) except FieldError as exc: if pivot == 1: # The first item cannot be a lookup, so it's safe # to raise the field error here. raise else: last_field_exception = exc else: # The transforms are the remaining items that couldn't be # resolved into fields. transforms = names[pivot:] break for name in transforms: def transform(field, alias, *, name, previous): try: wrapped = previous(field, alias) return self.try_transform(wrapped, name) except FieldError: # FieldError is raised if the transform doesn't exist. if isinstance(final_field, Field) and last_field_exception: raise last_field_exception else: raise final_transformer = functools.partial( transform, name=name, previous=final_transformer ) final_transformer.has_transforms = True # Then, add the path to the query's joins. Note that we can't trim # joins at this stage - we will need the information about join type # of the trimmed joins. 
for join in path: if join.filtered_relation: filtered_relation = join.filtered_relation.clone() table_alias = filtered_relation.alias else: filtered_relation = None table_alias = None opts = join.to_opts if join.direct: nullable = self.is_nullable(join.join_field) else: nullable = True connection = self.join_class( opts.db_table, alias, table_alias, INNER, join.join_field, nullable, filtered_relation=filtered_relation, ) reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None alias = self.join( connection, reuse=reuse, reuse_with_filtered_relation=reuse_with_filtered_relation, ) joins.append(alias) if filtered_relation: filtered_relation.path = joins[:] return JoinInfo(final_field, targets, opts, joins, path, final_transformer) def trim_joins(self, targets, joins, path): """ The 'target' parameter is the final field being joined to, 'joins' is the full list of join aliases. The 'path' contain the PathInfos used to create the joins. Return the final target field and table alias and the new active joins. Always trim any direct join if the target column is already in the previous table. Can't trim reverse joins as it's unknown if there's anything on the other side of the join. """ joins = joins[:] for pos, info in enumerate(reversed(path)): if len(joins) == 1 or not info.direct: break if info.filtered_relation: break join_targets = {t.column for t in info.join_field.foreign_related_fields} cur_targets = {t.column for t in targets} if not cur_targets.issubset(join_targets): break targets_dict = { r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets } targets = tuple(targets_dict[t.column] for t in targets) self.unref_alias(joins.pop()) return targets, joins[-1], joins @classmethod def _gen_cols(cls, exprs, include_external=False): for expr in exprs: if isinstance(expr, Col): yield expr elif include_external and callable( getattr(expr, "get_external_cols", None) ): yield from expr.get_external_cols() elif hasattr(expr, "get_source_expressions"): yield from cls._gen_cols( expr.get_source_expressions(), include_external=include_external, ) @classmethod def _gen_col_aliases(cls, exprs): yield from (expr.alias for expr in cls._gen_cols(exprs)) def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False): annotation = self.annotations.get(name) if annotation is not None: if not allow_joins: for alias in self._gen_col_aliases([annotation]): if isinstance(self.alias_map[alias], Join): raise FieldError( "Joined field references are not permitted in this query" ) if summarize: # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In # that case we need to return a Ref to the subquery's annotation. if name not in self.annotation_select: raise FieldError( "Cannot aggregate over the '%s' alias. Use annotate() " "to promote it." 
% name ) return Ref(name, self.annotation_select[name]) else: return annotation else: field_list = name.split(LOOKUP_SEP) annotation = self.annotations.get(field_list[0]) if annotation is not None: for transform in field_list[1:]: annotation = self.try_transform(annotation, transform) return annotation join_info = self.setup_joins( field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse ) targets, final_alias, join_list = self.trim_joins( join_info.targets, join_info.joins, join_info.path ) if not allow_joins and len(join_list) > 1: raise FieldError( "Joined field references are not permitted in this query" ) if len(targets) > 1: raise FieldError( "Referencing multicolumn fields with F() objects isn't supported" ) # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. transform = join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) return transform def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT EXISTS( SELECT 1 FROM child WHERE name = 'foo' AND child.parent_id = parent.id LIMIT 1 ) """ # Generate the inner query. query = self.__class__(self.model) query._filtered_relations = self._filtered_relations filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, OuterRef): filter_rhs = OuterRef(filter_rhs) elif isinstance(filter_rhs, F): filter_rhs = OuterRef(filter_rhs.name) query.add_filter(filter_lhs, filter_rhs) query.clear_ordering(force=True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) col = query.select[0] select_field = col.target alias = col.alias if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup("exact") # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases[alias] = True lookup_class = select_field.get_lookup("exact") lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix)) query.where.add(lookup, AND) condition, needed_inner = self.build_filter(Exists(query)) if contains_louter: or_null_condition, _ = self.build_filter( ("%s__isnull" % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse, ) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. 
return condition, needed_inner def set_empty(self): self.where.add(NothingNode(), AND) for query in self.combined_queries: query.set_empty() def is_empty(self): return any(isinstance(c, NothingNode) for c in self.where.children) def set_limits(self, low=None, high=None): """ Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. Add low to the current low value and clamp both to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low if self.low_mark == self.high_mark: self.set_empty() def clear_limits(self): """Clear any existing limits.""" self.low_mark, self.high_mark = 0, None @property def is_sliced(self): return self.low_mark != 0 or self.high_mark is not None def has_limit_one(self): return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 def can_filter(self): """ Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. """ return not self.is_sliced def clear_select_clause(self): """Remove all fields from SELECT clause.""" self.select = () self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_annotation_mask(()) def clear_select_fields(self): """ Clear the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns. """ self.select = () self.values_select = () def add_select_col(self, col, name): self.select += (col,) self.values_select += (name,) def set_select(self, cols): self.default_cols = False self.select = tuple(cols) def add_distinct_fields(self, *field_names): """ Add and resolve the given fields to the query's "distinct on" clause. """ self.distinct_fields = field_names self.distinct = True def add_fields(self, field_names, allow_m2m=True): """ Add the given (model) fields to the select set. Add the field names in the order specified. """ alias = self.get_initial_alias() opts = self.get_meta() try: cols = [] for name in field_names: # Join promotion note - we must not remove any rows here, so # if there is no existing joins, use outer join. join_info = self.setup_joins( name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m ) targets, final_alias, joins = self.trim_joins( join_info.targets, join_info.joins, join_info.path, ) for target in targets: cols.append(join_info.transform_function(target, final_alias)) if cols: self.set_select(cols) except MultiJoin: raise FieldError("Invalid field name: '%s'" % name) except FieldError: if LOOKUP_SEP in name: # For lookups spanning over relationships, show the error # from the model on which the lookup failed. raise elif name in self.annotations: raise FieldError( "Cannot select the '%s' alias. Use annotate() to promote " "it." % name ) else: names = sorted( [ *get_field_names_from_opts(opts), *self.extra, *self.annotation_select, *self._filtered_relations, ] ) raise FieldError( "Cannot resolve keyword %r into field. 
" "Choices are: %s" % (name, ", ".join(names)) ) def add_ordering(self, *ordering): """ Add items from the 'ordering' sequence to the query's "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or OrderBy expressions. If 'ordering' is empty, clear all ordering from the query. """ errors = [] for item in ordering: if isinstance(item, str): if item == "?": continue if item.startswith("-"): item = item[1:] if item in self.annotations: continue if self.extra and item in self.extra: continue # names_to_path() validates the lookup. A descriptive # FieldError will be raise if it's not. self.names_to_path(item.split(LOOKUP_SEP), self.model._meta) elif not hasattr(item, "resolve_expression"): errors.append(item) if getattr(item, "contains_aggregate", False): raise FieldError( "Using an aggregate in order_by() without also including " "it in annotate() is not allowed: %s" % item ) if errors: raise FieldError("Invalid order_by arguments: %s" % errors) if ordering: self.order_by += ordering else: self.default_ordering = False def clear_ordering(self, force=False, clear_default=True): """ Remove any ordering settings if the current query allows it without side effects, set 'force' to True to clear the ordering regardless. If 'clear_default' is True, there will be no ordering in the resulting query (not even the model's default). """ if not force and ( self.is_sliced or self.distinct_fields or self.select_for_update ): return self.order_by = () self.extra_order_by = () if clear_default: self.default_ordering = False def set_group_by(self, allow_aliases=True): """ Expand the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. """ if allow_aliases: # Column names from JOINs to check collisions with aliases. column_names = set() seen_models = set() for join in list(self.alias_map.values())[1:]: # Skip base table. model = join.join_field.related_model if model not in seen_models: column_names.update( {field.column for field in model._meta.local_concrete_fields} ) seen_models.add(model) if self.values_select: # If grouping by aliases is allowed assign selected values # aliases by moving them to annotations. group_by_annotations = {} values_select = {} for alias, expr in zip(self.values_select, self.select): if isinstance(expr, Col): values_select[alias] = expr else: group_by_annotations[alias] = expr self.annotations = {**group_by_annotations, **self.annotations} self.append_annotation_mask(group_by_annotations) self.select = tuple(values_select.values()) self.values_select = tuple(values_select) group_by = list(self.select) for alias, annotation in self.annotation_select.items(): if not (group_by_cols := annotation.get_group_by_cols()): continue if ( allow_aliases and alias not in column_names and not annotation.contains_aggregate ): group_by.append(Ref(alias, annotation)) else: group_by.extend(group_by_cols) self.group_by = tuple(group_by) def add_select_related(self, fields): """ Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). 
""" if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. select_pairs = {} if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): self.check_alias(name) entry = str(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != "%": entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. if new_existing := existing.difference(field_names): self.deferred_loading = new_existing, False else: self.clear_deferred_loading() if new_only := set(field_names).difference(existing): self.deferred_loading = new_only, True def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if "pk" in field_names: field_names.remove("pk") field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. 
self.deferred_loading = frozenset(field_names), False def set_annotation_mask(self, names): """Set the mask of annotations that will be returned by the SELECT.""" if names is None: self.annotation_select_mask = None else: self.annotation_select_mask = set(names) self._annotation_select_cache = None def append_annotation_mask(self, names): if self.annotation_select_mask is not None: self.set_annotation_mask(self.annotation_select_mask.union(names)) def set_extra_mask(self, names): """ Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later. """ if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None def set_values(self, fields): self.select_related = False self.clear_deferred_loading() self.clear_select_fields() self.has_select_fields = True if fields: field_names = [] extra_names = [] annotation_names = [] if not self.extra and not self.annotations: # Shortcut - if there are no extra or annotations, then # the values() clause must be just field names. field_names = list(fields) else: self.default_cols = False for f in fields: if f in self.extra_select: extra_names.append(f) elif f in self.annotation_select: annotation_names.append(f) else: field_names.append(f) self.set_extra_mask(extra_names) self.set_annotation_mask(annotation_names) selected = frozenset(field_names + extra_names + annotation_names) else: field_names = [f.attname for f in self.model._meta.concrete_fields] selected = frozenset(field_names) # Selected annotations must be known before setting the GROUP BY # clause. if self.group_by is True: self.add_fields( (f.attname for f in self.model._meta.concrete_fields), False ) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. self.set_group_by(allow_aliases=False) self.clear_select_fields() elif self.group_by: # Resolve GROUP BY annotation references if they are not part of # the selected fields anymore. group_by = [] for expr in self.group_by: if isinstance(expr, Ref) and expr.refs not in selected: expr = self.annotations[expr.refs] group_by.append(expr) self.group_by = tuple(group_by) self.values_select = tuple(field_names) self.add_fields(field_names, True) @property def annotation_select(self): """ Return the dictionary of aggregate columns that are not masked and should be used in the SELECT clause. Cache this result for performance. """ if self._annotation_select_cache is not None: return self._annotation_select_cache elif not self.annotations: return {} elif self.annotation_select_mask is not None: self._annotation_select_cache = { k: v for k, v in self.annotations.items() if k in self.annotation_select_mask } return self._annotation_select_cache else: return self.annotations @property def extra_select(self): if self._extra_select_cache is not None: return self._extra_select_cache if not self.extra: return {} elif self.extra_select_mask is not None: self._extra_select_cache = { k: v for k, v in self.extra.items() if k in self.extra_select_mask } return self._extra_select_cache else: return self.extra def trim_start(self, names_with_path): """ Trim joins from the start of the join path. The candidates for trim are the PathInfos in names_with_path structure that are m2m joins. Also set the select column so the start matches the join. This method is meant to be used for generating the subquery joins & cols in split_exclude(). 
Return a lookup usable for doing outerq.filter(lookup=self) and a boolean indicating if the joins in the prefix contain a LEFT OUTER join. """ all_paths = [] for _, paths in names_with_path: all_paths.extend(paths) contains_louter = False # Trim and operate only on tables that were generated for # the lookup part of the query. That is, avoid trimming # joins generated for F() expressions. lookup_tables = [ t for t in self.alias_map if t in self._lookup_joins or t == self.base_table ] for trimmed_paths, path in enumerate(all_paths): if path.m2m: break if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER: contains_louter = True alias = lookup_tables[trimmed_paths] self.unref_alias(alias) # The path.join_field is a Rel, let's get the other side's field join_field = path.join_field.field # Build the filter prefix. paths_in_prefix = trimmed_paths trimmed_prefix = [] for name, path in names_with_path: if paths_in_prefix - len(path) < 0: break trimmed_prefix.append(name) paths_in_prefix -= len(path) trimmed_prefix.append(join_field.foreign_related_fields[0].name) trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix) # Let's still see if we can trim the first join from the inner query # (that is, self). We can't do this for: # - LEFT JOINs because we would miss those rows that have nothing on # the outer side, # - INNER JOINs from filtered relations because we would miss their # filters. first_join = self.alias_map[lookup_tables[trimmed_paths + 1]] if first_join.join_type != LOUTER and not first_join.filtered_relation: select_fields = [r[0] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths + 1] self.unref_alias(lookup_tables[trimmed_paths]) extra_restriction = join_field.get_extra_restriction( None, lookup_tables[trimmed_paths + 1] ) if extra_restriction: self.where.add(extra_restriction, AND) else: # TODO: It might be possible to trim more joins from the start of the # inner query if it happens to have a longer join chain containing the # values in select_fields. Let's punt this one for now. select_fields = [r[1] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths] # The found starting point is likely a join_class instead of a # base_table_class reference. But the first entry in the query's FROM # clause must not be a JOIN. for table in self.alias_map: if self.alias_refcount[table] > 0: self.alias_map[table] = self.base_table_class( self.alias_map[table].table_name, table, ) break self.set_select([f.get_col(select_alias) for f in select_fields]) return trimmed_prefix, contains_louter def is_nullable(self, field): """ Check if the given field should be treated as nullable. Some backends treat '' as null and Django treats such fields as nullable for those backends. In such situations field.null can be False even if we should treat the field as nullable. """ # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have # (nor should it have) knowledge of which connection is going to be # used. The proper fix would be to defer all decisions where # is_nullable() is needed to the compiler stage, but that is not easy # to do currently. return field.null or ( field.empty_strings_allowed and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls ) def get_order_dir(field, default="ASC"): """ Return the field name and direction for an order specification. For example, '-foo' is returned as ('foo', 'DESC'). The 'default' param is used to indicate which way no prefix (or a '+' prefix) should sort.
The '-' prefix always sorts the opposite way. """ dirn = ORDER_DIR[default] if field[0] == "-": return field[1:], dirn[1] return field, dirn[0] class JoinPromoter: """ A class to abstract away join promotion problems for complex filter conditions. """ def __init__(self, connector, num_children, negated): self.connector = connector self.negated = negated if self.negated: if connector == AND: self.effective_connector = OR else: self.effective_connector = AND else: self.effective_connector = self.connector self.num_children = num_children # Maps of table alias to how many times it is seen as required for # inner and/or outer joins. self.votes = Counter() def __repr__(self): return ( f"{self.__class__.__qualname__}(connector={self.connector!r}, " f"num_children={self.num_children!r}, negated={self.negated!r})" ) def add_votes(self, votes): """ Add single vote per item to self.votes. Parameter can be any iterable. """ self.votes.update(votes) def update_join_types(self, query): """ Change join types so that the generated query is as efficient as possible, but still correct. So, change as many joins as possible to INNER, but don't make OUTER joins INNER if that could remove results from the query. """ to_promote = set() to_demote = set() # The effective_connector is used so that NOT (a AND b) is treated # similarly to (a OR b) for join promotion. for table, votes in self.votes.items(): # We must use outer joins in OR case when the join isn't contained # in all of the joins. Otherwise the INNER JOIN itself could remove # valid results. Consider the case where a model with rel_a and # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now, # if rel_a join doesn't produce any results is null (for example # reverse foreign key or null value in direct foreign key), and # there is a matching row in rel_b with col=2, then an INNER join # to rel_a would remove a valid match from the query. So, we need # to promote any existing INNER to LOUTER (it is possible this # promotion in turn will be demoted later on). if self.effective_connector == OR and votes < self.num_children: to_promote.add(table) # If connector is AND and there is a filter that can match only # when there is a joinable row, then use INNER. For example, in # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL # as join output, then the col=1 or col=2 can't match (as # NULL=anything is always false). # For the OR case, if all children voted for a join to be inner, # then we can use INNER for the join. For example: # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell) # then if rel_a doesn't produce any rows, the whole condition # can't match. Hence we can safely use INNER join. if self.effective_connector == AND or ( self.effective_connector == OR and votes == self.num_children ): to_demote.add(table) # Finally, what happens in cases where we have: # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0 # Now, we first generate the OR clause, and promote joins for it # in the first if branch above. Both rel_a and rel_b are promoted # to LOUTER joins. After that we do the AND case. The OR case # voted no inner joins but the rel_a__col__gte=0 votes inner join # for rel_a. We demote it back to INNER join (in AND case a single # vote is enough). The demotion is OK, if rel_a doesn't produce # rows, then the rel_a__col__gte=0 clause can't be true, and thus # the whole clause must be false. So, it is safe to use INNER # join. 
# Note that in this example we could just as well have the __gte # clause and the OR clause swapped. Or we could replace the __gte # clause with an OR clause containing rel_a__col=1|rel_a__col=2, # and again we could safely demote to INNER. query.promote_joins(to_promote) query.demote_joins(to_demote) return to_demote
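# Illustrative sketch (not part of Django's API): the helper below and the
# table names in its example are hypothetical. It only restates the voting
# rule implemented by JoinPromoter.update_join_types() above, so the
# promote/demote decision can be tried in isolation with plain vote counts.
def _join_promotion_sketch(effective_connector, num_children, votes):
    """
    Return (to_promote, to_demote) for a mapping of table alias -> number of
    children that voted for that join (e.g. a collections.Counter).
    """
    # OR joins not required by every child must be promoted to LOUTER.
    to_promote = {
        table
        for table, count in votes.items()
        if effective_connector == OR and count < num_children
    }
    # AND joins, and unanimously voted OR joins, may be demoted to INNER.
    to_demote = {
        table
        for table, count in votes.items()
        if effective_connector == AND
        or (effective_connector == OR and count == num_children)
    }
    return to_promote, to_demote
# Example with hypothetical data: for Q(rel_a__col=1) | Q(rel_b__col=2) each
# child votes only for its own join, so both joins must be promoted and
# neither may be demoted:
#     _join_promotion_sketch(OR, 2, Counter(["rel_a", "rel_b"]))
#     -> ({"rel_a", "rel_b"}, set())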
3e8542795d0799bdc356dad1f4f7015950e1c65ad54ecc07aa961188ec54080e
import collections import json import re from functools import partial from itertools import chain from django.core.exceptions import EmptyResultSet, FieldError from django.db import DatabaseError, NotSupportedError from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value from django.db.models.functions import Cast, Random from django.db.models.lookups import Lookup from django.db.models.query_utils import select_related_descend from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE, ) from django.db.models.sql.query import Query, get_order_dir from django.db.models.sql.where import AND from django.db.transaction import TransactionManagementError from django.utils.functional import cached_property from django.utils.hashable import make_hashable from django.utils.regex_helper import _lazy_re_compile class SQLCompiler: # Multiline ordering SQL clause may appear from RawSQL. ordering_parts = _lazy_re_compile( r"^(.*)\s(?:ASC|DESC).*", re.MULTILINE | re.DOTALL, ) def __init__(self, query, connection, using, elide_empty=True): self.query = query self.connection = connection self.using = using # Some queries, e.g. coalesced aggregation, need to be executed even if # they would return an empty result set. self.elide_empty = elide_empty self.quote_cache = {"*": "*"} # The select, klass_info, and annotations are needed by QuerySet.iterator() # these are set as a side-effect of executing the query. Note that we calculate # separately a list of extra select columns needed for grammatical correctness # of the query, but these columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None self._meta_ordering = None def __repr__(self): return ( f"<{self.__class__.__qualname__} " f"model={self.query.model.__qualname__} " f"connection={self.connection!r} using={self.using!r}>" ) def setup_query(self, with_col_aliases=False): if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map): self.query.get_initial_alias() self.select, self.klass_info, self.annotation_col_map = self.get_select( with_col_aliases=with_col_aliases, ) self.col_count = len(self.select) def pre_sql_setup(self, with_col_aliases=False): """ Do any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. """ self.setup_query(with_col_aliases=with_col_aliases) order_by = self.get_order_by() self.where, self.having, self.qualify = self.query.where.split_having_qualify( must_group_by=self.query.group_by is not None ) extra_select = self.get_extra_select(order_by, self.select) self.has_extra_select = bool(extra_select) group_by = self.get_group_by(self.select + extra_select, order_by) return extra_select, order_by, group_by def get_group_by(self, select, order_by): """ Return a list of 2-tuples of form (sql, params). The logic of what exactly the GROUP BY clause contains is hard to describe in other words than "if it passes the test suite, then it is correct". 
""" # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk') # .annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. if self.query.group_by is None: return [] expressions = [] allows_group_by_refs = self.connection.features.allows_group_by_refs if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, "as_sql"): expr = self.query.resolve_ref(expr) if not allows_group_by_refs and isinstance(expr, Ref): expr = expr.source expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)} aliased_exprs = {} for expr, _, alias in select: # Skip members of the select clause that are already included # by reference. if expr in ref_sources: continue if alias: aliased_exprs[expr] = alias cols = expr.get_group_by_cols() for col in cols: expressions.append(col) if not self._meta_ordering: for expr, (sql, params, is_ref) in order_by: # Skip references to the SELECT clause, as all expressions in # the SELECT clause are already part of the GROUP BY. if not is_ref: expressions.extend(expr.get_group_by_cols()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) for expr in expressions: if allows_group_by_refs and (alias := aliased_exprs.get(expr)): expr = Ref(alias, expr) try: sql, params = self.compile(expr) except EmptyResultSet: continue sql, params = expr.select_format(self, sql, params) params_hash = make_hashable(params) if (sql, params_hash) not in seen: result.append((sql, params)) seen.add((sql, params_hash)) return result def collapse_group_by(self, expressions, having): # If the DB can group by primary key, then group by the primary key of # query's main model. Note that for PostgreSQL the GROUP BY clause must # include the primary key of every table, but for MySQL it is enough to # have the main table's primary key. 
if self.connection.features.allows_group_by_pk: # Determine if the main model's primary key is in the query. pk = None for expr in expressions: # Is this a reference to query's base table primary key? If the # expression isn't a Col-like, then skip the expression. if ( getattr(expr, "target", None) == self.query.model._meta.pk and getattr(expr, "alias", None) == self.query.base_table ): pk = expr break # If the main model's primary key is in the query, group by that # field, HAVING expressions, and expressions associated with tables # that don't have a primary key included in the grouped columns. if pk: pk_aliases = { expr.alias for expr in expressions if hasattr(expr, "target") and expr.target.primary_key } expressions = [pk] + [ expr for expr in expressions if expr in having or ( getattr(expr, "alias", None) is not None and expr.alias not in pk_aliases ) ] elif self.connection.features.allows_group_by_selected_pks: # Filter out all expressions associated with a table's primary key # present in the grouped columns. This is done by identifying all # tables that have their primary key included in the grouped # columns and removing non-primary key columns referring to them. # Unmanaged models are excluded because they could be representing # database views on which the optimization might not be allowed. pks = { expr for expr in expressions if ( hasattr(expr, "target") and expr.target.primary_key and self.connection.features.allows_group_by_selected_pks_on_model( expr.target.model ) ) } aliases = {expr.alias for expr in pks} expressions = [ expr for expr in expressions if expr in pks or getattr(expr, "alias", None) not in aliases ] return expressions def get_select(self, with_col_aliases=False): """ Return three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the "AS alias" for the column (possibly None). The klass_info structure contains the following information: - The base model of the query. - Which columns for that model are present in the query (by position of the select clause). - related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. """ select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) select_mask = self.query.get_select_mask() if self.query.default_cols: cols = self.get_default_columns(select_mask) else: # self.query.select is a special case. These columns never go to # any model. 
cols = self.query.select if cols: select_list = [] for col in cols: select_list.append(select_idx) select.append((col, None)) select_idx += 1 klass_info = { "model": self.query.model, "select_fields": select_list, } for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select, select_mask) klass_info["related_klass_infos"] = related_klass_infos def get_select_from_parent(klass_info): for ki in klass_info["related_klass_infos"]: if ki["from_parent"]: ki["select_fields"] = ( klass_info["select_fields"] + ki["select_fields"] ) get_select_from_parent(ki) get_select_from_parent(klass_info) ret = [] col_idx = 1 for col, alias in select: try: sql, params = self.compile(col) except EmptyResultSet: empty_result_set_value = getattr( col, "empty_result_set_value", NotImplemented ) if empty_result_set_value is NotImplemented: # Select a predicate that's always False. sql, params = "0", () else: sql, params = self.compile(Value(empty_result_set_value)) else: sql, params = col.select_format(self, sql, params) if alias is None and with_col_aliases: alias = f"col{col_idx}" col_idx += 1 ret.append((col, (sql, params), alias)) return ret, klass_info, annotations def _order_by_pairs(self): if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by elif self.query.order_by: ordering = self.query.order_by elif (meta := self.query.get_meta()) and meta.ordering: ordering = meta.ordering self._meta_ordering = ordering else: ordering = [] if self.query.standard_ordering: default_order, _ = ORDER_DIR["ASC"] else: default_order, _ = ORDER_DIR["DESC"] for field in ordering: if hasattr(field, "resolve_expression"): if isinstance(field, Value): # output_field must be resolved for constants. field = Cast(field, field.output_field) if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field = field.copy() field.reverse_ordering() if isinstance(field.expression, F) and ( annotation := self.query.annotation_select.get( field.expression.name ) ): field.expression = Ref(field.expression.name, annotation) yield field, isinstance(field.expression, Ref) continue if field == "?": # random yield OrderBy(Random()), False continue col, order = get_order_dir(field, default_order) descending = order == "DESC" if col in self.query.annotation_select: # Reference to expression in SELECT clause yield ( OrderBy( Ref(col, self.query.annotation_select[col]), descending=descending, ), True, ) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT # clause. if self.query.combinator and self.select: # Don't use the resolved annotation because other # combinated queries might define it differently. expr = F(col) else: expr = self.query.annotations[col] if isinstance(expr, Value): # output_field must be resolved for constants. expr = Cast(expr, expr.output_field) yield OrderBy(expr, descending=descending), False continue if "." in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. 
table, col = col.split(".", 1) yield ( OrderBy( RawSQL( "%s.%s" % (self.quote_name_unless_alias(table), col), [] ), descending=descending, ), False, ) continue if self.query.extra and col in self.query.extra: if col in self.query.extra_select: yield ( OrderBy( Ref(col, RawSQL(*self.query.extra[col])), descending=descending, ), True, ) else: yield ( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False, ) else: if self.query.combinator and self.select: # Don't use the first model's field because other # combinated queries might define it differently. yield OrderBy(F(col), descending=descending), False else: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. yield from self.find_ordering_name( field, self.query.get_meta(), default_order=default_order, ) def get_order_by(self): """ Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). """ result = [] seen = set() replacements = { expr: Ref(alias, expr) for alias, expr in self.query.annotation_select.items() } for expr, is_ref in self._order_by_pairs(): resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None) if not is_ref and self.query.combinator and self.select: src = resolved.expression expr_src = expr.expression for sel_expr, _, col_alias in self.select: if col_alias and not ( isinstance(expr_src, F) and col_alias == expr_src.name ): continue if src == sel_expr: resolved.set_source_expressions( [Ref(col_alias if col_alias else src.target.column, src)] ) break else: if col_alias: raise DatabaseError( "ORDER BY term does not match any column in the result set." ) # Add column used in ORDER BY clause to the selected # columns and to each combined query. order_by_idx = len(self.query.select) + 1 col_name = f"__orderbycol{order_by_idx}" for q in self.query.combined_queries: q.add_annotation(expr_src, col_name) self.query.add_select_col(resolved, col_name) resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())]) sql, params = self.compile(resolved.replace_expressions(replacements)) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. without_ordering = self.ordering_parts.search(sql)[1] params_hash = make_hashable(params) if (without_ordering, params_hash) in seen: continue seen.add((without_ordering, params_hash)) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql)[1] if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). 
""" if name in self.quote_cache: return self.quote_cache[name] if ( (name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( self.query.external_aliases.get(name) and name not in self.query.table_map ) ): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node): vendor_impl = getattr(node, "as_" + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection, self.elide_empty) for query in self.query.combined_queries if not query.is_empty() ] if not features.supports_slicing_ordering_in_compound: for compiler in compilers: if compiler.query.is_sliced: raise DatabaseError( "LIMIT/OFFSET not allowed in subqueries of compound statements." ) if compiler.get_order_by(): raise DatabaseError( "ORDER BY not allowed in subqueries of compound statements." ) elif self.query.is_sliced and combinator == "union": limit = (self.query.low_mark, self.query.high_mark) for compiler in compilers: if not compiler.query.is_sliced: compiler.query.set_limits(*limit) parts = () for compiler in compilers: try: # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. if not compiler.query.values_select and self.query.values_select: compiler.query = compiler.query.clone() compiler.query.set_values( ( *self.query.extra_select, *self.query.values_select, *self.query.annotation_select, ) ) part_sql, part_args = compiler.as_sql() if compiler.query.combinator: # Wrap in a subquery if wrapping in parentheses isn't # supported. if not features.supports_parentheses_in_compound: part_sql = "SELECT * FROM ({})".format(part_sql) # Add parentheses when combining with compound query if not # already added for all compound queries. elif ( self.query.subquery or not features.supports_slicing_ordering_in_compound ): part_sql = "({})".format(part_sql) elif ( self.query.subquery and features.supports_slicing_ordering_in_compound ): part_sql = "({})".format(part_sql) parts += ((part_sql, part_args),) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. if combinator == "union" or (combinator == "difference" and parts): continue raise if not parts: raise EmptyResultSet combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == "union": combinator_sql += " ALL" braces = "{}" if not self.query.subquery and features.supports_slicing_ordering_in_compound: braces = "({})" sql_parts, args_parts = zip( *((braces.format(sql), args) for sql, args in parts) ) result = [" {} ".format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def get_qualify_sql(self): where_parts = [] if self.where: where_parts.append(self.where) if self.having: where_parts.append(self.having) inner_query = self.query.clone() inner_query.subquery = True inner_query.where = inner_query.where.__class__(where_parts) # Augment the inner query with any window function references that # might have been masked via values() and alias(). 
If any masked # aliases are added they'll be masked again to avoid fetching # the data in the `if qual_aliases` branch below. select = { expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0] } select_aliases = set(select.values()) qual_aliases = set() replacements = {} def collect_replacements(expressions): while expressions: expr = expressions.pop() if expr in replacements: continue elif select_alias := select.get(expr): replacements[expr] = select_alias elif isinstance(expr, Lookup): expressions.extend(expr.get_source_expressions()) elif isinstance(expr, Ref): if expr.refs not in select_aliases: expressions.extend(expr.get_source_expressions()) else: num_qual_alias = len(qual_aliases) select_alias = f"qual{num_qual_alias}" qual_aliases.add(select_alias) inner_query.add_annotation(expr, select_alias) replacements[expr] = select_alias collect_replacements(list(self.qualify.leaves())) self.qualify = self.qualify.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) order_by = [] for order_by_expr, *_ in self.get_order_by(): collect_replacements(order_by_expr.get_source_expressions()) order_by.append( order_by_expr.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) ) inner_query_compiler = inner_query.get_compiler( self.using, elide_empty=self.elide_empty ) inner_sql, inner_params = inner_query_compiler.as_sql( # The limits must be applied to the outer query to avoid pruning # results too eagerly. with_limits=False, # Force unique aliasing of selected columns to avoid collisions # and make rhs predicates referencing easier. with_col_aliases=True, ) qualify_sql, qualify_params = self.compile(self.qualify) result = [ "SELECT * FROM (", inner_sql, ")", self.connection.ops.quote_name("qualify"), "WHERE", qualify_sql, ] if qual_aliases: # If some select aliases were unmasked for filtering purposes they # must be masked back. cols = [self.connection.ops.quote_name(alias) for alias in select.values()] result = [ "SELECT", ", ".join(cols), "FROM (", *result, ")", self.connection.ops.quote_name("qualify_mask"), ] params = list(inner_params) + qualify_params # As the SQL spec is unclear on whether or not derived tables # ordering must propagate it has to be explicitly repeated on the # outer-most query to ensure it's preserved. if order_by: ordering_sqls = [] for ordering in order_by: ordering_sql, ordering_params = self.compile(ordering) ordering_sqls.append(ordering_sql) params.extend(ordering_params) result.extend(["ORDER BY", ", ".join(ordering_sqls)]) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup( with_col_aliases=with_col_aliases, ) for_update_part = None # Is a LIMIT/OFFSET clause needed? 
with_limit_offset = with_limits and ( self.query.high_mark is not None or self.query.low_mark ) combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, "supports_select_{}".format(combinator)): raise NotSupportedError( "{} is not supported on this database backend.".format( combinator ) ) result, params = self.get_combinator_sql( combinator, self.query.combinator_all ) elif self.qualify: result, params = self.get_qualify_sql() order_by = None else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). from_, f_params = self.get_from_clause() try: where, w_params = ( self.compile(self.where) if self.where is not None else ("", []) ) except EmptyResultSet: if self.elide_empty: raise # Use a predicate that's always False. where, w_params = "0 = 1", [] having, h_params = ( self.compile(self.having) if self.having is not None else ("", []) ) result = ["SELECT"] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = "%s AS %s" % ( s_sql, self.connection.ops.quote_name(alias), ) params.extend(s_params) out_cols.append(s_sql) result += [", ".join(out_cols)] if from_: result += ["FROM", *from_] elif self.connection.features.bare_select_suffix: result += [self.connection.features.bare_select_suffix] params.extend(f_params) if self.query.select_for_update and features.has_select_for_update: if ( self.connection.get_autocommit() # Don't raise an exception when database doesn't # support transactions, as it's a noop. and features.supports_transactions ): raise TransactionManagementError( "select_for_update cannot be used outside of a transaction." ) if ( with_limit_offset and not features.supports_select_for_update_with_limit ): raise NotSupportedError( "LIMIT/OFFSET is not supported with " "select_for_update on this database backend." ) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of no_key = self.query.select_for_no_key_update # If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the # backend doesn't support it, raise NotSupportedError to # prevent a possible deadlock. if nowait and not features.has_select_for_update_nowait: raise NotSupportedError( "NOWAIT is not supported on this database backend." ) elif skip_locked and not features.has_select_for_update_skip_locked: raise NotSupportedError( "SKIP LOCKED is not supported on this database backend." ) elif of and not features.has_select_for_update_of: raise NotSupportedError( "FOR UPDATE OF is not supported on this database backend." ) elif no_key and not features.has_select_for_no_key_update: raise NotSupportedError( "FOR NO KEY UPDATE is not supported on this " "database backend." 
) for_update_part = self.connection.ops.for_update_sql( nowait=nowait, skip_locked=skip_locked, of=self.get_select_for_update_of_arguments(), no_key=no_key, ) if for_update_part and features.for_update_after_from: result.append(for_update_part) if where: result.append("WHERE %s" % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError( "annotate() + distinct(fields) is not implemented." ) order_by = order_by or self.connection.ops.force_no_ordering() result.append("GROUP BY %s" % ", ".join(grouping)) if self._meta_ordering: order_by = None if having: result.append("HAVING %s" % having) params.extend(h_params) if self.query.explain_info: result.insert( 0, self.connection.ops.explain_query_prefix( self.query.explain_info.format, **self.query.explain_info.options, ), ) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) order_by_sql = "ORDER BY %s" % ", ".join(ordering) if combinator and features.requires_compound_order_by_subquery: result = ["SELECT * FROM (", *result, ")", order_by_sql] else: result.append(order_by_sql) if with_limit_offset: result.append( self.connection.ops.limit_offset_sql( self.query.low_mark, self.query.high_mark ) ) if for_update_part and not features.for_update_after_from: result.append(for_update_part) if self.query.subquery and extra_select: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination # of order_by() and distinct(), forcing the ordering expressions # to be selected as well. Wrap the query in another subquery # to exclude extraneous selects. sub_selects = [] sub_params = [] for index, (select, _, alias) in enumerate(self.select, start=1): if alias: sub_selects.append( "%s.%s" % ( self.connection.ops.quote_name("subquery"), self.connection.ops.quote_name(alias), ) ) else: select_clone = select.relabeled_clone( {select.alias: "subquery"} ) subselect, subparams = select_clone.as_sql( self, self.connection ) sub_selects.append(subselect) sub_params.extend(subparams) return "SELECT %s FROM (%s) subquery" % ( ", ".join(sub_selects), " ".join(result), ), tuple(sub_params + params) return " ".join(result), tuple(params) finally: # Finally do cleanup - get rid of the joins we created above. self.query.reset_refcounts(refcounts_before) def get_default_columns( self, select_mask, start_alias=None, opts=None, from_parent=None ): """ Compute the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Return a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, return a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). """ result = [] if opts is None: if (opts := self.query.get_meta()) is None: return result start_alias = start_alias or self.query.get_initial_alias() # The 'seen_models' is used to optimize checking the needed parent # alias for a given field. This also includes None -> start_alias to # be used by local fields. 
seen_models = {None: start_alias} for field in opts.concrete_fields: model = field.model._meta.concrete_model # A proxy model will have a different model and concrete_model. We # will assign None if the field belongs to this model. if model == opts.model: model = None if ( from_parent and model is not None and issubclass( from_parent._meta.concrete_model, model._meta.concrete_model ) ): # Avoid loading data for already loaded parents. # We end up here in the case select_related() resolution # proceeds from parent model to child model. In that case the # parent model data is already present in the SELECT clause, # and we want to avoid reloading the same data again. continue if select_mask and field not in select_mask: continue alias = self.query.join_parent_model(opts, model, start_alias, seen_models) column = field.get_col(alias) result.append(column) return result def get_distinct(self): """ Return a quoted list of fields to use in DISTINCT ON part of the query. This method can alter the tables in the query, and thus it must be called before get_from_clause(). """ result = [] params = [] opts = self.query.get_meta() for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) _, targets, alias, joins, path, _, transform_function = self._setup_joins( parts, opts, None ) targets, alias, _ = self.query.trim_joins(targets, joins, path) for target in targets: if name in self.query.annotation_select: result.append(self.connection.ops.quote_name(name)) else: r, p = self.compile(transform_function(target, alias)) result.append(r) params.append(p) return result, params def find_ordering_name( self, name, opts, alias=None, default_order="ASC", already_seen=None ): """ Return the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) descending = order == "DESC" pieces = name.split(LOOKUP_SEP) ( field, targets, alias, joins, path, opts, transform_function, ) = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model unless it is the pk # shortcut or the attribute name of the field that is specified or # there are transforms to process. if ( field.is_relation and opts.ordering and getattr(field, "attname", None) != pieces[-1] and name != "pk" and not getattr(transform_function, "has_transforms", False) ): # Firstly, avoid infinite loops. already_seen = already_seen or set() join_tuple = tuple( getattr(self.query.alias_map[j], "join_cols", None) for j in joins ) if join_tuple in already_seen: raise FieldError("Infinite loop caused by ordering.") already_seen.add(join_tuple) results = [] for item in opts.ordering: if hasattr(item, "resolve_expression") and not isinstance( item, OrderBy ): item = item.desc() if descending else item.asc() if isinstance(item, OrderBy): results.append( (item.prefix_references(f"{name}{LOOKUP_SEP}"), False) ) continue results.extend( (expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref) for expr, is_ref in self.find_ordering_name( item, opts, alias, order, already_seen ) ) return results targets, alias, _ = self.query.trim_joins(targets, joins, path) return [ (OrderBy(transform_function(t, alias), descending=descending), False) for t in targets ] def _setup_joins(self, pieces, opts, alias): """ Helper method for get_order_by() and get_distinct(). 
get_ordering() and get_distinct() must produce same target columns on same input, as the prefixes of get_ordering() and get_distinct() must match. Executing SQL where this is not true is an error. """ alias = alias or self.query.get_initial_alias() field, targets, opts, joins, path, transform_function = self.query.setup_joins( pieces, opts, alias ) alias = joins[-1] return field, targets, alias, joins, path, opts, transform_function def get_from_clause(self): """ Return a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Subclasses, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables that are needed. This means the select columns, ordering, and distinct must be done first. """ result = [] params = [] for alias in tuple(self.query.alias_map): if not self.query.alias_refcount[alias]: continue try: from_clause = self.query.alias_map[alias] except KeyError: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue clause_sql, clause_params = self.compile(from_clause) result.append(clause_sql) params.extend(clause_params) for t in self.query.extra_tables: alias, _ = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # call increments the refcount, so an alias refcount of one means # this is the only reference). if ( alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1 ): result.append(", %s" % self.quote_name_unless_alias(alias)) return result, params def get_related_selections( self, select, select_mask, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None, ): """ Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ def _get_field_choices(): direct_choices = (f.name for f in opts.fields if f.is_relation) reverse_choices = ( f.field.related_query_name() for f in opts.related_objects if f.field.unique ) return chain( direct_choices, reverse_choices, self.query._filtered_relations ) related_klass_infos = [] if not restricted and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() # Setup for the case when only particular related fields should be # included in the related selection. fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info["related_klass_infos"] = related_klass_infos for f in opts.fields: fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. 
" "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or "(none)", ) ) else: next = False if not select_related_descend(f, restricted, requested, select_mask): continue related_select_mask = select_mask.get(f) or {} klass_info = { "model": f.remote_field.model, "field": f, "reverse": False, "local_setter": f.set_cached_value, "remote_setter": f.remote_field.set_cached_value if f.unique else lambda x, y: None, "from_parent": False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=f.remote_field.model._meta ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_klass_infos = self.get_related_selections( select, related_select_mask, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: related_select_mask = select_mask.get(f) or {} if not select_related_descend( f, restricted, requested, related_select_mask, reverse=True ): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins( [related_field_name], opts, root_alias ) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { "model": model, "field": f, "reverse": True, "local_setter": f.remote_field.set_cached_value, "remote_setter": f.set_cached_value, "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, related_select_mask, model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) def local_setter(obj, from_obj): # Set a reverse fk object when relation is non-empty. if from_obj: f.remote_field.set_cached_value(from_obj, obj) def remote_setter(name, obj, from_obj): setattr(from_obj, name, obj) for name in list(requested): # Filtered relations work only on the topmost level. 
if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) f, _, join_opts, joins, _, _ = self.query.setup_joins( [name], opts, root_alias ) model = join_opts.model alias = joins[-1] from_parent = ( issubclass(model, opts.model) and model is not opts.model ) klass_info = { "model": model, "field": f, "reverse": True, "local_setter": local_setter, "remote_setter": partial(remote_setter, name), "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] field_select_mask = select_mask.get((name, f)) or {} columns = self.get_default_columns( field_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, field_select_mask, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( "Invalid field name(s) given in select_related: %s. " "Choices are: %s" % ( ", ".join(invalid_fields), ", ".join(_get_field_choices()) or "(none)", ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_parent_klass_info(klass_info): concrete_model = klass_info["model"]._meta.concrete_model for parent_model, parent_link in concrete_model._meta.parents.items(): parent_list = parent_model._meta.get_parent_list() yield { "model": parent_model, "field": parent_link, "reverse": False, "select_fields": [ select_index for select_index in klass_info["select_fields"] # Selected columns from a model or its parents. if ( self.select[select_index][0].target.model == parent_model or self.select[select_index][0].target.model in parent_list ) ], } def _get_first_selected_col_from_model(klass_info): """ Find the first selected column from a model. If it doesn't exist, don't lock a model. select_fields is filled recursively, so it also contains fields from the parent models. 
""" concrete_model = klass_info["model"]._meta.concrete_model for select_index in klass_info["select_fields"]: if self.select[select_index][0].target.model == concrete_model: return self.select[select_index][0] def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield "self" else: field = klass_info["field"] if klass_info["reverse"]: field = field.remote_field path = parent_path + [field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in _get_parent_klass_info(klass_info) ) queue.extend( (path, klass_info) for klass_info in klass_info.get("related_klass_infos", []) ) if not self.klass_info: return [] result = [] invalid_names = [] for name in self.query.select_for_update_of: klass_info = self.klass_info if name == "self": col = _get_first_selected_col_from_model(klass_info) else: for part in name.split(LOOKUP_SEP): klass_infos = ( *klass_info.get("related_klass_infos", []), *_get_parent_klass_info(klass_info), ) for related_klass_info in klass_infos: field = related_klass_info["field"] if related_klass_info["reverse"]: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue col = _get_first_selected_col_from_model(klass_info) if col is not None: if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( "Invalid field name(s) given in select_for_update(of=(...)): %s. " "Only relational fields followed in the query are allowed. " "Choices are: %s." % ( ", ".join(invalid_names), ", ".join(_get_field_choices()), ) ) return result def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def results_iter( self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE, ): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql( MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size ) fields = [s[0] for s in self.select[0 : self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ return bool(self.execute_sql(SINGLE)) def execute_sql( self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE ): """ Run the query against the database and return the result(s). 
The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. """ result_type = result_type or NO_RESULTS try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return iter([]) else: return if chunked_fetch: cursor = self.connection.chunked_cursor() else: cursor = self.connection.cursor() try: cursor.execute(sql, params) except Exception: # Might fail for server-side cursors (e.g. connection closed) cursor.close() raise if result_type == CURSOR: # Give the caller the cursor to process and close. return cursor if result_type == SINGLE: try: val = cursor.fetchone() if val: return val[0 : self.col_count] return val finally: # done with the cursor cursor.close() if result_type == NO_RESULTS: cursor.close() return result = cursor_iter( cursor, self.connection.features.empty_fetchmany_value, self.col_count if self.has_extra_select else None, chunk_size, ) if not chunked_fetch or not self.connection.features.can_use_chunked_reads: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. Use chunked_fetch if requested, # unless the database doesn't support it. return list(result) return result def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name for index, select_col in enumerate(self.query.select): lhs_sql, lhs_params = self.compile(select_col) rhs = "%s.%s" % (qn(alias), qn2(columns[index])) self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND) sql, params = self.as_sql() return "EXISTS (%s)" % sql, params def explain_query(self): result = list(self.execute_sql()) # Some backends return 1 item tuples with strings, and others return # tuples with integers and strings. Flatten them out into strings. format_ = self.query.explain_info.format output_formatter = json.dumps if format_ and format_.lower() == "json" else str for row in result[0]: if not isinstance(row, str): yield " ".join(output_formatter(c) for c in row) else: yield row class SQLInsertCompiler(SQLCompiler): returning_fields = None returning_params = () def field_as_sql(self, field, val): """ Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Check for raw values, expressions, and fields with get_placeholder() defined in that order. When field is None, consider the value raw and use it as the placeholder, with no corresponding parameters returned. """ if field is None: # A field value of None means the value is raw. sql, params = val, [] elif hasattr(val, "as_sql"): # This is an expression, let's compile it. sql, params = self.compile(val) elif hasattr(field, "get_placeholder"): # Some fields (e.g. geo fields) need special munging before # they can be inserted. 
sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = "%s", [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, "resolve_expression"): value = value.resolve_expression( self.query, allow_joins=False, for_save=True ) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' "can only be used to update, not to insert." % (value, field) ) if value.contains_aggregate: raise FieldError( "Aggregate functions are not allowed in this query " "(%s=%r)." % (field.name, value) ) if value.contains_over_clause: raise FieldError( "Window expressions are not allowed in this query (%s=%r)." % (field.name, value) ) else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. """ if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). 
qn = self.connection.ops.quote_name opts = self.query.get_meta() insert_statement = self.connection.ops.insert_statement( on_conflict=self.query.on_conflict, ) result = ["%s %s" % (insert_statement, qn(opts.db_table))] fields = self.query.fields or [opts.pk] result.append("(%s)" % ", ".join(qn(f.column) for f in fields)) if self.query.fields: value_rows = [ [ self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields ] for obj in self.query.objs ] else: # An empty object. value_rows = [ [self.connection.ops.pk_default_value()] for _ in self.query.objs ] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = ( not self.returning_fields and self.connection.features.has_bulk_insert ) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql( fields, self.query.on_conflict, self.query.update_fields, self.query.unique_fields, ) if ( self.returning_fields and self.connection.features.can_return_columns_from_insert ): if self.connection.features.can_return_rows_from_bulk_insert: result.append( self.connection.ops.bulk_insert_sql(fields, placeholder_rows) ) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) # Skip empty r_sql to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. r_sql, self.returning_params = self.connection.ops.return_insert_columns( self.returning_fields ) if r_sql: result.append(r_sql) params += [self.returning_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, returning_fields=None): assert not ( returning_fields and len(self.query.objs) != 1 and not self.connection.features.can_return_rows_from_bulk_insert ) opts = self.query.get_meta() self.returning_fields = returning_fields with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not self.returning_fields: return [] if ( self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1 ): rows = self.connection.ops.fetch_returned_insert_rows(cursor) elif self.connection.features.can_return_columns_from_insert: assert len(self.query.objs) == 1 rows = [ self.connection.ops.fetch_returned_insert_columns( cursor, self.returning_params, ) ] else: rows = [ ( self.connection.ops.last_insert_id( cursor, opts.db_table, opts.pk.column, ), ) ] cols = [field.get_col(opts.db_table) for field in self.returning_fields] converters = self.get_converters(cols) if converters: rows = list(self.apply_converters(rows, converters)) return rows class SQLDeleteCompiler(SQLCompiler): @cached_property def single_alias(self): # Ensure base table is in aliases. 
self.query.get_initial_alias() return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1 @classmethod def _expr_refs_base_model(cls, expr, base_model): if isinstance(expr, Query): return expr.model == base_model if not hasattr(expr, "get_source_expressions"): return False return any( cls._expr_refs_base_model(source_expr, base_model) for source_expr in expr.get_source_expressions() ) @cached_property def contains_self_reference_subquery(self): return any( self._expr_refs_base_model(expr, self.query.model) for expr in chain( self.query.annotations.values(), self.query.where.children ) ) def _as_sql(self, query): result = ["DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)] where, params = self.compile(query.where) if where: result.append("WHERE %s" % where) return " ".join(result), tuple(params) def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ if self.single_alias and not self.contains_self_reference_subquery: return self._as_sql(self.query) innerq = self.query.clone() innerq.__class__ = Query innerq.clear_select_clause() pk = self.query.model._meta.pk innerq.select = [pk.get_col(self.query.get_initial_alias())] outerq = Query(self.query.model) if not self.connection.features.update_can_self_select: # Force the materialization of the inner query to allow reference # to the target table on MySQL. sql, params = innerq.get_compiler(connection=self.connection).as_sql() innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params) outerq.add_filter("pk__in", innerq) return self._as_sql(outerq) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return "", () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, "resolve_expression"): val = val.resolve_expression( self.query, allow_joins=False, for_save=True ) if val.contains_aggregate: raise FieldError( "Aggregate functions are not allowed in this query " "(%s=%r)." % (field.name, val) ) if val.contains_over_clause: raise FieldError( "Window expressions are not allowed in this query " "(%s=%r)." % (field.name, val) ) elif hasattr(val, "prepare_database_save"): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. if hasattr(field, "get_placeholder"): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = "%s" name = field.column if hasattr(val, "as_sql"): sql, params = self.compile(val) values.append("%s = %s" % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append("%s = %s" % (qn(name), placeholder)) update_params.append(val) else: values.append("%s = NULL" % qn(name)) table = self.query.base_table result = [ "UPDATE %s SET" % qn(table), ", ".join(values), ] where, params = self.compile(self.query.where) if where: result.append("WHERE %s" % where) return " ".join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. 
The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(force=True) query.extra = {} query.select = [] meta = query.get_meta() fields = [meta.pk.name] related_ids_index = [] for related in self.query.related_updates: if all( path.join_field.primary_key for path in meta.get_path_to_parent(related) ): # If a primary key chain exists to the targeted related update, # then the meta.pk value can be used for it. related_ids_index.append((related, 0)) else: # This branch will only be reached when updating a field of an # ancestor that is not part of the primary key chain of a MTI # tree. related_ids_index.append((related, len(fields))) fields.append(related._meta.pk.name) query.add_fields(fields) super().pre_sql_setup() must_pre_select = ( count > 1 and not self.connection.features.update_can_self_select ) # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.clear_where() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] related_ids = collections.defaultdict(list) for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) for parent, index in related_ids_index: related_ids[parent].extend(r[index] for r in rows) self.query.add_filter("pk__in", idents) self.query.related_ids = related_ids else: # The fast path. Filters and updates in one query. self.query.add_filter("pk__in", query) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. 
""" sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation) ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ", ".join(sql) params = tuple(params) inner_query_sql, inner_query_params = self.query.inner_query.get_compiler( self.using, elide_empty=self.elide_empty, ).as_sql(with_col_aliases=True) sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql) params = params + inner_query_params return sql, params def cursor_iter(cursor, sentinel, col_count, itersize): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. """ try: for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): yield rows if col_count is None else [r[:col_count] for r in rows] finally: cursor.close()
67fe81cf597a58526e8c55abbb3b78d0007b534a0eff75b0267480de2989372a
""" Code to manage the creation and SQL rendering of 'where' constraints. """ import operator from functools import reduce from django.core.exceptions import EmptyResultSet from django.db.models.expressions import Case, When from django.db.models.lookups import Exact from django.utils import tree from django.utils.functional import cached_property # Connection types AND = "AND" OR = "OR" XOR = "XOR" class WhereNode(tree.Node): """ An SQL WHERE clause. The class is tied to the Query class that created it (in order to create the correct SQL). A child is usually an expression producing boolean values. Most likely the expression is a Lookup instance. However, a child could also be any class with as_sql() and either relabeled_clone() method or relabel_aliases() and clone() methods and contains_aggregate attribute. """ default = AND resolved = False conditional = True def split_having_qualify(self, negated=False, must_group_by=False): """ Return three possibly None nodes: one for those parts of self that should be included in the WHERE clause, one for those parts of self that must be included in the HAVING clause, and one for those parts that refer to window functions. """ if not self.contains_aggregate and not self.contains_over_clause: return self, None, None in_negated = negated ^ self.negated # Whether or not children must be connected in the same filtering # clause (WHERE > HAVING > QUALIFY) to maintain logical semantic. must_remain_connected = ( (in_negated and self.connector == AND) or (not in_negated and self.connector == OR) or self.connector == XOR ) if ( must_remain_connected and self.contains_aggregate and not self.contains_over_clause ): # It's must cheaper to short-circuit and stash everything in the # HAVING clause than split children if possible. return None, self, None where_parts = [] having_parts = [] qualify_parts = [] for c in self.children: if hasattr(c, "split_having_qualify"): where_part, having_part, qualify_part = c.split_having_qualify( in_negated, must_group_by ) if where_part is not None: where_parts.append(where_part) if having_part is not None: having_parts.append(having_part) if qualify_part is not None: qualify_parts.append(qualify_part) elif c.contains_over_clause: qualify_parts.append(c) elif c.contains_aggregate: having_parts.append(c) else: where_parts.append(c) if must_remain_connected and qualify_parts: # Disjunctive heterogeneous predicates can be pushed down to # qualify as long as no conditional aggregation is involved. if not where_parts or (where_parts and not must_group_by): return None, None, self elif where_parts: # In theory this should only be enforced when dealing with # where_parts containing predicates against multi-valued # relationships that could affect aggregation results but this # is complex to infer properly. raise NotImplementedError( "Heterogeneous disjunctive predicates against window functions are " "not implemented when performing conditional aggregation." ) where_node = ( self.create(where_parts, self.connector, self.negated) if where_parts else None ) having_node = ( self.create(having_parts, self.connector, self.negated) if having_parts else None ) qualify_node = ( self.create(qualify_parts, self.connector, self.negated) if qualify_parts else None ) return where_node, having_node, qualify_node def as_sql(self, compiler, connection): """ Return the SQL version of the where clause and the value to be substituted in. 
Return '', [] if this node matches everything, None, [] if this node is empty, and raise EmptyResultSet if this node can't match anything. """ result = [] result_params = [] if self.connector == AND: full_needed, empty_needed = len(self.children), 1 else: full_needed, empty_needed = 1, len(self.children) if self.connector == XOR and not connection.features.supports_logical_xor: # Convert if the database doesn't support XOR: # a XOR b XOR c XOR ... # to: # (a OR b OR c OR ...) AND (a + b + c + ...) == 1 lhs = self.__class__(self.children, OR) rhs_sum = reduce( operator.add, (Case(When(c, then=1), default=0) for c in self.children), ) rhs = Exact(1, rhs_sum) return self.__class__([lhs, rhs], AND, self.negated).as_sql( compiler, connection ) for child in self.children: try: sql, params = compiler.compile(child) except EmptyResultSet: empty_needed -= 1 else: if sql: result.append(sql) result_params.extend(params) else: full_needed -= 1 # Check if this node matches nothing or everything. # First check the amount of full nodes and empty nodes # to make this node empty/full. # Now, check if this node is full/empty using the # counts. if empty_needed == 0: if self.negated: return "", [] else: raise EmptyResultSet if full_needed == 0: if self.negated: raise EmptyResultSet else: return "", [] conn = " %s " % self.connector sql_string = conn.join(result) if sql_string: if self.negated: # Some backends (Oracle at least) need parentheses # around the inner SQL in the negated case, even if the # inner SQL contains just a single expression. sql_string = "NOT (%s)" % sql_string elif len(result) > 1 or self.resolved: sql_string = "(%s)" % sql_string return sql_string, result_params def get_group_by_cols(self): cols = [] for child in self.children: cols.extend(child.get_group_by_cols()) return cols def get_source_expressions(self): return self.children[:] def set_source_expressions(self, children): assert len(children) == len(self.children) self.children = children def relabel_aliases(self, change_map): """ Relabel the alias values of any children. 'change_map' is a dictionary mapping old (current) alias values to the new values. 
""" for pos, child in enumerate(self.children): if hasattr(child, "relabel_aliases"): # For example another WhereNode child.relabel_aliases(change_map) elif hasattr(child, "relabeled_clone"): self.children[pos] = child.relabeled_clone(change_map) def clone(self): clone = self.create(connector=self.connector, negated=self.negated) for child in self.children: if hasattr(child, "clone"): child = child.clone() clone.children.append(child) return clone def relabeled_clone(self, change_map): clone = self.clone() clone.relabel_aliases(change_map) return clone def replace_expressions(self, replacements): if replacement := replacements.get(self): return replacement clone = self.create(connector=self.connector, negated=self.negated) for child in self.children: clone.children.append(child.replace_expressions(replacements)) return clone @classmethod def _contains_aggregate(cls, obj): if isinstance(obj, tree.Node): return any(cls._contains_aggregate(c) for c in obj.children) return obj.contains_aggregate @cached_property def contains_aggregate(self): return self._contains_aggregate(self) @classmethod def _contains_over_clause(cls, obj): if isinstance(obj, tree.Node): return any(cls._contains_over_clause(c) for c in obj.children) return obj.contains_over_clause @cached_property def contains_over_clause(self): return self._contains_over_clause(self) @property def is_summary(self): return any(child.is_summary for child in self.children) @staticmethod def _resolve_leaf(expr, query, *args, **kwargs): if hasattr(expr, "resolve_expression"): expr = expr.resolve_expression(query, *args, **kwargs) return expr @classmethod def _resolve_node(cls, node, query, *args, **kwargs): if hasattr(node, "children"): for child in node.children: cls._resolve_node(child, query, *args, **kwargs) if hasattr(node, "lhs"): node.lhs = cls._resolve_leaf(node.lhs, query, *args, **kwargs) if hasattr(node, "rhs"): node.rhs = cls._resolve_leaf(node.rhs, query, *args, **kwargs) def resolve_expression(self, *args, **kwargs): clone = self.clone() clone._resolve_node(clone, *args, **kwargs) clone.resolved = True return clone @cached_property def output_field(self): from django.db.models import BooleanField return BooleanField() @property def _output_field_or_none(self): return self.output_field def select_format(self, compiler, sql, params): # Wrap filters with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END" return sql, params def get_db_converters(self, connection): return self.output_field.get_db_converters(connection) def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def leaves(self): for child in self.children: if isinstance(child, WhereNode): yield from child.leaves() else: yield child class NothingNode: """A node that matches nothing.""" contains_aggregate = False contains_over_clause = False def as_sql(self, compiler=None, connection=None): raise EmptyResultSet class ExtraWhere: # The contents are a black box - assume no aggregates or windows are used. 
    contains_aggregate = False
    contains_over_clause = False

    def __init__(self, sqls, params):
        self.sqls = sqls
        self.params = params

    def as_sql(self, compiler=None, connection=None):
        sqls = ["(%s)" % sql for sql in self.sqls]
        return " AND ".join(sqls), list(self.params or ())


class SubqueryConstraint:
    # Even if aggregates or windows would be used in a subquery,
    # the outer query isn't interested in those.
    contains_aggregate = False
    contains_over_clause = False

    def __init__(self, alias, columns, targets, query_object):
        self.alias = alias
        self.columns = columns
        self.targets = targets
        query_object.clear_ordering(clear_default=True)
        self.query_object = query_object

    def as_sql(self, compiler, connection):
        query = self.query_object
        query.set_values(self.targets)
        query_compiler = query.get_compiler(connection=connection)
        return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
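# Standalone illustration (not Django code) of the XOR fallback described in
# WhereNode.as_sql() above: on backends without a logical XOR operator,
# a XOR b XOR c is compiled as (a OR b OR c) AND (a + b + c) == 1, i.e.
# "exactly one of the predicates is true".
def emulated_xor(*predicates):
    return any(predicates) and sum(predicates) == 1

assert emulated_xor(True, False, False) is True    # exactly one true
assert emulated_xor(True, True, False) is False    # more than one true
assert emulated_xor(False, False, False) is False  # none true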
82334c0ad804d3564831e1a45f3e8a5bef0811cd25d397e6ea2d30d3df38a76b
from django.db import DatabaseError, InterfaceError from django.db.backends.base.features import BaseDatabaseFeatures from django.utils.functional import cached_property class DatabaseFeatures(BaseDatabaseFeatures): minimum_database_version = (19,) # Oracle crashes with "ORA-00932: inconsistent datatypes: expected - got # BLOB" when grouping by LOBs (#24096). allows_group_by_lob = False allows_group_by_refs = False interprets_empty_strings_as_nulls = True has_select_for_update = True has_select_for_update_nowait = True has_select_for_update_skip_locked = True has_select_for_update_of = True select_for_update_of_column = True can_return_columns_from_insert = True supports_subqueries_in_group_by = False ignores_unnecessary_order_by_in_subqueries = False supports_transactions = True supports_timezones = False has_native_duration_field = True can_defer_constraint_checks = True supports_partially_nullable_unique_constraints = False supports_deferrable_unique_constraints = True truncates_names = True supports_tablespaces = True supports_sequence_reset = False can_introspect_materialized_views = True atomic_transactions = False nulls_order_largest = True requires_literal_defaults = True closed_cursor_error_class = InterfaceError bare_select_suffix = " FROM DUAL" # Select for update with limit can be achieved on Oracle, but not with the # current backend. supports_select_for_update_with_limit = False supports_temporal_subtraction = True # Oracle doesn't ignore quoted identifiers case but the current backend # does by uppercasing all identifiers. ignores_table_name_case = True supports_index_on_text_field = False create_test_procedure_without_params_sql = """ CREATE PROCEDURE "TEST_PROCEDURE" AS V_I INTEGER; BEGIN V_I := 1; END; """ create_test_procedure_with_int_param_sql = """ CREATE PROCEDURE "TEST_PROCEDURE" (P_I INTEGER) AS V_I INTEGER; BEGIN V_I := P_I; END; """ create_test_table_with_composite_primary_key = """ CREATE TABLE test_table_composite_pk ( column_1 NUMBER(11) NOT NULL, column_2 NUMBER(11) NOT NULL, PRIMARY KEY (column_1, column_2) ) """ supports_callproc_kwargs = True supports_over_clause = True supports_frame_range_fixed_distance = True supports_ignore_conflicts = False max_query_params = 2**16 - 1 supports_partial_indexes = False can_rename_index = True supports_slicing_ordering_in_compound = True requires_compound_order_by_subquery = True allows_multiple_constraints_on_same_fields = False supports_boolean_expr_in_select_clause = False supports_comparing_boolean_expr = False supports_primitives_in_json_field = False supports_json_field_contains = False supports_collation_on_textfield = False test_collations = { "ci": "BINARY_CI", "cs": "BINARY", "non_default": "SWEDISH_CI", "swedish_ci": "SWEDISH_CI", } test_now_utc_template = "CURRENT_TIMESTAMP AT TIME ZONE 'UTC'" django_test_skips = { "Oracle doesn't support SHA224.": { "db_functions.text.test_sha224.SHA224Tests.test_basic", "db_functions.text.test_sha224.SHA224Tests.test_transform", }, "Oracle doesn't correctly calculate ISO 8601 week numbering before " "1583 (the Gregorian calendar was introduced in 1582).": { "db_functions.datetime.test_extract_trunc.DateFunctionTests." "test_trunc_week_before_1000", "db_functions.datetime.test_extract_trunc.DateFunctionWithTimeZoneTests." "test_trunc_week_before_1000", }, "Oracle extracts seconds including fractional seconds (#33517).": { "db_functions.datetime.test_extract_trunc.DateFunctionTests." 
"test_extract_second_func_no_fractional", "db_functions.datetime.test_extract_trunc.DateFunctionWithTimeZoneTests." "test_extract_second_func_no_fractional", }, "Oracle doesn't support bitwise XOR.": { "expressions.tests.ExpressionOperatorTests.test_lefthand_bitwise_xor", "expressions.tests.ExpressionOperatorTests.test_lefthand_bitwise_xor_null", "expressions.tests.ExpressionOperatorTests." "test_lefthand_bitwise_xor_right_null", }, "Oracle requires ORDER BY in row_number, ANSI:SQL doesn't.": { "expressions_window.tests.WindowFunctionTests.test_row_number_no_ordering", }, "Raises ORA-00600: internal error code.": { "model_fields.test_jsonfield.TestQuerying.test_usage_in_subquery", }, "Oracle doesn't support changing collations on indexed columns (#33671).": { "migrations.test_operations.OperationTests." "test_alter_field_pk_fk_db_collation", }, } django_test_expected_failures = { # A bug in Django/cx_Oracle with respect to string handling (#23843). "annotations.tests.NonAggregateAnnotationTestCase.test_custom_functions", "annotations.tests.NonAggregateAnnotationTestCase." "test_custom_functions_can_ref_other_functions", } @cached_property def introspected_field_types(self): return { **super().introspected_field_types, "GenericIPAddressField": "CharField", "PositiveBigIntegerField": "BigIntegerField", "PositiveIntegerField": "IntegerField", "PositiveSmallIntegerField": "IntegerField", "SmallIntegerField": "IntegerField", "TimeField": "DateTimeField", } @cached_property def supports_collation_on_charfield(self): with self.connection.cursor() as cursor: try: cursor.execute("SELECT CAST('a' AS VARCHAR2(4001)) FROM dual") except DatabaseError as e: if e.args[0].code == 910: return False raise return True
cb72ff8e66a271080e7f9c97f8ed8aa092ccd7d38ac3b3531f497208fbd8013a
import copy import datetime import re from django.db import DatabaseError from django.db.backends.base.schema import ( BaseDatabaseSchemaEditor, _related_non_m2m_objects, ) from django.utils.duration import duration_iso_string class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s" sql_alter_column_type = "MODIFY %(column)s %(type)s" sql_alter_column_null = "MODIFY %(column)s NULL" sql_alter_column_not_null = "MODIFY %(column)s NOT NULL" sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s" sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL" sql_alter_column_no_default_null = sql_alter_column_no_default sql_alter_column_collate = "MODIFY %(column)s %(type)s%(collation)s" sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s" sql_create_column_inline_fk = ( "CONSTRAINT %(name)s REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s" ) sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS" sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s" def quote_value(self, value): if isinstance(value, (datetime.date, datetime.time, datetime.datetime)): return "'%s'" % value elif isinstance(value, datetime.timedelta): return "'%s'" % duration_iso_string(value) elif isinstance(value, str): return "'%s'" % value.replace("'", "''").replace("%", "%%") elif isinstance(value, (bytes, bytearray, memoryview)): return "'%s'" % value.hex() elif isinstance(value, bool): return "1" if value else "0" else: return str(value) def remove_field(self, model, field): # If the column is an identity column, drop the identity before # removing the field. if self._is_identity_column(model._meta.db_table, field.column): self._drop_identity(model._meta.db_table, field.column) super().remove_field(model, field) def delete_model(self, model): # Run superclass action super().delete_model(model) # Clean up manually created sequence. self.execute( """ DECLARE i INTEGER; BEGIN SELECT COUNT(1) INTO i FROM USER_SEQUENCES WHERE SEQUENCE_NAME = '%(sq_name)s'; IF i = 1 THEN EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"'; END IF; END; /""" % { "sq_name": self.connection.ops._get_no_autofield_sequence_name( model._meta.db_table ) } ) def alter_field(self, model, old_field, new_field, strict=False): try: super().alter_field(model, old_field, new_field, strict) except DatabaseError as e: description = str(e) # If we're changing type to an unsupported type we need a # SQLite-ish workaround if "ORA-22858" in description or "ORA-22859" in description: self._alter_field_type_workaround(model, old_field, new_field) # If an identity column is changing to a non-numeric type, drop the # identity first. elif "ORA-30675" in description: self._drop_identity(model._meta.db_table, old_field.column) self.alter_field(model, old_field, new_field, strict) # If a primary key column is changing to an identity column, drop # the primary key first. elif "ORA-30673" in description and old_field.primary_key: self._delete_primary_key(model, strict=True) self._alter_field_type_workaround(model, old_field, new_field) # If a collation is changing on a primary key, drop the primary key # first. elif "ORA-43923" in description and old_field.primary_key: self._delete_primary_key(model, strict=True) self.alter_field(model, old_field, new_field, strict) # Restore a primary key, if needed. 
if new_field.primary_key: self.execute(self._create_primary_key_sql(model, new_field)) else: raise def _alter_field_type_workaround(self, model, old_field, new_field): """ Oracle refuses to change from some type to other type. What we need to do instead is: - Add a nullable version of the desired field with a temporary name. If the new column is an auto field, then the temporary column can't be nullable. - Update the table to transfer values from old to new - Drop old column - Rename the new column and possibly drop the nullable property """ # Make a new field that's like the new one but with a temporary # column name. new_temp_field = copy.deepcopy(new_field) new_temp_field.null = new_field.get_internal_type() not in ( "AutoField", "BigAutoField", "SmallAutoField", ) new_temp_field.column = self._generate_temp_name(new_field.column) # Add it self.add_field(model, new_temp_field) # Explicit data type conversion # https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf # /Data-Type-Comparison-Rules.html#GUID-D0C5A47E-6F93-4C2D-9E49-4F2B86B359DD new_value = self.quote_name(old_field.column) old_type = old_field.db_type(self.connection) if re.match("^N?CLOB", old_type): new_value = "TO_CHAR(%s)" % new_value old_type = "VARCHAR2" if re.match("^N?VARCHAR2", old_type): new_internal_type = new_field.get_internal_type() if new_internal_type == "DateField": new_value = "TO_DATE(%s, 'YYYY-MM-DD')" % new_value elif new_internal_type == "DateTimeField": new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value elif new_internal_type == "TimeField": # TimeField are stored as TIMESTAMP with a 1900-01-01 date part. new_value = "CONCAT('1900-01-01 ', %s)" % new_value new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value # Transfer values across self.execute( "UPDATE %s set %s=%s" % ( self.quote_name(model._meta.db_table), self.quote_name(new_temp_field.column), new_value, ) ) # Drop the old field self.remove_field(model, old_field) # Rename and possibly make the new field NOT NULL super().alter_field(model, new_temp_field, new_field) # Recreate foreign key (if necessary) because the old field is not # passed to the alter_field() and data types of new_temp_field and # new_field always match. new_type = new_field.db_type(self.connection) if ( (old_field.primary_key and new_field.primary_key) or (old_field.unique and new_field.unique) ) and old_type != new_type: for _, rel in _related_non_m2m_objects(new_temp_field, new_field): if rel.field.db_constraint: self.execute( self._create_fk_sql(rel.related_model, rel.field, "_fk") ) def _alter_column_type_sql(self, model, old_field, new_field, new_type): auto_field_types = {"AutoField", "BigAutoField", "SmallAutoField"} # Drop the identity if migrating away from AutoField. if ( old_field.get_internal_type() in auto_field_types and new_field.get_internal_type() not in auto_field_types and self._is_identity_column(model._meta.db_table, new_field.column) ): self._drop_identity(model._meta.db_table, new_field.column) return super()._alter_column_type_sql(model, old_field, new_field, new_type) def normalize_name(self, name): """ Get the properly shortened and uppercased identifier as returned by quote_name() but without the quotes. 
""" nn = self.quote_name(name) if nn[0] == '"' and nn[-1] == '"': nn = nn[1:-1] return nn def _generate_temp_name(self, for_name): """Generate temporary names for workarounds that need temp columns.""" suffix = hex(hash(for_name)).upper()[1:] return self.normalize_name(for_name + "_" + suffix) def prepare_default(self, value): return self.quote_value(value) def _field_should_be_indexed(self, model, field): create_index = super()._field_should_be_indexed(model, field) db_type = field.db_type(self.connection) if ( db_type is not None and db_type.lower() in self.connection._limited_data_types ): return False return create_index def _is_identity_column(self, table_name, column_name): with self.connection.cursor() as cursor: cursor.execute( """ SELECT CASE WHEN identity_column = 'YES' THEN 1 ELSE 0 END FROM user_tab_cols WHERE table_name = %s AND column_name = %s """, [self.normalize_name(table_name), self.normalize_name(column_name)], ) row = cursor.fetchone() return row[0] if row else False def _drop_identity(self, table_name, column_name): self.execute( "ALTER TABLE %(table)s MODIFY %(column)s DROP IDENTITY" % { "table": self.quote_name(table_name), "column": self.quote_name(column_name), } ) def _get_default_collation(self, table_name): with self.connection.cursor() as cursor: cursor.execute( """ SELECT default_collation FROM user_tables WHERE table_name = %s """, [self.normalize_name(table_name)], ) return cursor.fetchone()[0] def _alter_column_collation_sql( self, model, new_field, new_type, new_collation, old_field ): if new_collation is None: new_collation = self._get_default_collation(model._meta.db_table) return super()._alter_column_collation_sql( model, new_field, new_type, new_collation, old_field )
45d02b0efb0573d18b349048e7598c27e516b00ac04dbb4b9324e38f65a53d12
from django.db import ProgrammingError from django.utils.functional import cached_property class BaseDatabaseFeatures: # An optional tuple indicating the minimum supported database version. minimum_database_version = None gis_enabled = False # Oracle can't group by LOB (large object) data types. allows_group_by_lob = True allows_group_by_pk = False allows_group_by_selected_pks = False allows_group_by_refs = True empty_fetchmany_value = [] update_can_self_select = True # Does the backend distinguish between '' and None? interprets_empty_strings_as_nulls = False # Does the backend allow inserting duplicate NULL rows in a nullable # unique field? All core backends implement this correctly, but other # databases such as SQL Server do not. supports_nullable_unique_constraints = True # Does the backend allow inserting duplicate rows when a unique_together # constraint exists and some fields are nullable but not all of them? supports_partially_nullable_unique_constraints = True # Does the backend support initially deferrable unique constraints? supports_deferrable_unique_constraints = False can_use_chunked_reads = True can_return_columns_from_insert = False can_return_rows_from_bulk_insert = False has_bulk_insert = True uses_savepoints = True can_release_savepoints = False # If True, don't use integer foreign keys referring to, e.g., positive # integer primary keys. related_fields_match_type = False allow_sliced_subqueries_with_in = True has_select_for_update = False has_select_for_update_nowait = False has_select_for_update_skip_locked = False has_select_for_update_of = False has_select_for_no_key_update = False # Does the database's SELECT FOR UPDATE OF syntax require a column rather # than a table? select_for_update_of_column = False # Does the default test database allow multiple connections? # Usually an indication that the test database is in-memory test_db_allows_multiple_connections = True # Can an object be saved without an explicit primary key? supports_unspecified_pk = False # Can a fixture contain forward references? i.e., are # FK constraints checked at the end of transaction, or # at the end of each save operation? supports_forward_references = True # Does the backend truncate names properly when they are too long? truncates_names = False # Is there a REAL datatype in addition to floats/doubles? has_real_datatype = False supports_subqueries_in_group_by = True # Does the backend ignore unnecessary ORDER BY clauses in subqueries? ignores_unnecessary_order_by_in_subqueries = True # Is there a true datatype for uuid? has_native_uuid_field = False # Is there a true datatype for timedeltas? has_native_duration_field = False # Does the database driver supports same type temporal data subtraction # by returning the type used to store duration field? supports_temporal_subtraction = False # Does the __regex lookup support backreferencing and grouping? supports_regex_backreferencing = True # Can date/datetime lookups be performed using a string? supports_date_lookup_using_string = True # Can datetimes with timezones be used? supports_timezones = True # Does the database have a copy of the zoneinfo database? has_zoneinfo_database = True # When performing a GROUP BY, is an ORDER BY NULL required # to remove any ordering? requires_explicit_null_ordering_when_grouping = False # Does the backend order NULL values as largest or smallest? nulls_order_largest = False # Does the backend support NULLS FIRST and NULLS LAST in ORDER BY? 
supports_order_by_nulls_modifier = True # Does the backend orders NULLS FIRST by default? order_by_nulls_first = False # The database's limit on the number of query parameters. max_query_params = None # Can an object have an autoincrement primary key of 0? allows_auto_pk_0 = True # Do we need to NULL a ForeignKey out, or can the constraint check be # deferred can_defer_constraint_checks = False # Does the backend support tablespaces? Default to False because it isn't # in the SQL standard. supports_tablespaces = False # Does the backend reset sequences between tests? supports_sequence_reset = True # Can the backend introspect the default value of a column? can_introspect_default = True # Confirm support for introspected foreign keys # Every database can do this reliably, except MySQL, # which can't do it for MyISAM tables can_introspect_foreign_keys = True # Map fields which some backends may not be able to differentiate to the # field it's introspected as. introspected_field_types = { "AutoField": "AutoField", "BigAutoField": "BigAutoField", "BigIntegerField": "BigIntegerField", "BinaryField": "BinaryField", "BooleanField": "BooleanField", "CharField": "CharField", "DurationField": "DurationField", "GenericIPAddressField": "GenericIPAddressField", "IntegerField": "IntegerField", "PositiveBigIntegerField": "PositiveBigIntegerField", "PositiveIntegerField": "PositiveIntegerField", "PositiveSmallIntegerField": "PositiveSmallIntegerField", "SmallAutoField": "SmallAutoField", "SmallIntegerField": "SmallIntegerField", "TimeField": "TimeField", } # Can the backend introspect the column order (ASC/DESC) for indexes? supports_index_column_ordering = True # Does the backend support introspection of materialized views? can_introspect_materialized_views = False # Support for the DISTINCT ON clause can_distinct_on_fields = False # Does the backend prevent running SQL queries in broken transactions? atomic_transactions = True # Can we roll back DDL in a transaction? can_rollback_ddl = False # Does it support operations requiring references rename in a transaction? supports_atomic_references_rename = True # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE? supports_combined_alters = False # Does it support foreign keys? supports_foreign_keys = True # Can it create foreign key constraints inline when adding columns? can_create_inline_fk = True # Can an index be renamed? can_rename_index = False # Does it automatically index foreign keys? indexes_foreign_keys = True # Does it support CHECK constraints? supports_column_check_constraints = True supports_table_check_constraints = True # Does the backend support introspection of CHECK constraints? can_introspect_check_constraints = True # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value}) # parameter passing? Note this can be provided by the backend even if not # supported by the Python driver supports_paramstyle_pyformat = True # Does the backend require literal defaults, rather than parameterized ones? requires_literal_defaults = False # Does the backend require a connection reset after each material schema change? connection_persists_old_columns = False # What kind of error does the backend throw when accessing closed cursor? closed_cursor_error_class = ProgrammingError # Does 'a' LIKE 'A' match? has_case_insensitive_like = False # Suffix for backends that don't support "SELECT xxx;" queries. 
bare_select_suffix = "" # If NULL is implied on columns without needing to be explicitly specified implied_column_null = False # Does the backend support "select for update" queries with limit (and offset)? supports_select_for_update_with_limit = True # Does the backend ignore null expressions in GREATEST and LEAST queries unless # every expression is null? greatest_least_ignores_nulls = False # Can the backend clone databases for parallel test execution? # Defaults to False to allow third-party backends to opt-in. can_clone_databases = False # Does the backend consider table names with different casing to # be equal? ignores_table_name_case = False # Place FOR UPDATE right after FROM clause. Used on MSSQL. for_update_after_from = False # Combinatorial flags supports_select_union = True supports_select_intersection = True supports_select_difference = True supports_slicing_ordering_in_compound = False supports_parentheses_in_compound = True requires_compound_order_by_subquery = False # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate # expressions? supports_aggregate_filter_clause = False # Does the backend support indexing a TextField? supports_index_on_text_field = True # Does the backend support window expressions (expression OVER (...))? supports_over_clause = False supports_frame_range_fixed_distance = False only_supports_unbounded_with_preceding_and_following = False # Does the backend support CAST with precision? supports_cast_with_precision = True # How many second decimals does the database return when casting a value to # a type with time? time_cast_precision = 6 # SQL to create a procedure for use by the Django test suite. The # functionality of the procedure isn't important. create_test_procedure_without_params_sql = None create_test_procedure_with_int_param_sql = None # SQL to create a table with a composite primary key for use by the Django # test suite. create_test_table_with_composite_primary_key = None # Does the backend support keyword parameters for cursor.callproc()? supports_callproc_kwargs = False # What formats does the backend EXPLAIN syntax support? supported_explain_formats = set() # Does the backend support the default parameter in lead() and lag()? supports_default_in_lead_lag = True # Does the backend support ignoring constraint or uniqueness errors during # INSERT? supports_ignore_conflicts = True # Does the backend support updating rows on constraint or uniqueness errors # during INSERT? supports_update_conflicts = False supports_update_conflicts_with_target = False # Does this backend require casting the results of CASE expressions used # in UPDATE statements to ensure the expression has the correct type? requires_casted_case_in_updates = False # Does the backend support partial indexes (CREATE INDEX ... WHERE ...)? supports_partial_indexes = True supports_functions_in_partial_indexes = True # Does the backend support covering indexes (CREATE INDEX ... INCLUDE ...)? supports_covering_indexes = False # Does the backend support indexes on expressions? supports_expression_indexes = True # Does the backend treat COLLATE as an indexed expression? collate_as_index_expression = False # Does the database allow more than one constraint or index on the same # field(s)? allows_multiple_constraints_on_same_fields = True # Does the backend support boolean expressions in SELECT and GROUP BY # clauses? supports_boolean_expr_in_select_clause = True # Does the backend support comparing boolean expressions in WHERE clauses? 
# Eg: WHERE (price > 0) IS NOT NULL supports_comparing_boolean_expr = True # Does the backend support JSONField? supports_json_field = True # Can the backend introspect a JSONField? can_introspect_json_field = True # Does the backend support primitives in JSONField? supports_primitives_in_json_field = True # Is there a true datatype for JSON? has_native_json_field = False # Does the backend use PostgreSQL-style JSON operators like '->'? has_json_operators = False # Does the backend support __contains and __contained_by lookups for # a JSONField? supports_json_field_contains = True # Does value__d__contains={'f': 'g'} (without a list around the dict) match # {'d': [{'f': 'g'}]}? json_key_contains_list_matching_requires_list = False # Does the backend support JSONObject() database function? has_json_object_function = True # Does the backend support column collations? supports_collation_on_charfield = True supports_collation_on_textfield = True # Does the backend support non-deterministic collations? supports_non_deterministic_collations = True # Does the backend support the logical XOR operator? supports_logical_xor = False # Collation names for use by the Django test suite. test_collations = { "ci": None, # Case-insensitive. "cs": None, # Case-sensitive. "non_default": None, # Non-default. "swedish_ci": None, # Swedish case-insensitive. } # SQL template override for tests.aggregation.tests.NowUTC test_now_utc_template = None # A set of dotted paths to tests in Django's test suite that are expected # to fail on this database. django_test_expected_failures = set() # A map of reasons to sets of dotted paths to tests in Django's test suite # that should be skipped for this database. django_test_skips = {} def __init__(self, connection): self.connection = connection @cached_property def supports_explaining_query_execution(self): """Does this backend support explaining query execution?""" return self.connection.ops.explain_prefix is not None @cached_property def supports_transactions(self): """Confirm support for transactions.""" with self.connection.cursor() as cursor: cursor.execute("CREATE TABLE ROLLBACK_TEST (X INT)") self.connection.set_autocommit(False) cursor.execute("INSERT INTO ROLLBACK_TEST (X) VALUES (8)") self.connection.rollback() self.connection.set_autocommit(True) cursor.execute("SELECT COUNT(X) FROM ROLLBACK_TEST") (count,) = cursor.fetchone() cursor.execute("DROP TABLE ROLLBACK_TEST") return count == 0 def allows_group_by_selected_pks_on_model(self, model): if not self.allows_group_by_selected_pks: return False return model._meta.managed
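# Minimal sketch of how a backend customizes these defaults: the backend's
# features.py subclasses BaseDatabaseFeatures and overrides only what differs
# (all concrete values below are hypothetical; compare the Oracle class above).
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property

class DatabaseFeatures(BaseDatabaseFeatures):
    minimum_database_version = (3, 1)   # hypothetical minimum version
    supports_over_clause = True
    supports_partial_indexes = False
    max_query_params = 999

    @cached_property
    def supports_json_field(self):
        # Flags may also be computed from the live server version.
        return self.connection.get_database_version() >= (3, 9)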
4b62bbf077484eed7a5675fcd530b880be32a34bb515b59442972b0c5e0058be
import _thread import copy import datetime import logging import threading import time import warnings from collections import deque from contextlib import contextmanager try: import zoneinfo except ImportError: from backports import zoneinfo from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import DEFAULT_DB_ALIAS, DatabaseError, NotSupportedError from django.db.backends import utils from django.db.backends.base.validation import BaseDatabaseValidation from django.db.backends.signals import connection_created from django.db.transaction import TransactionManagementError from django.db.utils import DatabaseErrorWrapper from django.utils.asyncio import async_unsafe from django.utils.functional import cached_property NO_DB_ALIAS = "__no_db__" RAN_DB_VERSION_CHECK = set() logger = logging.getLogger("django.db.backends.base") # RemovedInDjango50Warning def timezone_constructor(tzname): if settings.USE_DEPRECATED_PYTZ: import pytz return pytz.timezone(tzname) return zoneinfo.ZoneInfo(tzname) class BaseDatabaseWrapper: """Represent a database connection.""" # Mapping of Field objects to their column types. data_types = {} # Mapping of Field objects to their SQL suffix such as AUTOINCREMENT. data_types_suffix = {} # Mapping of Field objects to their SQL for CHECK constraints. data_type_check_constraints = {} ops = None vendor = "unknown" display_name = "unknown" SchemaEditorClass = None # Classes instantiated in __init__(). client_class = None creation_class = None features_class = None introspection_class = None ops_class = None validation_class = BaseDatabaseValidation queries_limit = 9000 def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS): # Connection related attributes. # The underlying database connection. self.connection = None # `settings_dict` should be a dictionary containing keys such as # NAME, USER, etc. It's called `settings_dict` instead of `settings` # to disambiguate it from Django settings modules. self.settings_dict = settings_dict self.alias = alias # Query logging in debug mode or when explicitly enabled. self.queries_log = deque(maxlen=self.queries_limit) self.force_debug_cursor = False # Transaction related attributes. # Tracks if the connection is in autocommit mode. Per PEP 249, by # default, it isn't. self.autocommit = False # Tracks if the connection is in a transaction managed by 'atomic'. self.in_atomic_block = False # Increment to generate unique savepoint ids. self.savepoint_state = 0 # List of savepoints created by 'atomic'. self.savepoint_ids = [] # Stack of active 'atomic' blocks. self.atomic_blocks = [] # Tracks if the outermost 'atomic' block should commit on exit, # ie. if autocommit was active on entry. self.commit_on_exit = True # Tracks if the transaction should be rolled back to the next # available savepoint because of an exception in an inner block. self.needs_rollback = False self.rollback_exc = None # Connection termination related attributes. self.close_at = None self.closed_in_transaction = False self.errors_occurred = False self.health_check_enabled = False self.health_check_done = False # Thread-safety related attributes. self._thread_sharing_lock = threading.Lock() self._thread_sharing_count = 0 self._thread_ident = _thread.get_ident() # A list of no-argument functions to run when the transaction commits. 
# Each entry is an (sids, func, robust) tuple, where sids is a set of # the active savepoint IDs when this function was registered and robust # specifies whether it's allowed for the function to fail. self.run_on_commit = [] # Should we run the on-commit hooks the next time set_autocommit(True) # is called? self.run_commit_hooks_on_set_autocommit_on = False # A stack of wrappers to be invoked around execute()/executemany() # calls. Each entry is a function taking five arguments: execute, sql, # params, many, and context. It's the function's responsibility to # call execute(sql, params, many, context). self.execute_wrappers = [] self.client = self.client_class(self) self.creation = self.creation_class(self) self.features = self.features_class(self) self.introspection = self.introspection_class(self) self.ops = self.ops_class(self) self.validation = self.validation_class(self) def __repr__(self): return ( f"<{self.__class__.__qualname__} " f"vendor={self.vendor!r} alias={self.alias!r}>" ) def ensure_timezone(self): """ Ensure the connection's timezone is set to `self.timezone_name` and return whether it changed or not. """ return False @cached_property def timezone(self): """ Return a tzinfo of the database connection time zone. This is only used when time zone support is enabled. When a datetime is read from the database, it is always returned in this time zone. When the database backend supports time zones, it doesn't matter which time zone Django uses, as long as aware datetimes are used everywhere. Other users connecting to the database can choose their own time zone. When the database backend doesn't support time zones, the time zone Django uses may be constrained by the requirements of other users of the database. """ if not settings.USE_TZ: return None elif self.settings_dict["TIME_ZONE"] is None: return datetime.timezone.utc else: return timezone_constructor(self.settings_dict["TIME_ZONE"]) @cached_property def timezone_name(self): """ Name of the time zone of the database connection. """ if not settings.USE_TZ: return settings.TIME_ZONE elif self.settings_dict["TIME_ZONE"] is None: return "UTC" else: return self.settings_dict["TIME_ZONE"] @property def queries_logged(self): return self.force_debug_cursor or settings.DEBUG @property def queries(self): if len(self.queries_log) == self.queries_log.maxlen: warnings.warn( "Limit for query logging exceeded, only the last {} queries " "will be returned.".format(self.queries_log.maxlen) ) return list(self.queries_log) def get_database_version(self): """Return a tuple of the database's version.""" raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require a get_database_version() " "method." ) def check_database_version_supported(self): """ Raise an error if the database version isn't supported by this version of Django. """ if ( self.features.minimum_database_version is not None and self.get_database_version() < self.features.minimum_database_version ): db_version = ".".join(map(str, self.get_database_version())) min_db_version = ".".join(map(str, self.features.minimum_database_version)) raise NotSupportedError( f"{self.display_name} {min_db_version} or later is required " f"(found {db_version})." 
) # ##### Backend-specific methods for creating connections and cursors ##### def get_connection_params(self): """Return a dict of parameters suitable for get_new_connection.""" raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require a get_connection_params() " "method" ) def get_new_connection(self, conn_params): """Open a connection to the database.""" raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require a get_new_connection() " "method" ) def init_connection_state(self): """Initialize the database connection settings.""" global RAN_DB_VERSION_CHECK if self.alias not in RAN_DB_VERSION_CHECK: self.check_database_version_supported() RAN_DB_VERSION_CHECK.add(self.alias) def create_cursor(self, name=None): """Create a cursor. Assume that a connection is established.""" raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require a create_cursor() method" ) # ##### Backend-specific methods for creating connections ##### @async_unsafe def connect(self): """Connect to the database. Assume that the connection is closed.""" # Check for invalid configurations. self.check_settings() # In case the previous connection was closed while in an atomic block self.in_atomic_block = False self.savepoint_ids = [] self.atomic_blocks = [] self.needs_rollback = False # Reset parameters defining when to close/health-check the connection. self.health_check_enabled = self.settings_dict["CONN_HEALTH_CHECKS"] max_age = self.settings_dict["CONN_MAX_AGE"] self.close_at = None if max_age is None else time.monotonic() + max_age self.closed_in_transaction = False self.errors_occurred = False # New connections are healthy. self.health_check_done = True # Establish the connection conn_params = self.get_connection_params() self.connection = self.get_new_connection(conn_params) self.set_autocommit(self.settings_dict["AUTOCOMMIT"]) self.init_connection_state() connection_created.send(sender=self.__class__, connection=self) self.run_on_commit = [] def check_settings(self): if self.settings_dict["TIME_ZONE"] is not None and not settings.USE_TZ: raise ImproperlyConfigured( "Connection '%s' cannot set TIME_ZONE because USE_TZ is False." % self.alias ) @async_unsafe def ensure_connection(self): """Guarantee that a connection to the database is established.""" if self.connection is None: with self.wrap_database_errors: self.connect() # ##### Backend-specific wrappers for PEP-249 connection methods ##### def _prepare_cursor(self, cursor): """ Validate the connection is usable and perform database cursor wrapping. 
""" self.validate_thread_sharing() if self.queries_logged: wrapped_cursor = self.make_debug_cursor(cursor) else: wrapped_cursor = self.make_cursor(cursor) return wrapped_cursor def _cursor(self, name=None): self.close_if_health_check_failed() self.ensure_connection() with self.wrap_database_errors: return self._prepare_cursor(self.create_cursor(name)) def _commit(self): if self.connection is not None: with self.wrap_database_errors: return self.connection.commit() def _rollback(self): if self.connection is not None: with self.wrap_database_errors: return self.connection.rollback() def _close(self): if self.connection is not None: with self.wrap_database_errors: return self.connection.close() # ##### Generic wrappers for PEP-249 connection methods ##### @async_unsafe def cursor(self): """Create a cursor, opening a connection if necessary.""" return self._cursor() @async_unsafe def commit(self): """Commit a transaction and reset the dirty flag.""" self.validate_thread_sharing() self.validate_no_atomic_block() self._commit() # A successful commit means that the database connection works. self.errors_occurred = False self.run_commit_hooks_on_set_autocommit_on = True @async_unsafe def rollback(self): """Roll back a transaction and reset the dirty flag.""" self.validate_thread_sharing() self.validate_no_atomic_block() self._rollback() # A successful rollback means that the database connection works. self.errors_occurred = False self.needs_rollback = False self.run_on_commit = [] @async_unsafe def close(self): """Close the connection to the database.""" self.validate_thread_sharing() self.run_on_commit = [] # Don't call validate_no_atomic_block() to avoid making it difficult # to get rid of a connection in an invalid state. The next connect() # will reset the transaction state anyway. if self.closed_in_transaction or self.connection is None: return try: self._close() finally: if self.in_atomic_block: self.closed_in_transaction = True self.needs_rollback = True else: self.connection = None # ##### Backend-specific savepoint management methods ##### def _savepoint(self, sid): with self.cursor() as cursor: cursor.execute(self.ops.savepoint_create_sql(sid)) def _savepoint_rollback(self, sid): with self.cursor() as cursor: cursor.execute(self.ops.savepoint_rollback_sql(sid)) def _savepoint_commit(self, sid): with self.cursor() as cursor: cursor.execute(self.ops.savepoint_commit_sql(sid)) def _savepoint_allowed(self): # Savepoints cannot be created outside a transaction return self.features.uses_savepoints and not self.get_autocommit() # ##### Generic savepoint management methods ##### @async_unsafe def savepoint(self): """ Create a savepoint inside the current transaction. Return an identifier for the savepoint that will be used for the subsequent rollback or commit. Do nothing if savepoints are not supported. """ if not self._savepoint_allowed(): return thread_ident = _thread.get_ident() tid = str(thread_ident).replace("-", "") self.savepoint_state += 1 sid = "s%s_x%d" % (tid, self.savepoint_state) self.validate_thread_sharing() self._savepoint(sid) return sid @async_unsafe def savepoint_rollback(self, sid): """ Roll back to a savepoint. Do nothing if savepoints are not supported. """ if not self._savepoint_allowed(): return self.validate_thread_sharing() self._savepoint_rollback(sid) # Remove any callbacks registered while this savepoint was active. 
self.run_on_commit = [ (sids, func, robust) for (sids, func, robust) in self.run_on_commit if sid not in sids ] @async_unsafe def savepoint_commit(self, sid): """ Release a savepoint. Do nothing if savepoints are not supported. """ if not self._savepoint_allowed(): return self.validate_thread_sharing() self._savepoint_commit(sid) @async_unsafe def clean_savepoints(self): """ Reset the counter used to generate unique savepoint ids in this thread. """ self.savepoint_state = 0 # ##### Backend-specific transaction management methods ##### def _set_autocommit(self, autocommit): """ Backend-specific implementation to enable or disable autocommit. """ raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require a _set_autocommit() method" ) # ##### Generic transaction management methods ##### def get_autocommit(self): """Get the autocommit state.""" self.ensure_connection() return self.autocommit def set_autocommit( self, autocommit, force_begin_transaction_with_broken_autocommit=False ): """ Enable or disable autocommit. The usual way to start a transaction is to turn autocommit off. SQLite does not properly start a transaction when disabling autocommit. To avoid this buggy behavior and to actually enter a new transaction, an explicit BEGIN is required. Using force_begin_transaction_with_broken_autocommit=True will issue an explicit BEGIN with SQLite. This option will be ignored for other backends. """ self.validate_no_atomic_block() self.close_if_health_check_failed() self.ensure_connection() start_transaction_under_autocommit = ( force_begin_transaction_with_broken_autocommit and not autocommit and hasattr(self, "_start_transaction_under_autocommit") ) if start_transaction_under_autocommit: self._start_transaction_under_autocommit() else: self._set_autocommit(autocommit) self.autocommit = autocommit if autocommit and self.run_commit_hooks_on_set_autocommit_on: self.run_and_clear_commit_hooks() self.run_commit_hooks_on_set_autocommit_on = False def get_rollback(self): """Get the "needs rollback" flag -- for *advanced use* only.""" if not self.in_atomic_block: raise TransactionManagementError( "The rollback flag doesn't work outside of an 'atomic' block." ) return self.needs_rollback def set_rollback(self, rollback): """ Set or unset the "needs rollback" flag -- for *advanced use* only. """ if not self.in_atomic_block: raise TransactionManagementError( "The rollback flag doesn't work outside of an 'atomic' block." ) self.needs_rollback = rollback def validate_no_atomic_block(self): """Raise an error if an atomic block is active.""" if self.in_atomic_block: raise TransactionManagementError( "This is forbidden when an 'atomic' block is active." ) def validate_no_broken_transaction(self): if self.needs_rollback: raise TransactionManagementError( "An error occurred in the current transaction. You can't " "execute queries until the end of the 'atomic' block." ) from self.rollback_exc # ##### Foreign key constraints checks handling ##### @contextmanager def constraint_checks_disabled(self): """ Disable foreign key constraint checking. """ disabled = self.disable_constraint_checking() try: yield finally: if disabled: self.enable_constraint_checking() def disable_constraint_checking(self): """ Backends can implement as needed to temporarily disable foreign key constraint checking. Should return True if the constraints were disabled and will need to be reenabled. 
""" return False def enable_constraint_checking(self): """ Backends can implement as needed to re-enable foreign key constraint checking. """ pass def check_constraints(self, table_names=None): """ Backends can override this method if they can apply constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an IntegrityError if any invalid foreign key references are encountered. """ pass # ##### Connection termination handling ##### def is_usable(self): """ Test if the database connection is usable. This method may assume that self.connection is not None. Actual implementations should take care not to raise exceptions as that may prevent Django from recycling unusable connections. """ raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require an is_usable() method" ) def close_if_health_check_failed(self): """Close existing connection if it fails a health check.""" if ( self.connection is None or not self.health_check_enabled or self.health_check_done ): return if not self.is_usable(): self.close() self.health_check_done = True def close_if_unusable_or_obsolete(self): """ Close the current connection if unrecoverable errors have occurred or if it outlived its maximum age. """ if self.connection is not None: self.health_check_done = False # If the application didn't restore the original autocommit setting, # don't take chances, drop the connection. if self.get_autocommit() != self.settings_dict["AUTOCOMMIT"]: self.close() return # If an exception other than DataError or IntegrityError occurred # since the last commit / rollback, check if the connection works. if self.errors_occurred: if self.is_usable(): self.errors_occurred = False self.health_check_done = True else: self.close() return if self.close_at is not None and time.monotonic() >= self.close_at: self.close() return # ##### Thread safety handling ##### @property def allow_thread_sharing(self): with self._thread_sharing_lock: return self._thread_sharing_count > 0 def inc_thread_sharing(self): with self._thread_sharing_lock: self._thread_sharing_count += 1 def dec_thread_sharing(self): with self._thread_sharing_lock: if self._thread_sharing_count <= 0: raise RuntimeError( "Cannot decrement the thread sharing count below zero." ) self._thread_sharing_count -= 1 def validate_thread_sharing(self): """ Validate that the connection isn't accessed by another thread than the one which originally created it, unless the connection was explicitly authorized to be shared between threads (via the `inc_thread_sharing()` method). Raise an exception if the validation fails. """ if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()): raise DatabaseError( "DatabaseWrapper objects created in a " "thread can only be used in that same thread. The object " "with alias '%s' was created in thread id %s and this is " "thread id %s." % (self.alias, self._thread_ident, _thread.get_ident()) ) # ##### Miscellaneous ##### def prepare_database(self): """ Hook to do any database check or preparation, generally called before migrating a project or an app. """ pass @cached_property def wrap_database_errors(self): """ Context manager and decorator that re-throws backend-specific database exceptions using Django's common wrappers. """ return DatabaseErrorWrapper(self) def chunked_cursor(self): """ Return a cursor that tries to avoid caching in the database (if supported by the database), otherwise return a regular cursor. 
""" return self.cursor() def make_debug_cursor(self, cursor): """Create a cursor that logs all queries in self.queries_log.""" return utils.CursorDebugWrapper(cursor, self) def make_cursor(self, cursor): """Create a cursor without debug logging.""" return utils.CursorWrapper(cursor, self) @contextmanager def temporary_connection(self): """ Context manager that ensures that a connection is established, and if it opened one, closes it to avoid leaving a dangling connection. This is useful for operations outside of the request-response cycle. Provide a cursor: with self.temporary_connection() as cursor: ... """ must_close = self.connection is None try: with self.cursor() as cursor: yield cursor finally: if must_close: self.close() @contextmanager def _nodb_cursor(self): """ Return a cursor from an alternative connection to be used when there is no need to access the main database, specifically for test db creation/deletion. This also prevents the production database from being exposed to potential child threads while (or after) the test database is destroyed. Refs #10868, #17786, #16969. """ conn = self.__class__({**self.settings_dict, "NAME": None}, alias=NO_DB_ALIAS) try: with conn.cursor() as cursor: yield cursor finally: conn.close() def schema_editor(self, *args, **kwargs): """ Return a new instance of this backend's SchemaEditor. """ if self.SchemaEditorClass is None: raise NotImplementedError( "The SchemaEditorClass attribute of this database wrapper is still None" ) return self.SchemaEditorClass(self, *args, **kwargs) def on_commit(self, func, robust=False): if not callable(func): raise TypeError("on_commit()'s callback must be a callable.") if self.in_atomic_block: # Transaction in progress; save for execution on commit. self.run_on_commit.append((set(self.savepoint_ids), func, robust)) elif not self.get_autocommit(): raise TransactionManagementError( "on_commit() cannot be used in manual transaction management" ) else: # No transaction in progress and in autocommit mode; execute # immediately. if robust: try: func() except Exception as e: logger.error( f"Error calling {func.__qualname__} in on_commit() (%s).", e, exc_info=True, ) else: func() def run_and_clear_commit_hooks(self): self.validate_no_atomic_block() current_run_on_commit = self.run_on_commit self.run_on_commit = [] while current_run_on_commit: _, func, robust = current_run_on_commit.pop(0) if robust: try: func() except Exception as e: logger.error( f"Error calling {func.__qualname__} in on_commit() during " f"transaction (%s).", e, exc_info=True, ) else: func() @contextmanager def execute_wrapper(self, wrapper): """ Return a context manager under which the wrapper is applied to suitable database query executions. """ self.execute_wrappers.append(wrapper) try: yield finally: self.execute_wrappers.pop() def copy(self, alias=None): """ Return a copy of this connection. For tests that require two connections to the same database. """ settings_dict = copy.deepcopy(self.settings_dict) if alias is None: alias = self.alias return type(self)(settings_dict, alias)
60dca29339133abc885ae8a43086c9c205817454435a9598386d41fe91d0ae5a
import logging import operator from datetime import datetime from django.conf import settings from django.db.backends.ddl_references import ( Columns, Expressions, ForeignKeyName, IndexName, Statement, Table, ) from django.db.backends.utils import names_digest, split_identifier from django.db.models import Deferrable, Index from django.db.models.sql import Query from django.db.transaction import TransactionManagementError, atomic from django.utils import timezone logger = logging.getLogger("django.db.backends.schema") def _is_relevant_relation(relation, altered_field): """ When altering the given field, must constraints on its model from the given relation be temporarily dropped? """ field = relation.field if field.many_to_many: # M2M reverse field return False if altered_field.primary_key and field.to_fields == [None]: # Foreign key constraint on the primary key, which is being altered. return True # Is the constraint targeting the field being altered? return altered_field.name in field.to_fields def _all_related_fields(model): # Related fields must be returned in a deterministic order. return sorted( model._meta._get_fields( forward=False, reverse=True, include_hidden=True, include_parents=False, ), key=operator.attrgetter("name"), ) def _related_non_m2m_objects(old_field, new_field): # Filter out m2m objects from reverse relations. # Return (old_relation, new_relation) tuples. related_fields = zip( ( obj for obj in _all_related_fields(old_field.model) if _is_relevant_relation(obj, old_field) ), ( obj for obj in _all_related_fields(new_field.model) if _is_relevant_relation(obj, new_field) ), ) for old_rel, new_rel in related_fields: yield old_rel, new_rel yield from _related_non_m2m_objects( old_rel.remote_field, new_rel.remote_field, ) class BaseDatabaseSchemaEditor: """ This class and its subclasses are responsible for emitting schema-changing statements to the databases - model creation/removal/alteration, field renaming, index fiddling, and so on. 
""" # Overrideable SQL templates sql_create_table = "CREATE TABLE %(table)s (%(definition)s)" sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s" sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s" sql_delete_table = "DROP TABLE %(table)s CASCADE" sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s" sql_alter_column = "ALTER TABLE %(table)s %(changes)s" sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s" sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL" sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL" sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s" sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT" sql_alter_column_no_default_null = sql_alter_column_no_default sql_alter_column_collate = "ALTER COLUMN %(column)s TYPE %(type)s%(collation)s" sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE" sql_rename_column = ( "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s" ) sql_update_with_default = ( "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL" ) sql_unique_constraint = "UNIQUE (%(columns)s)%(deferrable)s" sql_check_constraint = "CHECK (%(check)s)" sql_delete_constraint = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_constraint = "CONSTRAINT %(name)s %(constraint)s" sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)" sql_delete_check = sql_delete_constraint sql_create_unique = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s " "UNIQUE (%(columns)s)%(deferrable)s" ) sql_delete_unique = sql_delete_constraint sql_create_fk = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) " "REFERENCES %(to_table)s (%(to_column)s)%(deferrable)s" ) sql_create_inline_fk = None sql_create_column_inline_fk = None sql_delete_fk = sql_delete_constraint sql_create_index = ( "CREATE INDEX %(name)s ON %(table)s " "(%(columns)s)%(include)s%(extra)s%(condition)s" ) sql_create_unique_index = ( "CREATE UNIQUE INDEX %(name)s ON %(table)s " "(%(columns)s)%(include)s%(condition)s" ) sql_rename_index = "ALTER INDEX %(old_name)s RENAME TO %(new_name)s" sql_delete_index = "DROP INDEX %(name)s" sql_create_pk = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)" ) sql_delete_pk = sql_delete_constraint sql_delete_procedure = "DROP PROCEDURE %(procedure)s" def __init__(self, connection, collect_sql=False, atomic=True): self.connection = connection self.collect_sql = collect_sql if self.collect_sql: self.collected_sql = [] self.atomic_migration = self.connection.features.can_rollback_ddl and atomic # State-managing methods def __enter__(self): self.deferred_sql = [] if self.atomic_migration: self.atomic = atomic(self.connection.alias) self.atomic.__enter__() return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: for sql in self.deferred_sql: self.execute(sql) if self.atomic_migration: self.atomic.__exit__(exc_type, exc_value, traceback) # Core utility functions def execute(self, sql, params=()): """Execute the given SQL statement, with optional parameters.""" # Don't perform the transactional DDL check if SQL is being collected # as it's not going to be executed anyway. 
if ( not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl ): raise TransactionManagementError( "Executing DDL statements while in a transaction on databases " "that can't perform a rollback is prohibited." ) # Account for non-string statement objects. sql = str(sql) # Log the command we're running, then run it logger.debug( "%s; (params %r)", sql, params, extra={"params": params, "sql": sql} ) if self.collect_sql: ending = "" if sql.rstrip().endswith(";") else ";" if params is not None: self.collected_sql.append( (sql % tuple(map(self.quote_value, params))) + ending ) else: self.collected_sql.append(sql + ending) else: with self.connection.cursor() as cursor: cursor.execute(sql, params) def quote_name(self, name): return self.connection.ops.quote_name(name) def table_sql(self, model): """Take a model and return its table definition.""" # Add any unique_togethers (always deferred, as some fields might be # created afterward, like geometry fields with some backends). for field_names in model._meta.unique_together: fields = [model._meta.get_field(field) for field in field_names] self.deferred_sql.append(self._create_unique_sql(model, fields)) # Create column SQL, add FK deferreds if needed. column_sqls = [] params = [] for field in model._meta.local_fields: # SQL. definition, extra_params = self.column_sql(model, field) if definition is None: continue # Check constraints can go on the column SQL here. db_params = field.db_parameters(connection=self.connection) if db_params["check"]: definition += " " + self.sql_check_constraint % db_params # Autoincrement SQL (for backends with inline variant). col_type_suffix = field.db_type_suffix(connection=self.connection) if col_type_suffix: definition += " %s" % col_type_suffix params.extend(extra_params) # FK. if field.remote_field and field.db_constraint: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field( field.remote_field.field_name ).column if self.sql_create_inline_fk: definition += " " + self.sql_create_inline_fk % { "to_table": self.quote_name(to_table), "to_column": self.quote_name(to_column), } elif self.connection.features.supports_foreign_keys: self.deferred_sql.append( self._create_fk_sql( model, field, "_fk_%(to_table)s_%(to_column)s" ) ) # Add the SQL to our big list. column_sqls.append( "%s %s" % ( self.quote_name(field.column), definition, ) ) # Autoincrement SQL (for backends with post table definition # variant). if field.get_internal_type() in ( "AutoField", "BigAutoField", "SmallAutoField", ): autoinc_sql = self.connection.ops.autoinc_sql( model._meta.db_table, field.column ) if autoinc_sql: self.deferred_sql.extend(autoinc_sql) constraints = [ constraint.constraint_sql(model, self) for constraint in model._meta.constraints ] sql = self.sql_create_table % { "table": self.quote_name(model._meta.db_table), "definition": ", ".join( str(constraint) for constraint in (*column_sqls, *constraints) if constraint ), } if model._meta.db_tablespace: tablespace_sql = self.connection.ops.tablespace_sql( model._meta.db_tablespace ) if tablespace_sql: sql += " " + tablespace_sql return sql, params # Field <-> database mapping functions def _iter_column_sql( self, column_db_type, params, model, field, field_db_params, include_default ): yield column_db_type if collation := field_db_params.get("collation"): yield self._collate_sql(collation) # Work out nullability. null = field.null # Include a default value, if requested. 
include_default = ( include_default and not self.skip_default(field) and # Don't include a default value if it's a nullable field and the # default cannot be dropped in the ALTER COLUMN statement (e.g. # MySQL longtext and longblob). not (null and self.skip_default_on_alter(field)) ) if include_default: default_value = self.effective_default(field) if default_value is not None: column_default = "DEFAULT " + self._column_default_sql(field) if self.connection.features.requires_literal_defaults: # Some databases can't take defaults as a parameter (Oracle). # If this is the case, the individual schema backend should # implement prepare_default(). yield column_default % self.prepare_default(default_value) else: yield column_default params.append(default_value) # Oracle treats the empty string ('') as null, so coerce the null # option whenever '' is a possible value. if ( field.empty_strings_allowed and not field.primary_key and self.connection.features.interprets_empty_strings_as_nulls ): null = True if not null: yield "NOT NULL" elif not self.connection.features.implied_column_null: yield "NULL" if field.primary_key: yield "PRIMARY KEY" elif field.unique: yield "UNIQUE" # Optionally add the tablespace if it's an implicitly indexed column. tablespace = field.db_tablespace or model._meta.db_tablespace if ( tablespace and self.connection.features.supports_tablespaces and field.unique ): yield self.connection.ops.tablespace_sql(tablespace, inline=True) def column_sql(self, model, field, include_default=False): """ Return the column definition for a field. The field must already have had set_attributes_from_name() called. """ # Get the column's type and use that as the basis of the SQL. field_db_params = field.db_parameters(connection=self.connection) column_db_type = field_db_params["type"] # Check for fields that aren't actually columns (e.g. M2M). if column_db_type is None: return None, None params = [] return ( " ".join( # This appends to the params being returned. self._iter_column_sql( column_db_type, params, model, field, field_db_params, include_default, ) ), params, ) def skip_default(self, field): """ Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob). """ return False def skip_default_on_alter(self, field): """ Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob) in the ALTER COLUMN statement. """ return False def prepare_default(self, value): """ Only used for backends which have requires_literal_defaults feature """ raise NotImplementedError( "subclasses of BaseDatabaseSchemaEditor for backends which have " "requires_literal_defaults must provide a prepare_default() method" ) def _column_default_sql(self, field): """ Return the SQL to use in a DEFAULT clause. The resulting string should contain a '%s' placeholder for a default value. """ return "%s" @staticmethod def _effective_default(field): # This method allows testing its logic without a connection. 
if field.has_default(): default = field.get_default() elif not field.null and field.blank and field.empty_strings_allowed: if field.get_internal_type() == "BinaryField": default = b"" else: default = "" elif getattr(field, "auto_now", False) or getattr(field, "auto_now_add", False): internal_type = field.get_internal_type() if internal_type == "DateTimeField": default = timezone.now() else: default = datetime.now() if internal_type == "DateField": default = default.date() elif internal_type == "TimeField": default = default.time() else: default = None return default def effective_default(self, field): """Return a field's effective database default value.""" return field.get_db_prep_save(self._effective_default(field), self.connection) def quote_value(self, value): """ Return a quoted version of the value so it's safe to use in an SQL string. This is not safe against injection from user code; it is intended only for use in making SQL scripts or preparing default values for particularly tricky backends (defaults are not user-defined, though, so this is safe). """ raise NotImplementedError() # Actions def create_model(self, model): """ Create a table and any accompanying indexes or unique constraints for the given `model`. """ sql, params = self.table_sql(model) # Prevent using [] as params, in the case a literal '%' is used in the # definition. self.execute(sql, params or None) # Add any field index and index_together's (deferred as SQLite # _remake_table needs it). self.deferred_sql.extend(self._model_indexes_sql(model)) # Make M2M tables for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.create_model(field.remote_field.through) def delete_model(self, model): """Delete a model from the database.""" # Handle auto-created intermediary models for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.delete_model(field.remote_field.through) # Delete the table self.execute( self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), } ) # Remove all deferred statements referencing the deleted table. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_table( model._meta.db_table ): self.deferred_sql.remove(sql) def add_index(self, model, index): """Add an index on a model.""" if ( index.contains_expressions and not self.connection.features.supports_expression_indexes ): return None # Index.create_sql returns interpolated SQL which makes params=None a # necessity to avoid escaping attempts on execution. self.execute(index.create_sql(model, self), params=None) def remove_index(self, model, index): """Remove an index from a model.""" if ( index.contains_expressions and not self.connection.features.supports_expression_indexes ): return None self.execute(index.remove_sql(model, self)) def rename_index(self, model, old_index, new_index): if self.connection.features.can_rename_index: self.execute( self._rename_index_sql(model, old_index.name, new_index.name), params=None, ) else: self.remove_index(model, old_index) self.add_index(model, new_index) def add_constraint(self, model, constraint): """Add a constraint to a model.""" sql = constraint.create_sql(model, self) if sql: # Constraint.create_sql returns interpolated SQL which makes # params=None a necessity to avoid escaping attempts on execution. 
self.execute(sql, params=None) def remove_constraint(self, model, constraint): """Remove a constraint from a model.""" sql = constraint.remove_sql(model, self) if sql: self.execute(sql) def alter_unique_together(self, model, old_unique_together, new_unique_together): """ Deal with a model changing its unique_together. The input unique_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. """ olds = {tuple(fields) for fields in old_unique_together} news = {tuple(fields) for fields in new_unique_together} # Deleted uniques for fields in olds.difference(news): self._delete_composed_index( model, fields, {"unique": True, "primary_key": False}, self.sql_delete_unique, ) # Created uniques for field_names in news.difference(olds): fields = [model._meta.get_field(field) for field in field_names] self.execute(self._create_unique_sql(model, fields)) def alter_index_together(self, model, old_index_together, new_index_together): """ Deal with a model changing its index_together. The input index_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. """ olds = {tuple(fields) for fields in old_index_together} news = {tuple(fields) for fields in new_index_together} # Deleted indexes for fields in olds.difference(news): self._delete_composed_index( model, fields, {"index": True, "unique": False}, self.sql_delete_index, ) # Created indexes for field_names in news.difference(olds): fields = [model._meta.get_field(field) for field in field_names] self.execute(self._create_index_sql(model, fields=fields, suffix="_idx")) def _delete_composed_index(self, model, fields, constraint_kwargs, sql): meta_constraint_names = { constraint.name for constraint in model._meta.constraints } meta_index_names = {constraint.name for constraint in model._meta.indexes} columns = [model._meta.get_field(field).column for field in fields] constraint_names = self._constraint_names( model, columns, exclude=meta_constraint_names | meta_index_names, **constraint_kwargs, ) if ( constraint_kwargs.get("unique") is True and constraint_names and self.connection.features.allows_multiple_constraints_on_same_fields ): # Constraint matching the unique_together name. default_name = str( self._unique_constraint_name(model._meta.db_table, columns, quote=False) ) if default_name in constraint_names: constraint_names = [default_name] if len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of constraints for %s(%s)" % ( len(constraint_names), model._meta.db_table, ", ".join(columns), ) ) self.execute(self._delete_constraint_sql(sql, model, constraint_names[0])) def alter_db_table(self, model, old_db_table, new_db_table): """Rename the table a model points to.""" if old_db_table == new_db_table or ( self.connection.features.ignores_table_name_case and old_db_table.lower() == new_db_table.lower() ): return self.execute( self.sql_rename_table % { "old_table": self.quote_name(old_db_table), "new_table": self.quote_name(new_db_table), } ) # Rename all references to the old table name. for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_table_references(old_db_table, new_db_table) def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace): """Move a model's table between tablespaces.""" self.execute( self.sql_retablespace_table % { "table": self.quote_name(model._meta.db_table), "old_tablespace": self.quote_name(old_db_tablespace), "new_tablespace": self.quote_name(new_db_tablespace), } ) def add_field(self, model, field): """ Create a field on a model. 
Usually involves adding a column, but may involve adding a table instead (for M2M fields). """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.create_model(field.remote_field.through) # Get the column's definition definition, params = self.column_sql(model, field, include_default=True) # It might not actually have a column behind it if definition is None: return if col_type_suffix := field.db_type_suffix(connection=self.connection): definition += f" {col_type_suffix}" # Check constraints can go on the column SQL here db_params = field.db_parameters(connection=self.connection) if db_params["check"]: definition += " " + self.sql_check_constraint % db_params if ( field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint ): constraint_suffix = "_fk_%(to_table)s_%(to_column)s" # Add FK constraint inline, if supported. if self.sql_create_column_inline_fk: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field( field.remote_field.field_name ).column namespace, _ = split_identifier(model._meta.db_table) definition += " " + self.sql_create_column_inline_fk % { "name": self._fk_constraint_name(model, field, constraint_suffix), "namespace": "%s." % self.quote_name(namespace) if namespace else "", "column": self.quote_name(field.column), "to_table": self.quote_name(to_table), "to_column": self.quote_name(to_column), "deferrable": self.connection.ops.deferrable_sql(), } # Otherwise, add FK constraints later. else: self.deferred_sql.append( self._create_fk_sql(model, field, constraint_suffix) ) # Build the SQL and run it sql = self.sql_create_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), "definition": definition, } self.execute(sql, params) # Drop the default if we need to # (Django usually does not use in-database defaults) if ( not self.skip_default_on_alter(field) and self.effective_default(field) is not None ): changes_sql, params = self._alter_column_default_sql( model, None, field, drop=True ) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Add an index, if required self.deferred_sql.extend(self._field_indexes_sql(model, field)) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def remove_field(self, model, field): """ Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table. """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.delete_model(field.remote_field.through) # It might not actually have a column behind it if field.db_parameters(connection=self.connection)["type"] is None: return # Drop any FK constraints, MySQL requires explicit deletion if field.remote_field: fk_names = self._constraint_names(model, [field.column], foreign_key=True) for fk_name in fk_names: self.execute(self._delete_fk_sql(model, fk_name)) # Delete the column sql = self.sql_delete_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), } self.execute(sql) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() # Remove all deferred statements referencing the deleted column. 
for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_column( model._meta.db_table, field.column ): self.deferred_sql.remove(sql) def alter_field(self, model, old_field, new_field, strict=False): """ Allow a field's type, uniqueness, nullability, default, column, constraints, etc. to be modified. `old_field` is required to compute the necessary changes. If `strict` is True, raise errors if the old column does not match `old_field` precisely. """ if not self._field_should_be_altered(old_field, new_field): return # Ensure this field is even column-based old_db_params = old_field.db_parameters(connection=self.connection) old_type = old_db_params["type"] new_db_params = new_field.db_parameters(connection=self.connection) new_type = new_db_params["type"] if (old_type is None and old_field.remote_field is None) or ( new_type is None and new_field.remote_field is None ): raise ValueError( "Cannot alter field %s into %s - they do not properly define " "db_type (are you using a badly-written custom field?)" % (old_field, new_field), ) elif ( old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and old_field.remote_field.through._meta.auto_created and new_field.remote_field.through._meta.auto_created ) ): return self._alter_many_to_many(model, old_field, new_field, strict) elif ( old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and not old_field.remote_field.through._meta.auto_created and not new_field.remote_field.through._meta.auto_created ) ): # Both sides have through models; this is a no-op. return elif old_type is None or new_type is None: raise ValueError( "Cannot alter field %s into %s - they are not compatible types " "(you cannot alter to or from M2M fields, or add or remove " "through= on M2M fields)" % (old_field, new_field) ) self._alter_field( model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict, ) def _alter_field( self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False, ): """Perform a "physical" (non-ManyToMany) field update.""" # Drop any FK constraints, we'll remake them later fks_dropped = set() if ( self.connection.features.supports_foreign_keys and old_field.remote_field and old_field.db_constraint ): fk_names = self._constraint_names( model, [old_field.column], foreign_key=True ) if strict and len(fk_names) != 1: raise ValueError( "Found wrong number (%s) of foreign key constraints for %s.%s" % ( len(fk_names), model._meta.db_table, old_field.column, ) ) for fk_name in fk_names: fks_dropped.add((old_field.column,)) self.execute(self._delete_fk_sql(model, fk_name)) # Has unique been removed? 
if old_field.unique and ( not new_field.unique or self._field_became_primary_key(old_field, new_field) ): # Find the unique constraint for this field meta_constraint_names = { constraint.name for constraint in model._meta.constraints } constraint_names = self._constraint_names( model, [old_field.column], unique=True, primary_key=False, exclude=meta_constraint_names, ) if strict and len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of unique constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, ) ) for constraint_name in constraint_names: self.execute(self._delete_unique_sql(model, constraint_name)) # Drop incoming FK constraints if the field is a primary key or unique, # which might be a to_field target, and things are going to change. old_collation = old_db_params.get("collation") new_collation = new_db_params.get("collation") drop_foreign_keys = ( self.connection.features.supports_foreign_keys and ( (old_field.primary_key and new_field.primary_key) or (old_field.unique and new_field.unique) ) and ((old_type != new_type) or (old_collation != new_collation)) ) if drop_foreign_keys: # '_meta.related_field' also contains M2M reverse fields, these # will be filtered out for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field): rel_fk_names = self._constraint_names( new_rel.related_model, [new_rel.field.column], foreign_key=True ) for fk_name in rel_fk_names: self.execute(self._delete_fk_sql(new_rel.related_model, fk_name)) # Removed an index? (no strict check, as multiple indexes are possible) # Remove indexes if db_index switched to False or a unique constraint # will now be used in lieu of an index. The following lines from the # truth table show all True cases; the rest are False: # # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique # ------------------------------------------------------------------------------ # True | False | False | False # True | False | False | True # True | False | True | True if ( old_field.db_index and not old_field.unique and (not new_field.db_index or new_field.unique) ): # Find the index for this field meta_index_names = {index.name for index in model._meta.indexes} # Retrieve only BTREE indexes since this is what's created with # db_index=True. index_names = self._constraint_names( model, [old_field.column], index=True, type_=Index.suffix, exclude=meta_index_names, ) for index_name in index_names: # The only way to check if an index was created with # db_index=True or with Index(['field'], name='foo') # is to look at its name (refs #28053). self.execute(self._delete_index_sql(model, index_name)) # Change check constraints? if old_db_params["check"] != new_db_params["check"] and old_db_params["check"]: meta_constraint_names = { constraint.name for constraint in model._meta.constraints } constraint_names = self._constraint_names( model, [old_field.column], check=True, exclude=meta_constraint_names, ) if strict and len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of check constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, ) ) for constraint_name in constraint_names: self.execute(self._delete_check_sql(model, constraint_name)) # Have they renamed the column? if old_field.column != new_field.column: self.execute( self._rename_field_sql( model._meta.db_table, old_field, new_field, new_type ) ) # Rename all references to the renamed column. 
for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_column_references( model._meta.db_table, old_field.column, new_field.column ) # Next, start accumulating actions to do actions = [] null_actions = [] post_actions = [] # Type suffix change? (e.g. auto increment). old_type_suffix = old_field.db_type_suffix(connection=self.connection) new_type_suffix = new_field.db_type_suffix(connection=self.connection) # Collation change? if old_collation != new_collation: # Collation change handles also a type change. fragment = self._alter_column_collation_sql( model, new_field, new_type, new_collation, old_field ) actions.append(fragment) # Type change? elif (old_type, old_type_suffix) != (new_type, new_type_suffix): fragment, other_actions = self._alter_column_type_sql( model, old_field, new_field, new_type ) actions.append(fragment) post_actions.extend(other_actions) # When changing a column NULL constraint to NOT NULL with a given # default value, we need to perform 4 steps: # 1. Add a default for new incoming writes # 2. Update existing NULL rows with new default # 3. Replace NULL constraint with NOT NULL # 4. Drop the default again. # Default change? needs_database_default = False if old_field.null and not new_field.null: old_default = self.effective_default(old_field) new_default = self.effective_default(new_field) if ( not self.skip_default_on_alter(new_field) and old_default != new_default and new_default is not None ): needs_database_default = True actions.append( self._alter_column_default_sql(model, old_field, new_field) ) # Nullability change? if old_field.null != new_field.null: fragment = self._alter_column_null_sql(model, old_field, new_field) if fragment: null_actions.append(fragment) # Only if we have a default and there is a change from NULL to NOT NULL four_way_default_alteration = new_field.has_default() and ( old_field.null and not new_field.null ) if actions or null_actions: if not four_way_default_alteration: # If we don't have to do a 4-way default alteration we can # directly run a (NOT) NULL alteration actions = actions + null_actions # Combine actions together if we can (e.g. postgres) if self.connection.features.supports_combined_alters and actions: sql, params = tuple(zip(*actions)) actions = [(", ".join(sql), sum(params, []))] # Apply those actions for sql, params in actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if four_way_default_alteration: # Update existing rows with default value self.execute( self.sql_update_with_default % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(new_field.column), "default": "%s", }, [new_default], ) # Since we didn't run a NOT NULL change before we need to do it # now for sql, params in null_actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if post_actions: for sql, params in post_actions: self.execute(sql, params) # If primary_key changed to False, delete the primary key constraint. if old_field.primary_key and not new_field.primary_key: self._delete_primary_key(model, strict) # Added a unique? if self._unique_should_be_added(old_field, new_field): self.execute(self._create_unique_sql(model, [new_field])) # Added an index? Add an index if db_index switched to True or a unique # constraint will no longer be used in lieu of an index. 
The following # lines from the truth table show all True cases; the rest are False: # # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique # ------------------------------------------------------------------------------ # False | False | True | False # False | True | True | False # True | True | True | False if ( (not old_field.db_index or old_field.unique) and new_field.db_index and not new_field.unique ): self.execute(self._create_index_sql(model, fields=[new_field])) # Type alteration on primary key? Then we need to alter the column # referring to us. rels_to_update = [] if drop_foreign_keys: rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Changed to become primary key? if self._field_became_primary_key(old_field, new_field): # Make the new one self.execute(self._create_primary_key_sql(model, new_field)) # Update all referencing columns rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Handle our type alters on the other end of rels from the PK stuff above for old_rel, new_rel in rels_to_update: rel_db_params = new_rel.field.db_parameters(connection=self.connection) rel_type = rel_db_params["type"] rel_collation = rel_db_params.get("collation") old_rel_db_params = old_rel.field.db_parameters(connection=self.connection) old_rel_collation = old_rel_db_params.get("collation") if old_rel_collation != rel_collation: # Collation change handles also a type change. fragment = self._alter_column_collation_sql( new_rel.related_model, new_rel.field, rel_type, rel_collation, old_rel.field, ) other_actions = [] else: fragment, other_actions = self._alter_column_type_sql( new_rel.related_model, old_rel.field, new_rel.field, rel_type ) self.execute( self.sql_alter_column % { "table": self.quote_name(new_rel.related_model._meta.db_table), "changes": fragment[0], }, fragment[1], ) for sql, params in other_actions: self.execute(sql, params) # Does it have a foreign key? if ( self.connection.features.supports_foreign_keys and new_field.remote_field and ( fks_dropped or not old_field.remote_field or not old_field.db_constraint ) and new_field.db_constraint ): self.execute( self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s") ) # Rebuild FKs that pointed to us if we previously had to drop them if drop_foreign_keys: for _, rel in rels_to_update: if rel.field.db_constraint: self.execute( self._create_fk_sql(rel.related_model, rel.field, "_fk") ) # Does it have check constraints we need to add? if old_db_params["check"] != new_db_params["check"] and new_db_params["check"]: constraint_name = self._create_index_name( model._meta.db_table, [new_field.column], suffix="_check" ) self.execute( self._create_check_sql(model, constraint_name, new_db_params["check"]) ) # Drop the default if we need to # (Django usually does not use in-database defaults) if needs_database_default: changes_sql, params = self._alter_column_default_sql( model, old_field, new_field, drop=True ) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def _alter_column_null_sql(self, model, old_field, new_field): """ Hook to specialize column null alteration. Return a (sql, params) fragment to set a column to null or non-null as required by new_field, or None if no changes are required. 
""" if ( self.connection.features.interprets_empty_strings_as_nulls and new_field.empty_strings_allowed ): # The field is nullable in the database anyway, leave it alone. return else: new_db_params = new_field.db_parameters(connection=self.connection) sql = ( self.sql_alter_column_null if new_field.null else self.sql_alter_column_not_null ) return ( sql % { "column": self.quote_name(new_field.column), "type": new_db_params["type"], }, [], ) def _alter_column_default_sql(self, model, old_field, new_field, drop=False): """ Hook to specialize column default alteration. Return a (sql, params) fragment to add or drop (depending on the drop argument) a default to new_field's column. """ new_default = self.effective_default(new_field) default = self._column_default_sql(new_field) params = [new_default] if drop: params = [] elif self.connection.features.requires_literal_defaults: # Some databases (Oracle) can't take defaults as a parameter # If this is the case, the SchemaEditor for that database should # implement prepare_default(). default = self.prepare_default(new_default) params = [] new_db_params = new_field.db_parameters(connection=self.connection) if drop: if new_field.null: sql = self.sql_alter_column_no_default_null else: sql = self.sql_alter_column_no_default else: sql = self.sql_alter_column_default return ( sql % { "column": self.quote_name(new_field.column), "type": new_db_params["type"], "default": default, }, params, ) def _alter_column_type_sql(self, model, old_field, new_field, new_type): """ Hook to specialize column type alteration for different backends, for cases when a creation type is different to an alteration type (e.g. SERIAL in PostgreSQL, PostGIS fields). Return a two-tuple of: an SQL fragment of (sql, params) to insert into an ALTER TABLE statement and a list of extra (sql, params) tuples to run once the field is altered. """ return ( ( self.sql_alter_column_type % { "column": self.quote_name(new_field.column), "type": new_type, }, [], ), [], ) def _alter_column_collation_sql( self, model, new_field, new_type, new_collation, old_field ): return ( self.sql_alter_column_collate % { "column": self.quote_name(new_field.column), "type": new_type, "collation": " " + self._collate_sql(new_collation) if new_collation else "", }, [], ) def _alter_many_to_many(self, model, old_field, new_field, strict): """Alter M2Ms to repoint their to= endpoints.""" # Rename the through table if ( old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table ): self.alter_db_table( old_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table, ) # Repoint the FK to the other side self.alter_field( new_field.remote_field.through, # The field that points to the target model is needed, so we can # tell alter_field to change it - this is m2m_reverse_field_name() # (as opposed to m2m_field_name(), which points to our model). old_field.remote_field.through._meta.get_field( old_field.m2m_reverse_field_name() ), new_field.remote_field.through._meta.get_field( new_field.m2m_reverse_field_name() ), ) self.alter_field( new_field.remote_field.through, # for self-referential models we need to alter field from the other end too old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()), ) def _create_index_name(self, table_name, column_names, suffix=""): """ Generate a unique name for an index/unique constraint. 
The name is divided into 3 parts: the table name, the column names, and a unique digest and suffix. """ _, table_name = split_identifier(table_name) hash_suffix_part = "%s%s" % ( names_digest(table_name, *column_names, length=8), suffix, ) max_length = self.connection.ops.max_name_length() or 200 # If everything fits into max_length, use that name. index_name = "%s_%s_%s" % (table_name, "_".join(column_names), hash_suffix_part) if len(index_name) <= max_length: return index_name # Shorten a long suffix. if len(hash_suffix_part) > max_length / 3: hash_suffix_part = hash_suffix_part[: max_length // 3] other_length = (max_length - len(hash_suffix_part)) // 2 - 1 index_name = "%s_%s_%s" % ( table_name[:other_length], "_".join(column_names)[:other_length], hash_suffix_part, ) # Prepend D if needed to prevent the name from starting with an # underscore or a number (not permitted on Oracle). if index_name[0] == "_" or index_name[0].isdigit(): index_name = "D%s" % index_name[:-1] return index_name def _get_index_tablespace_sql(self, model, fields, db_tablespace=None): if db_tablespace is None: if len(fields) == 1 and fields[0].db_tablespace: db_tablespace = fields[0].db_tablespace elif settings.DEFAULT_INDEX_TABLESPACE: db_tablespace = settings.DEFAULT_INDEX_TABLESPACE elif model._meta.db_tablespace: db_tablespace = model._meta.db_tablespace if db_tablespace is not None: return " " + self.connection.ops.tablespace_sql(db_tablespace) return "" def _index_condition_sql(self, condition): if condition: return " WHERE " + condition return "" def _index_include_sql(self, model, columns): if not columns or not self.connection.features.supports_covering_indexes: return "" return Statement( " INCLUDE (%(columns)s)", columns=Columns(model._meta.db_table, columns, self.quote_name), ) def _create_index_sql( self, model, *, fields=None, name=None, suffix="", using="", db_tablespace=None, col_suffixes=(), sql=None, opclasses=(), condition=None, include=None, expressions=None, ): """ Return the SQL statement to create the index for one or several fields or expressions. `sql` can be specified if the syntax differs from the standard (GIS indexes, ...). 
""" fields = fields or [] expressions = expressions or [] compiler = Query(model, alias_cols=False).get_compiler( connection=self.connection, ) tablespace_sql = self._get_index_tablespace_sql( model, fields, db_tablespace=db_tablespace ) columns = [field.column for field in fields] sql_create_index = sql or self.sql_create_index table = model._meta.db_table def create_index_name(*args, **kwargs): nonlocal name if name is None: name = self._create_index_name(*args, **kwargs) return self.quote_name(name) return Statement( sql_create_index, table=Table(table, self.quote_name), name=IndexName(table, columns, suffix, create_index_name), using=using, columns=( self._index_columns(table, columns, col_suffixes, opclasses) if columns else Expressions(table, expressions, compiler, self.quote_value) ), extra=tablespace_sql, condition=self._index_condition_sql(condition), include=self._index_include_sql(model, include), ) def _delete_index_sql(self, model, name, sql=None): return Statement( sql or self.sql_delete_index, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), ) def _rename_index_sql(self, model, old_name, new_name): return Statement( self.sql_rename_index, table=Table(model._meta.db_table, self.quote_name), old_name=self.quote_name(old_name), new_name=self.quote_name(new_name), ) def _index_columns(self, table, columns, col_suffixes, opclasses): return Columns(table, columns, self.quote_name, col_suffixes=col_suffixes) def _model_indexes_sql(self, model): """ Return a list of all index SQL statements (field indexes, index_together, Meta.indexes) for the specified model. """ if not model._meta.managed or model._meta.proxy or model._meta.swapped: return [] output = [] for field in model._meta.local_fields: output.extend(self._field_indexes_sql(model, field)) for field_names in model._meta.index_together: fields = [model._meta.get_field(field) for field in field_names] output.append(self._create_index_sql(model, fields=fields, suffix="_idx")) for index in model._meta.indexes: if ( not index.contains_expressions or self.connection.features.supports_expression_indexes ): output.append(index.create_sql(model, self)) return output def _field_indexes_sql(self, model, field): """ Return a list of all index SQL statements for the specified field. 
""" output = [] if self._field_should_be_indexed(model, field): output.append(self._create_index_sql(model, fields=[field])) return output def _field_should_be_altered(self, old_field, new_field): _, old_path, old_args, old_kwargs = old_field.deconstruct() _, new_path, new_args, new_kwargs = new_field.deconstruct() # Don't alter when: # - changing only a field name # - changing an attribute that doesn't affect the schema # - adding only a db_column and the column name is not changed for attr in old_field.non_db_attrs: old_kwargs.pop(attr, None) for attr in new_field.non_db_attrs: new_kwargs.pop(attr, None) return self.quote_name(old_field.column) != self.quote_name( new_field.column ) or (old_path, old_args, old_kwargs) != (new_path, new_args, new_kwargs) def _field_should_be_indexed(self, model, field): return field.db_index and not field.unique def _field_became_primary_key(self, old_field, new_field): return not old_field.primary_key and new_field.primary_key def _unique_should_be_added(self, old_field, new_field): return ( not new_field.primary_key and new_field.unique and (not old_field.unique or old_field.primary_key) ) def _rename_field_sql(self, table, old_field, new_field, new_type): return self.sql_rename_column % { "table": self.quote_name(table), "old_column": self.quote_name(old_field.column), "new_column": self.quote_name(new_field.column), "type": new_type, } def _create_fk_sql(self, model, field, suffix): table = Table(model._meta.db_table, self.quote_name) name = self._fk_constraint_name(model, field, suffix) column = Columns(model._meta.db_table, [field.column], self.quote_name) to_table = Table(field.target_field.model._meta.db_table, self.quote_name) to_column = Columns( field.target_field.model._meta.db_table, [field.target_field.column], self.quote_name, ) deferrable = self.connection.ops.deferrable_sql() return Statement( self.sql_create_fk, table=table, name=name, column=column, to_table=to_table, to_column=to_column, deferrable=deferrable, ) def _fk_constraint_name(self, model, field, suffix): def create_fk_name(*args, **kwargs): return self.quote_name(self._create_index_name(*args, **kwargs)) return ForeignKeyName( model._meta.db_table, [field.column], split_identifier(field.target_field.model._meta.db_table)[1], [field.target_field.column], suffix, create_fk_name, ) def _delete_fk_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_fk, model, name) def _deferrable_constraint_sql(self, deferrable): if deferrable is None: return "" if deferrable == Deferrable.DEFERRED: return " DEFERRABLE INITIALLY DEFERRED" if deferrable == Deferrable.IMMEDIATE: return " DEFERRABLE INITIALLY IMMEDIATE" def _unique_sql( self, model, fields, name, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, ): if ( deferrable and not self.connection.features.supports_deferrable_unique_constraints ): return None if condition or include or opclasses or expressions: # Databases support conditional, covering, and functional unique # constraints via a unique index. 
sql = self._create_unique_sql( model, fields, name=name, condition=condition, include=include, opclasses=opclasses, expressions=expressions, ) if sql: self.deferred_sql.append(sql) return None constraint = self.sql_unique_constraint % { "columns": ", ".join([self.quote_name(field.column) for field in fields]), "deferrable": self._deferrable_constraint_sql(deferrable), } return self.sql_constraint % { "name": self.quote_name(name), "constraint": constraint, } def _create_unique_sql( self, model, fields, name=None, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, ): if ( ( deferrable and not self.connection.features.supports_deferrable_unique_constraints ) or (condition and not self.connection.features.supports_partial_indexes) or (include and not self.connection.features.supports_covering_indexes) or ( expressions and not self.connection.features.supports_expression_indexes ) ): return None compiler = Query(model, alias_cols=False).get_compiler( connection=self.connection ) table = model._meta.db_table columns = [field.column for field in fields] if name is None: name = self._unique_constraint_name(table, columns, quote=True) else: name = self.quote_name(name) if condition or include or opclasses or expressions: sql = self.sql_create_unique_index else: sql = self.sql_create_unique if columns: columns = self._index_columns( table, columns, col_suffixes=(), opclasses=opclasses ) else: columns = Expressions(table, expressions, compiler, self.quote_value) return Statement( sql, table=Table(table, self.quote_name), name=name, columns=columns, condition=self._index_condition_sql(condition), deferrable=self._deferrable_constraint_sql(deferrable), include=self._index_include_sql(model, include), ) def _unique_constraint_name(self, table, columns, quote=True): if quote: def create_unique_name(*args, **kwargs): return self.quote_name(self._create_index_name(*args, **kwargs)) else: create_unique_name = self._create_index_name return IndexName(table, columns, "_uniq", create_unique_name) def _delete_unique_sql( self, model, name, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, ): if ( ( deferrable and not self.connection.features.supports_deferrable_unique_constraints ) or (condition and not self.connection.features.supports_partial_indexes) or (include and not self.connection.features.supports_covering_indexes) or ( expressions and not self.connection.features.supports_expression_indexes ) ): return None if condition or include or opclasses or expressions: sql = self.sql_delete_index else: sql = self.sql_delete_unique return self._delete_constraint_sql(sql, model, name) def _check_sql(self, name, check): return self.sql_constraint % { "name": self.quote_name(name), "constraint": self.sql_check_constraint % {"check": check}, } def _create_check_sql(self, model, name, check): return Statement( self.sql_create_check, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), check=check, ) def _delete_check_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_check, model, name) def _delete_constraint_sql(self, template, model, name): return Statement( template, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), ) def _constraint_names( self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None, type_=None, exclude=None, ): """Return all constraint names matching the columns and conditions.""" if column_names is not None: 
column_names = [ self.connection.introspection.identifier_converter(name) for name in column_names ] with self.connection.cursor() as cursor: constraints = self.connection.introspection.get_constraints( cursor, model._meta.db_table ) result = [] for name, infodict in constraints.items(): if column_names is None or column_names == infodict["columns"]: if unique is not None and infodict["unique"] != unique: continue if primary_key is not None and infodict["primary_key"] != primary_key: continue if index is not None and infodict["index"] != index: continue if check is not None and infodict["check"] != check: continue if foreign_key is not None and not infodict["foreign_key"]: continue if type_ is not None and infodict["type"] != type_: continue if not exclude or name not in exclude: result.append(name) return result def _delete_primary_key(self, model, strict=False): constraint_names = self._constraint_names(model, primary_key=True) if strict and len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of PK constraints for %s" % ( len(constraint_names), model._meta.db_table, ) ) for constraint_name in constraint_names: self.execute(self._delete_primary_key_sql(model, constraint_name)) def _create_primary_key_sql(self, model, field): return Statement( self.sql_create_pk, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name( self._create_index_name( model._meta.db_table, [field.column], suffix="_pk" ) ), columns=Columns(model._meta.db_table, [field.column], self.quote_name), ) def _delete_primary_key_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_pk, model, name) def _collate_sql(self, collation): return "COLLATE " + self.quote_name(collation) def remove_procedure(self, procedure_name, param_types=()): sql = self.sql_delete_procedure % { "procedure": self.quote_name(procedure_name), "param_types": ",".join(param_types), } self.execute(sql)
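
# --- Editor's sketch (not part of Django; illustrative only) -----------------
# BaseDatabaseSchemaEditor is normally obtained from connection.schema_editor(),
# which executes deferred SQL on exit and wraps the DDL in a transaction when
# the backend can roll back DDL. A minimal, hedged sketch; ``Author`` and the
# "example" app label are hypothetical, and column_sql() requires that
# set_attributes_from_name() has been called on the new field first:
def _example_schema_editor_usage():  # pragma: no cover - never called by Django
    from django.db import connection, models

    class Author(models.Model):
        name = models.CharField(max_length=100)

        class Meta:
            app_label = "example"  # hypothetical app label

    email = models.CharField(max_length=254, null=True)
    email.set_attributes_from_name("email")

    with connection.schema_editor() as editor:
        editor.create_model(Author)      # CREATE TABLE plus deferred index SQL
        editor.add_field(Author, email)  # ALTER TABLE ... ADD COLUMN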
be4060a9b7479885864409fd766796aa1dcaca06b21de4fbe46e0744574d2b3b
import operator from django.db.backends.base.features import BaseDatabaseFeatures from django.utils.functional import cached_property class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () allows_group_by_pk = True related_fields_match_type = True # MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME. allow_sliced_subqueries_with_in = False has_select_for_update = True supports_forward_references = False supports_regex_backreferencing = False supports_date_lookup_using_string = False supports_timezones = False requires_explicit_null_ordering_when_grouping = True can_release_savepoints = True atomic_transactions = False can_clone_databases = True supports_temporal_subtraction = True supports_select_intersection = False supports_select_difference = False supports_slicing_ordering_in_compound = True supports_index_on_text_field = False supports_update_conflicts = True create_test_procedure_without_params_sql = """ CREATE PROCEDURE test_procedure () BEGIN DECLARE V_I INTEGER; SET V_I = 1; END; """ create_test_procedure_with_int_param_sql = """ CREATE PROCEDURE test_procedure (P_I INTEGER) BEGIN DECLARE V_I INTEGER; SET V_I = P_I; END; """ create_test_table_with_composite_primary_key = """ CREATE TABLE test_table_composite_pk ( column_1 INTEGER NOT NULL, column_2 INTEGER NOT NULL, PRIMARY KEY(column_1, column_2) ) """ # Neither MySQL nor MariaDB support partial indexes. supports_partial_indexes = False # COLLATE must be wrapped in parentheses because MySQL treats COLLATE as an # indexed expression. collate_as_index_expression = True supports_order_by_nulls_modifier = False order_by_nulls_first = True supports_logical_xor = True @cached_property def minimum_database_version(self): if self.connection.mysql_is_mariadb: return (10, 4) else: return (8,) @cached_property def test_collations(self): charset = "utf8" if ( self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 6) ) or ( not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 30) ): # utf8 is an alias for utf8mb3 in MariaDB 10.6+ and MySQL 8.0.30+. charset = "utf8mb3" return { "ci": f"{charset}_general_ci", "non_default": f"{charset}_esperanto_ci", "swedish_ci": f"{charset}_swedish_ci", } test_now_utc_template = "UTC_TIMESTAMP(6)" @cached_property def django_test_skips(self): skips = { "This doesn't work on MySQL.": { "db_functions.comparison.test_greatest.GreatestTests." "test_coalesce_workaround", "db_functions.comparison.test_least.LeastTests." "test_coalesce_workaround", }, "Running on MySQL requires utf8mb4 encoding (#18392).": { "model_fields.test_textfield.TextFieldTests.test_emoji", "model_fields.test_charfield.TestCharField.test_emoji", }, "MySQL doesn't support functional indexes on a function that " "returns JSON": { "schema.tests.SchemaTests.test_func_index_json_key_transform", }, "MySQL supports multiplying and dividing DurationFields by a " "scalar value but it's not implemented (#25287).": { "expressions.tests.FTimeDeltaTests.test_durationfield_multiply_divide", }, "UPDATE ... ORDER BY syntax on MySQL/MariaDB does not support ordering by" "related fields.": { "update.tests.AdvancedTests." "test_update_ordered_by_inline_m2m_annotation", "update.tests.AdvancedTests.test_update_ordered_by_m2m_annotation", }, } if "ONLY_FULL_GROUP_BY" in self.connection.sql_mode: skips.update( { "GROUP BY optimization does not work properly when " "ONLY_FULL_GROUP_BY mode is enabled on MySQL, see #31331.": { "aggregation.tests.AggregateTestCase." 
"test_aggregation_subquery_annotation_multivalued", "annotations.tests.NonAggregateAnnotationTestCase." "test_annotation_aggregate_with_m2o", }, } ) if self.connection.mysql_is_mariadb and ( 10, 4, 3, ) < self.connection.mysql_version < (10, 5, 2): skips.update( { "https://jira.mariadb.org/browse/MDEV-19598": { "schema.tests.SchemaTests." "test_alter_not_unique_field_to_primary_key", }, } ) if self.connection.mysql_is_mariadb and ( 10, 4, 12, ) < self.connection.mysql_version < (10, 5): skips.update( { "https://jira.mariadb.org/browse/MDEV-22775": { "schema.tests.SchemaTests." "test_alter_pk_with_self_referential_field", }, } ) if not self.supports_explain_analyze: skips.update( { "MariaDB and MySQL >= 8.0.18 specific.": { "queries.test_explain.ExplainTests.test_mysql_analyze", }, } ) return skips @cached_property def _mysql_storage_engine(self): "Internal method used in Django tests. Don't rely on this from your code" return self.connection.mysql_server_data["default_storage_engine"] @cached_property def allows_auto_pk_0(self): """ Autoincrement primary key can be set to 0 if it doesn't generate new autoincrement values. """ return "NO_AUTO_VALUE_ON_ZERO" in self.connection.sql_mode @cached_property def update_can_self_select(self): return self.connection.mysql_is_mariadb and self.connection.mysql_version >= ( 10, 3, 2, ) @cached_property def can_introspect_foreign_keys(self): "Confirm support for introspected foreign keys" return self._mysql_storage_engine != "MyISAM" @cached_property def introspected_field_types(self): return { **super().introspected_field_types, "BinaryField": "TextField", "BooleanField": "IntegerField", "DurationField": "BigIntegerField", "GenericIPAddressField": "CharField", } @cached_property def can_return_columns_from_insert(self): return self.connection.mysql_is_mariadb and self.connection.mysql_version >= ( 10, 5, 0, ) can_return_rows_from_bulk_insert = property( operator.attrgetter("can_return_columns_from_insert") ) @cached_property def has_zoneinfo_database(self): return self.connection.mysql_server_data["has_zoneinfo_database"] @cached_property def is_sql_auto_is_null_enabled(self): return self.connection.mysql_server_data["sql_auto_is_null"] @cached_property def supports_over_clause(self): if self.connection.mysql_is_mariadb: return True return self.connection.mysql_version >= (8, 0, 2) supports_frame_range_fixed_distance = property( operator.attrgetter("supports_over_clause") ) @cached_property def supports_column_check_constraints(self): if self.connection.mysql_is_mariadb: return True return self.connection.mysql_version >= (8, 0, 16) supports_table_check_constraints = property( operator.attrgetter("supports_column_check_constraints") ) @cached_property def can_introspect_check_constraints(self): if self.connection.mysql_is_mariadb: return True return self.connection.mysql_version >= (8, 0, 16) @cached_property def has_select_for_update_skip_locked(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 6) return self.connection.mysql_version >= (8, 0, 1) @cached_property def has_select_for_update_nowait(self): if self.connection.mysql_is_mariadb: return True return self.connection.mysql_version >= (8, 0, 1) @cached_property def has_select_for_update_of(self): return ( not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 1) ) @cached_property def supports_explain_analyze(self): return self.connection.mysql_is_mariadb or self.connection.mysql_version >= ( 8, 0, 18, ) @cached_property def 
supported_explain_formats(self): # Alias MySQL's TRADITIONAL to TEXT for consistency with other # backends. formats = {"JSON", "TEXT", "TRADITIONAL"} if not self.connection.mysql_is_mariadb and self.connection.mysql_version >= ( 8, 0, 16, ): formats.add("TREE") return formats @cached_property def supports_transactions(self): """ All storage engines except MyISAM support transactions. """ return self._mysql_storage_engine != "MyISAM" uses_savepoints = property(operator.attrgetter("supports_transactions")) can_release_savepoints = property(operator.attrgetter("supports_transactions")) @cached_property def ignores_table_name_case(self): return self.connection.mysql_server_data["lower_case_table_names"] @cached_property def supports_default_in_lead_lag(self): # To be added in https://jira.mariadb.org/browse/MDEV-12981. return not self.connection.mysql_is_mariadb @cached_property def can_introspect_json_field(self): if self.connection.mysql_is_mariadb: return self.can_introspect_check_constraints return True @cached_property def supports_index_column_ordering(self): if self._mysql_storage_engine != "InnoDB": return False if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 8) return self.connection.mysql_version >= (8, 0, 1) @cached_property def supports_expression_indexes(self): return ( not self.connection.mysql_is_mariadb and self._mysql_storage_engine != "MyISAM" and self.connection.mysql_version >= (8, 0, 13) ) @cached_property def can_rename_index(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 5, 2) return True
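# Illustrative sketch, not part of Django: the cached_property flags above gate
# features on the reported server version. FakeConnection and its attributes
# are invented for this example; the real code reads them from the live
# connection wrapper.
from functools import cached_property


class FakeConnection:
    mysql_is_mariadb = False
    mysql_version = (8, 0, 18)


class ExplainSupport:
    def __init__(self, connection):
        self.connection = connection

    @cached_property
    def supports_explain_analyze(self):
        # Same gate as above: always on MariaDB, MySQL only from 8.0.18.
        return self.connection.mysql_is_mariadb or self.connection.mysql_version >= (
            8,
            0,
            18,
        )


# ExplainSupport(FakeConnection()).supports_explain_analyze  ->  True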
from django.core.exceptions import FieldError from django.db.models.expressions import Col from django.db.models.sql import compiler class SQLCompiler(compiler.SQLCompiler): def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name sql, params = self.as_sql() return ( "(%s) IN (%s)" % ( ", ".join("%s.%s" % (qn(alias), qn2(column)) for column in columns), sql, ), params, ) class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler): pass class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler): def as_sql(self): # Prefer the non-standard DELETE FROM syntax over the SQL generated by # the SQLDeleteCompiler's default implementation when multiple tables # are involved since MySQL/MariaDB will generate a more efficient query # plan than when using a subquery. where, having, qualify = self.query.where.split_having_qualify( must_group_by=self.query.group_by is not None ) if self.single_alias or having or qualify: # DELETE FROM cannot be used when filtering against aggregates or # window functions as it doesn't allow for GROUP BY/HAVING clauses # and the subquery wrapping (necessary to emulate QUALIFY). return super().as_sql() result = [ "DELETE %s FROM" % self.quote_name_unless_alias(self.query.get_initial_alias()) ] from_sql, from_params = self.get_from_clause() result.extend(from_sql) where_sql, where_params = self.compile(where) if where_sql: result.append("WHERE %s" % where_sql) return " ".join(result), tuple(from_params) + tuple(where_params) class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler): def as_sql(self): update_query, update_params = super().as_sql() # MySQL and MariaDB support UPDATE ... ORDER BY syntax. if self.query.order_by: order_by_sql = [] order_by_params = [] db_table = self.query.get_meta().db_table try: for resolved, (sql, params, _) in self.get_order_by(): if ( isinstance(resolved.expression, Col) and resolved.expression.alias != db_table ): # Ignore ordering if it contains joined fields, because # they cannot be used in the ORDER BY clause. raise FieldError order_by_sql.append(sql) order_by_params.extend(params) update_query += " ORDER BY " + ", ".join(order_by_sql) update_params += tuple(order_by_params) except FieldError: # Ignore ordering if it contains annotations, because they're # removed in .update() and cannot be resolved. pass return update_query, update_params class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler): pass
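# Illustrative sketch, not part of Django: the tuple-IN condition built by
# as_subquery_condition() above, with identifier quoting simplified. The alias,
# columns, and inner SQL below are invented for the example.
def subquery_condition(alias, columns, inner_sql):
    cols = ", ".join("%s.%s" % (alias, column) for column in columns)
    return "(%s) IN (%s)" % (cols, inner_sql)


# subquery_condition("U0", ["author_id", "year"], "SELECT author_id, year FROM app_book")
# -> '(U0.author_id, U0.year) IN (SELECT author_id, year FROM app_book)'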
from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.models import NOT_PROVIDED, F, UniqueConstraint from django.db.models.constants import LOOKUP_SEP class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s" sql_alter_column_null = "MODIFY %(column)s %(type)s NULL" sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL" sql_alter_column_type = "MODIFY %(column)s %(type)s" sql_alter_column_collate = "MODIFY %(column)s %(type)s%(collation)s" sql_alter_column_no_default_null = "ALTER COLUMN %(column)s SET DEFAULT NULL" # No 'CASCADE' which works as a no-op in MySQL but is undocumented sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s" sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s" sql_create_column_inline_fk = ( ", ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) " "REFERENCES %(to_table)s(%(to_column)s)" ) sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s" sql_delete_index = "DROP INDEX %(name)s ON %(table)s" sql_rename_index = "ALTER TABLE %(table)s RENAME INDEX %(old_name)s TO %(new_name)s" sql_create_pk = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)" ) sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY" sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s" @property def sql_delete_check(self): if self.connection.mysql_is_mariadb: # The name of the column check constraint is the same as the field # name on MariaDB. Adding IF EXISTS clause prevents migrations # crash. Constraint is removed during a "MODIFY" column statement. return "ALTER TABLE %(table)s DROP CONSTRAINT IF EXISTS %(name)s" return "ALTER TABLE %(table)s DROP CHECK %(name)s" @property def sql_rename_column(self): # MariaDB >= 10.5.2 and MySQL >= 8.0.4 support an # "ALTER TABLE ... RENAME COLUMN" statement. if self.connection.mysql_is_mariadb: if self.connection.mysql_version >= (10, 5, 2): return super().sql_rename_column elif self.connection.mysql_version >= (8, 0, 4): return super().sql_rename_column return "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s" def quote_value(self, value): self.connection.ensure_connection() if isinstance(value, str): value = value.replace("%", "%%") # MySQLdb escapes to string, PyMySQL to bytes. quoted = self.connection.connection.escape( value, self.connection.connection.encoders ) if isinstance(value, str) and isinstance(quoted, bytes): quoted = quoted.decode() return quoted def _is_limited_data_type(self, field): db_type = field.db_type(self.connection) return ( db_type is not None and db_type.lower() in self.connection._limited_data_types ) def skip_default(self, field): if not self._supports_limited_data_type_defaults: return self._is_limited_data_type(field) return False def skip_default_on_alter(self, field): if self._is_limited_data_type(field) and not self.connection.mysql_is_mariadb: # MySQL doesn't support defaults for BLOB and TEXT in the # ALTER COLUMN statement. return True return False @property def _supports_limited_data_type_defaults(self): # MariaDB and MySQL >= 8.0.13 support defaults for BLOB and TEXT. 
if self.connection.mysql_is_mariadb: return True return self.connection.mysql_version >= (8, 0, 13) def _column_default_sql(self, field): if ( not self.connection.mysql_is_mariadb and self._supports_limited_data_type_defaults and self._is_limited_data_type(field) ): # MySQL supports defaults for BLOB and TEXT columns only if the # default value is written as an expression i.e. in parentheses. return "(%s)" return super()._column_default_sql(field) def add_field(self, model, field): super().add_field(model, field) # Simulate the effect of a one-off default. # field.default may be unhashable, so a set isn't used for "in" check. if self.skip_default(field) and field.default not in (None, NOT_PROVIDED): effective_default = self.effective_default(field) self.execute( "UPDATE %(table)s SET %(column)s = %%s" % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), }, [effective_default], ) def remove_constraint(self, model, constraint): if isinstance(constraint, UniqueConstraint): self._create_missing_fk_index( model, fields=constraint.fields, expressions=constraint.expressions, ) super().remove_constraint(model, constraint) def remove_index(self, model, index): self._create_missing_fk_index( model, fields=[field_name for field_name, _ in index.fields_orders], expressions=index.expressions, ) super().remove_index(model, index) def _field_should_be_indexed(self, model, field): if not super()._field_should_be_indexed(model, field): return False storage = self.connection.introspection.get_storage_engine( self.connection.cursor(), model._meta.db_table ) # No need to create an index for ForeignKey fields except if # db_constraint=False because the index from that constraint won't be # created. if ( storage == "InnoDB" and field.get_internal_type() == "ForeignKey" and field.db_constraint ): return False return not self._is_limited_data_type(field) def _create_missing_fk_index( self, model, *, fields, expressions=None, ): """ MySQL can remove an implicit FK index on a field when that field is covered by another index like a unique_together. "covered" here means that the more complex index has the FK field as its first field (see https://bugs.mysql.com/bug.php?id=37910). Manually create an implicit FK index to make it possible to remove the composed index. """ first_field_name = None if fields: first_field_name = fields[0] elif ( expressions and self.connection.features.supports_expression_indexes and isinstance(expressions[0], F) and LOOKUP_SEP not in expressions[0].name ): first_field_name = expressions[0].name if not first_field_name: return first_field = model._meta.get_field(first_field_name) if first_field.get_internal_type() == "ForeignKey": column = self.connection.introspection.identifier_converter( first_field.column ) with self.connection.cursor() as cursor: constraint_names = [ name for name, infodict in self.connection.introspection.get_constraints( cursor, model._meta.db_table ).items() if infodict["index"] and infodict["columns"][0] == column ] # There are no other indexes that starts with the FK field, only # the index that is expected to be deleted. if len(constraint_names) == 1: self.execute( self._create_index_sql(model, fields=[first_field], suffix="") ) def _delete_composed_index(self, model, fields, *args): self._create_missing_fk_index(model, fields=fields) return super()._delete_composed_index(model, fields, *args) def _set_field_new_type_null_status(self, field, new_type): """ Keep the null property of the old field. 
If it has changed, it will be handled separately. """ if field.null: new_type += " NULL" else: new_type += " NOT NULL" return new_type def _alter_column_type_sql(self, model, old_field, new_field, new_type): new_type = self._set_field_new_type_null_status(old_field, new_type) return super()._alter_column_type_sql(model, old_field, new_field, new_type) def _rename_field_sql(self, table, old_field, new_field, new_type): new_type = self._set_field_new_type_null_status(old_field, new_type) return super()._rename_field_sql(table, old_field, new_field, new_type)
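# Illustrative sketch, not part of Django: rendering one of the MODIFY-based
# templates above by hand. The table, column, and type names are invented, and
# the outer "ALTER TABLE ... <changes>" wrapper is assumed to follow the base
# schema editor's composition.
sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
changes = sql_alter_column_not_null % {"column": "`title`", "type": "varchar(100)"}
statement = "ALTER TABLE %s %s" % ("`app_book`", changes)
# statement == "ALTER TABLE `app_book` MODIFY `title` varchar(100) NOT NULL"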
from psycopg2.extras import Inet from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations from django.db.backends.utils import split_tzname_delta from django.db.models.constants import OnConflict class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = "varchar" explain_prefix = "EXPLAIN" explain_options = frozenset( [ "ANALYZE", "BUFFERS", "COSTS", "SETTINGS", "SUMMARY", "TIMING", "VERBOSE", "WAL", ] ) cast_data_types = { "AutoField": "integer", "BigAutoField": "bigint", "SmallAutoField": "smallint", } def unification_cast_sql(self, output_field): internal_type = output_field.get_internal_type() if internal_type in ( "GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField", ): # PostgreSQL will resolve a union as type 'text' if input types are # 'unknown'. # https://www.postgresql.org/docs/current/typeconv-union-case.html # These fields cannot be implicitly cast back in the default # PostgreSQL configuration so we need to explicitly cast them. # We must also remove components of the type within brackets: # varchar(255) -> varchar. return ( "CAST(%%s AS %s)" % output_field.db_type(self.connection).split("(")[0] ) return "%s" def date_extract_sql(self, lookup_type, sql, params): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT extract_sql = f"EXTRACT(%s FROM {sql})" extract_param = lookup_type if lookup_type == "week_day": # For consistency across backends, we return Sunday=1, Saturday=7. extract_sql = f"EXTRACT(%s FROM {sql}) + 1" extract_param = "dow" elif lookup_type == "iso_week_day": extract_param = "isodow" elif lookup_type == "iso_year": extract_param = "isoyear" return extract_sql, (extract_param, *params) def date_trunc_sql(self, lookup_type, sql, params, tzname=None): sql, params = self._convert_sql_to_tz(sql, params, tzname) # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return f"DATE_TRUNC(%s, {sql})", (lookup_type, *params) def _prepare_tzname_delta(self, tzname): tzname, sign, offset = split_tzname_delta(tzname) if offset: sign = "-" if sign == "+" else "+" return f"{tzname}{sign}{offset}" return tzname def _convert_sql_to_tz(self, sql, params, tzname): if tzname and settings.USE_TZ: tzname_param = self._prepare_tzname_delta(tzname) return f"{sql} AT TIME ZONE %s", (*params, tzname_param) return sql, params def datetime_cast_date_sql(self, sql, params, tzname): sql, params = self._convert_sql_to_tz(sql, params, tzname) return f"({sql})::date", params def datetime_cast_time_sql(self, sql, params, tzname): sql, params = self._convert_sql_to_tz(sql, params, tzname) return f"({sql})::time", params def datetime_extract_sql(self, lookup_type, sql, params, tzname): sql, params = self._convert_sql_to_tz(sql, params, tzname) if lookup_type == "second": # Truncate fractional seconds. return ( f"EXTRACT(%s FROM DATE_TRUNC(%s, {sql}))", ("second", "second", *params), ) return self.date_extract_sql(lookup_type, sql, params) def datetime_trunc_sql(self, lookup_type, sql, params, tzname): sql, params = self._convert_sql_to_tz(sql, params, tzname) # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return f"DATE_TRUNC(%s, {sql})", (lookup_type, *params) def time_extract_sql(self, lookup_type, sql, params): if lookup_type == "second": # Truncate fractional seconds. 
return ( f"EXTRACT(%s FROM DATE_TRUNC(%s, {sql}))", ("second", "second", *params), ) return self.date_extract_sql(lookup_type, sql, params) def time_trunc_sql(self, lookup_type, sql, params, tzname=None): sql, params = self._convert_sql_to_tz(sql, params, tzname) return f"DATE_TRUNC(%s, {sql})::time", (lookup_type, *params) def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_rows(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the tuple of returned data. """ return cursor.fetchall() def lookup_cast(self, lookup_type, internal_type=None): lookup = "%s" # Cast text lookups to text to allow things like filter(x__contains=4) if lookup_type in ( "iexact", "contains", "icontains", "startswith", "istartswith", "endswith", "iendswith", "regex", "iregex", ): if internal_type in ("IPAddressField", "GenericIPAddressField"): lookup = "HOST(%s)" # RemovedInDjango51Warning. elif internal_type in ("CICharField", "CIEmailField", "CITextField"): lookup = "%s::citext" else: lookup = "%s::text" # Use UPPER(x) for case-insensitive lookups; it's faster. if lookup_type in ("iexact", "icontains", "istartswith", "iendswith"): lookup = "UPPER(%s)" % lookup return lookup def no_limit_value(self): return None def prepare_sql_script(self, sql): return [sql] def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. return '"%s"' % name def set_time_zone_sql(self): return "SET TIME ZONE %s" def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): if not tables: return [] # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows us # to truncate tables referenced by a foreign key in any other table. sql_parts = [ style.SQL_KEYWORD("TRUNCATE"), ", ".join(style.SQL_FIELD(self.quote_name(table)) for table in tables), ] if reset_sequences: sql_parts.append(style.SQL_KEYWORD("RESTART IDENTITY")) if allow_cascade: sql_parts.append(style.SQL_KEYWORD("CASCADE")) return ["%s;" % " ".join(sql_parts)] def sequence_reset_by_name_sql(self, style, sequences): # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements # to reset sequence indices sql = [] for sequence_info in sequences: table_name = sequence_info["table"] # 'id' will be the case if it's an m2m using an autogenerated # intermediate table (see BaseDatabaseIntrospection.sequence_list). column_name = sequence_info["column"] or "id" sql.append( "%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % ( style.SQL_KEYWORD("SELECT"), style.SQL_TABLE(self.quote_name(table_name)), style.SQL_FIELD(column_name), ) ) return sql def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def sequence_reset_sql(self, style, model_list): from django.db import models output = [] qn = self.quote_name for model in model_list: # Use `coalesce` to set the sequence for each model to the max pk # value if there are records, or 1 if there are none. Set the # `is_called` property (the third argument to `setval`) to true if # there are records (as the max pk value is already in use), # otherwise set it to false. Use pg_get_serial_sequence to get the # underlying sequence name from the table name and column name. 
for f in model._meta.local_fields: if isinstance(f, models.AutoField): output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD("SELECT"), style.SQL_TABLE(qn(model._meta.db_table)), style.SQL_FIELD(f.column), style.SQL_FIELD(qn(f.column)), style.SQL_FIELD(qn(f.column)), style.SQL_KEYWORD("IS NOT"), style.SQL_KEYWORD("FROM"), style.SQL_TABLE(qn(model._meta.db_table)), ) ) # Only one AutoField is allowed per model, so don't bother # continuing. break return output def prep_for_iexact_query(self, x): return x def max_name_length(self): """ Return the maximum length of an identifier. The maximum length of an identifier is 63 by default, but can be changed by recompiling PostgreSQL after editing the NAMEDATALEN macro in src/include/pg_config_manual.h. This implementation returns 63, but can be overridden by a custom database backend that inherits most of its behavior from this one. """ return 63 def distinct_sql(self, fields, params): if fields: params = [param for param_list in params for param in param_list] return (["DISTINCT ON (%s)" % ", ".join(fields)], params) else: return ["DISTINCT"], [] def last_executed_query(self, cursor, sql, params): # https://www.psycopg.org/docs/cursor.html#cursor.query # The query attribute is a Psycopg extension to the DB API 2.0. if cursor.query is not None: return cursor.query.decode() return None def return_insert_columns(self, fields): if not fields: return "", () columns = [ "%s.%s" % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), ) for field in fields ] return "RETURNING %s" % ", ".join(columns), () def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def adapt_datefield_value(self, value): return value def adapt_datetimefield_value(self, value): return value def adapt_timefield_value(self, value): return value def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None): return value def adapt_ipaddressfield_value(self, value): if value: return Inet(value) return None def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == "DateField": lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), params return super().subtract_temporals(internal_type, lhs, rhs) def explain_query_prefix(self, format=None, **options): extra = {} # Normalize options. if options: options = { name.upper(): "true" if value else "false" for name, value in options.items() } for valid_option in self.explain_options: value = options.pop(valid_option, None) if value is not None: extra[valid_option] = value prefix = super().explain_query_prefix(format, **options) if format: extra["FORMAT"] = format if extra: prefix += " (%s)" % ", ".join("%s %s" % i for i in extra.items()) return prefix def on_conflict_suffix_sql(self, fields, on_conflict, update_fields, unique_fields): if on_conflict == OnConflict.IGNORE: return "ON CONFLICT DO NOTHING" if on_conflict == OnConflict.UPDATE: return "ON CONFLICT(%s) DO UPDATE SET %s" % ( ", ".join(map(self.quote_name, unique_fields)), ", ".join( [ f"{field} = EXCLUDED.{field}" for field in map(self.quote_name, update_fields) ] ), ) return super().on_conflict_suffix_sql( fields, on_conflict, update_fields, unique_fields, )
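# Illustrative sketch, not part of Django: a simplified version of how
# explain_query_prefix() above folds options into the EXPLAIN prefix (the real
# method also validates option names against explain_options). The option
# values are invented for the example.
def explain_prefix(format=None, **options):
    extra = {
        name.upper(): "true" if value else "false" for name, value in options.items()
    }
    if format:
        extra["FORMAT"] = format
    prefix = "EXPLAIN"
    if extra:
        prefix += " (%s)" % ", ".join("%s %s" % item for item in extra.items())
    return prefix


# explain_prefix(format="JSON", analyze=True)  ->  'EXPLAIN (ANALYZE true, FORMAT JSON)'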
import psycopg2 from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.backends.ddl_references import IndexColumns from django.db.backends.utils import strip_quotes class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): # Setting all constraints to IMMEDIATE to allow changing data in the same # transaction. sql_update_with_default = ( "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL" "; SET CONSTRAINTS ALL IMMEDIATE" ) sql_alter_sequence_type = "ALTER SEQUENCE IF EXISTS %(sequence)s AS %(type)s" sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE" sql_create_index = ( "CREATE INDEX %(name)s ON %(table)s%(using)s " "(%(columns)s)%(include)s%(extra)s%(condition)s" ) sql_create_index_concurrently = ( "CREATE INDEX CONCURRENTLY %(name)s ON %(table)s%(using)s " "(%(columns)s)%(include)s%(extra)s%(condition)s" ) sql_delete_index = "DROP INDEX IF EXISTS %(name)s" sql_delete_index_concurrently = "DROP INDEX CONCURRENTLY IF EXISTS %(name)s" # Setting the constraint to IMMEDIATE to allow changing data in the same # transaction. sql_create_column_inline_fk = ( "CONSTRAINT %(name)s REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s" "; SET CONSTRAINTS %(namespace)s%(name)s IMMEDIATE" ) # Setting the constraint to IMMEDIATE runs any deferred checks to allow # dropping it in the same transaction. sql_delete_fk = ( "SET CONSTRAINTS %(name)s IMMEDIATE; " "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" ) sql_delete_procedure = "DROP FUNCTION %(procedure)s(%(param_types)s)" sql_add_identity = ( "ALTER TABLE %(table)s ALTER COLUMN %(column)s ADD " "GENERATED BY DEFAULT AS IDENTITY" ) sql_drop_indentity = ( "ALTER TABLE %(table)s ALTER COLUMN %(column)s DROP IDENTITY IF EXISTS" ) def quote_value(self, value): if isinstance(value, str): value = value.replace("%", "%%") adapted = psycopg2.extensions.adapt(value) if hasattr(adapted, "encoding"): adapted.encoding = "utf8" # getquoted() returns a quoted bytestring of the adapted value. return adapted.getquoted().decode() def _field_indexes_sql(self, model, field): output = super()._field_indexes_sql(model, field) like_index_statement = self._create_like_index_sql(model, field) if like_index_statement is not None: output.append(like_index_statement) return output def _field_data_type(self, field): if field.is_relation: return field.rel_db_type(self.connection) return self.connection.data_types.get( field.get_internal_type(), field.db_type(self.connection), ) def _field_base_data_types(self, field): # Yield base data types for array fields. if field.base_field.get_internal_type() == "ArrayField": yield from self._field_base_data_types(field.base_field) else: yield self._field_data_type(field.base_field) def _create_like_index_sql(self, model, field): """ Return the statement to create an index with varchar operator pattern when the column type is 'varchar' or 'text', otherwise return None. """ db_type = field.db_type(connection=self.connection) if db_type is not None and (field.db_index or field.unique): # Fields with database column types of `varchar` and `text` need # a second index that specifies their operator class, which is # needed when performing correct LIKE queries outside the # C locale. See #12234. # # The same doesn't apply to array fields such as varchar[size] # and text[size], so skip them. if "[" in db_type: return None # Non-deterministic collations on Postgresql don't support indexes # for operator classes varchar_pattern_ops/text_pattern_ops. 
if getattr(field, "db_collation", None): return None if db_type.startswith("varchar"): return self._create_index_sql( model, fields=[field], suffix="_like", opclasses=["varchar_pattern_ops"], ) elif db_type.startswith("text"): return self._create_index_sql( model, fields=[field], suffix="_like", opclasses=["text_pattern_ops"], ) return None def _using_sql(self, new_field, old_field): using_sql = " USING %(column)s::%(type)s" new_internal_type = new_field.get_internal_type() old_internal_type = old_field.get_internal_type() if new_internal_type == "ArrayField" and new_internal_type == old_internal_type: # Compare base data types for array fields. if list(self._field_base_data_types(old_field)) != list( self._field_base_data_types(new_field) ): return using_sql elif self._field_data_type(old_field) != self._field_data_type(new_field): return using_sql return "" def _get_sequence_name(self, table, column): with self.connection.cursor() as cursor: for sequence in self.connection.introspection.get_sequences(cursor, table): if sequence["column"] == column: return sequence["name"] return None def _alter_column_type_sql(self, model, old_field, new_field, new_type): # Drop indexes on varchar/text/citext columns that are changing to a # different type. old_db_params = old_field.db_parameters(connection=self.connection) old_type = old_db_params["type"] if (old_field.db_index or old_field.unique) and ( (old_type.startswith("varchar") and not new_type.startswith("varchar")) or (old_type.startswith("text") and not new_type.startswith("text")) or (old_type.startswith("citext") and not new_type.startswith("citext")) ): index_name = self._create_index_name( model._meta.db_table, [old_field.column], suffix="_like" ) self.execute(self._delete_index_sql(model, index_name)) self.sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s" # Cast when data type changed. if using_sql := self._using_sql(new_field, old_field): self.sql_alter_column_type += using_sql new_internal_type = new_field.get_internal_type() old_internal_type = old_field.get_internal_type() # Make ALTER TYPE with IDENTITY make sense. table = strip_quotes(model._meta.db_table) auto_field_types = { "AutoField", "BigAutoField", "SmallAutoField", } old_is_auto = old_internal_type in auto_field_types new_is_auto = new_internal_type in auto_field_types if new_is_auto and not old_is_auto: column = strip_quotes(new_field.column) return ( ( self.sql_alter_column_type % { "column": self.quote_name(column), "type": new_type, }, [], ), [ ( self.sql_add_identity % { "table": self.quote_name(table), "column": self.quote_name(column), }, [], ), ], ) elif old_is_auto and not new_is_auto: # Drop IDENTITY if exists (pre-Django 4.1 serial columns don't have # it). self.execute( self.sql_drop_indentity % { "table": self.quote_name(table), "column": self.quote_name(strip_quotes(new_field.column)), } ) column = strip_quotes(new_field.column) fragment, _ = super()._alter_column_type_sql( model, old_field, new_field, new_type ) # Drop the sequence if exists (Django 4.1+ identity columns don't # have it). 
other_actions = [] if sequence_name := self._get_sequence_name(table, column): other_actions = [ ( self.sql_delete_sequence % { "sequence": self.quote_name(sequence_name), }, [], ) ] return fragment, other_actions elif new_is_auto and old_is_auto and old_internal_type != new_internal_type: fragment, _ = super()._alter_column_type_sql( model, old_field, new_field, new_type ) column = strip_quotes(new_field.column) db_types = { "AutoField": "integer", "BigAutoField": "bigint", "SmallAutoField": "smallint", } # Alter the sequence type if exists (Django 4.1+ identity columns # don't have it). other_actions = [] if sequence_name := self._get_sequence_name(table, column): other_actions = [ ( self.sql_alter_sequence_type % { "sequence": self.quote_name(sequence_name), "type": db_types[new_internal_type], }, [], ), ] return fragment, other_actions else: return super()._alter_column_type_sql(model, old_field, new_field, new_type) def _alter_column_collation_sql( self, model, new_field, new_type, new_collation, old_field ): sql = self.sql_alter_column_collate # Cast when data type changed. if using_sql := self._using_sql(new_field, old_field): sql += using_sql return ( sql % { "column": self.quote_name(new_field.column), "type": new_type, "collation": " " + self._collate_sql(new_collation) if new_collation else "", }, [], ) def _alter_field( self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False, ): super()._alter_field( model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict, ) # Added an index? Create any PostgreSQL-specific indexes. if (not (old_field.db_index or old_field.unique) and new_field.db_index) or ( not old_field.unique and new_field.unique ): like_index_statement = self._create_like_index_sql(model, new_field) if like_index_statement is not None: self.execute(like_index_statement) # Removed an index? Drop any PostgreSQL-specific indexes. if old_field.unique and not (new_field.db_index or new_field.unique): index_to_remove = self._create_index_name( model._meta.db_table, [old_field.column], suffix="_like" ) self.execute(self._delete_index_sql(model, index_to_remove)) def _index_columns(self, table, columns, col_suffixes, opclasses): if opclasses: return IndexColumns( table, columns, self.quote_name, col_suffixes=col_suffixes, opclasses=opclasses, ) return super()._index_columns(table, columns, col_suffixes, opclasses) def add_index(self, model, index, concurrently=False): self.execute( index.create_sql(model, self, concurrently=concurrently), params=None ) def remove_index(self, model, index, concurrently=False): self.execute(index.remove_sql(model, self, concurrently=concurrently)) def _delete_index_sql(self, model, name, sql=None, concurrently=False): sql = ( self.sql_delete_index_concurrently if concurrently else self.sql_delete_index ) return super()._delete_index_sql(model, name, sql) def _create_index_sql( self, model, *, fields=None, name=None, suffix="", using="", db_tablespace=None, col_suffixes=(), sql=None, opclasses=(), condition=None, concurrently=False, include=None, expressions=None, ): sql = ( self.sql_create_index if not concurrently else self.sql_create_index_concurrently ) return super()._create_index_sql( model, fields=fields, name=name, suffix=suffix, using=using, db_tablespace=db_tablespace, col_suffixes=col_suffixes, sql=sql, opclasses=opclasses, condition=condition, include=include, expressions=expressions, )
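# Illustrative sketch, not part of Django: roughly what the extra "_like" index
# created by _create_like_index_sql() above looks like for a varchar column.
# The table, column, and (shortened) index name are invented; real index names
# also embed a hash suffix.
like_index_sql = (
    'CREATE INDEX "app_author_name_like" ON "app_author" '
    '("name" varchar_pattern_ops)'
)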
import signal from django.db.backends.base.client import BaseDatabaseClient class DatabaseClient(BaseDatabaseClient): executable_name = "psql" @classmethod def settings_to_cmd_args_env(cls, settings_dict, parameters): args = [cls.executable_name] options = settings_dict.get("OPTIONS", {}) host = settings_dict.get("HOST") port = settings_dict.get("PORT") dbname = settings_dict.get("NAME") user = settings_dict.get("USER") passwd = settings_dict.get("PASSWORD") passfile = options.get("passfile") service = options.get("service") sslmode = options.get("sslmode") sslrootcert = options.get("sslrootcert") sslcert = options.get("sslcert") sslkey = options.get("sslkey") if not dbname and not service: # Connect to the default 'postgres' db. dbname = "postgres" if user: args += ["-U", user] if host: args += ["-h", host] if port: args += ["-p", str(port)] args.extend(parameters) if dbname: args += [dbname] env = {} if passwd: env["PGPASSWORD"] = str(passwd) if service: env["PGSERVICE"] = str(service) if sslmode: env["PGSSLMODE"] = str(sslmode) if sslrootcert: env["PGSSLROOTCERT"] = str(sslrootcert) if sslcert: env["PGSSLCERT"] = str(sslcert) if sslkey: env["PGSSLKEY"] = str(sslkey) if passfile: env["PGPASSFILE"] = str(passfile) return args, (env or None) def runshell(self, parameters): sigint_handler = signal.getsignal(signal.SIGINT) try: # Allow SIGINT to pass to psql to abort queries. signal.signal(signal.SIGINT, signal.SIG_IGN) super().runshell(parameters) finally: # Restore the original SIGINT handler. signal.signal(signal.SIGINT, sigint_handler)
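# Illustrative sketch, not part of Django: the psql invocation that
# settings_to_cmd_args_env() above produces for a hypothetical settings dict.
# All values below are invented for the example.
settings_dict = {
    "NAME": "mydb",
    "USER": "alice",
    "PASSWORD": "s3cret",
    "HOST": "db.example.com",
    "PORT": "5432",
    "OPTIONS": {},
}
# Expected result:
#   args = ["psql", "-U", "alice", "-h", "db.example.com", "-p", "5432", "mydb"]
#   env  = {"PGPASSWORD": "s3cret"}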
""" SQLite backend for the sqlite3 module in the standard library. """ import datetime import decimal import warnings from itertools import chain from sqlite3 import dbapi2 as Database from django.core.exceptions import ImproperlyConfigured from django.db import IntegrityError from django.db.backends.base.base import BaseDatabaseWrapper from django.utils.asyncio import async_unsafe from django.utils.dateparse import parse_date, parse_datetime, parse_time from django.utils.regex_helper import _lazy_re_compile from ._functions import register as register_functions from .client import DatabaseClient from .creation import DatabaseCreation from .features import DatabaseFeatures from .introspection import DatabaseIntrospection from .operations import DatabaseOperations from .schema import DatabaseSchemaEditor def decoder(conv_func): """ Convert bytestrings from Python's sqlite3 interface to a regular string. """ return lambda s: conv_func(s.decode()) def adapt_date(val): return val.isoformat() def adapt_datetime(val): return val.isoformat(" ") Database.register_converter("bool", b"1".__eq__) Database.register_converter("date", decoder(parse_date)) Database.register_converter("time", decoder(parse_time)) Database.register_converter("datetime", decoder(parse_datetime)) Database.register_converter("timestamp", decoder(parse_datetime)) Database.register_adapter(decimal.Decimal, str) Database.register_adapter(datetime.date, adapt_date) Database.register_adapter(datetime.datetime, adapt_datetime) class DatabaseWrapper(BaseDatabaseWrapper): vendor = "sqlite" display_name = "SQLite" # SQLite doesn't actually support most of these types, but it "does the right # thing" given more verbose field definitions, so leave them as is so that # schema inspection is more useful. data_types = { "AutoField": "integer", "BigAutoField": "integer", "BinaryField": "BLOB", "BooleanField": "bool", "CharField": "varchar(%(max_length)s)", "DateField": "date", "DateTimeField": "datetime", "DecimalField": "decimal", "DurationField": "bigint", "FileField": "varchar(%(max_length)s)", "FilePathField": "varchar(%(max_length)s)", "FloatField": "real", "IntegerField": "integer", "BigIntegerField": "bigint", "IPAddressField": "char(15)", "GenericIPAddressField": "char(39)", "JSONField": "text", "OneToOneField": "integer", "PositiveBigIntegerField": "bigint unsigned", "PositiveIntegerField": "integer unsigned", "PositiveSmallIntegerField": "smallint unsigned", "SlugField": "varchar(%(max_length)s)", "SmallAutoField": "integer", "SmallIntegerField": "smallint", "TextField": "text", "TimeField": "time", "UUIDField": "char(32)", } data_type_check_constraints = { "PositiveBigIntegerField": '"%(column)s" >= 0', "JSONField": '(JSON_VALID("%(column)s") OR "%(column)s" IS NULL)', "PositiveIntegerField": '"%(column)s" >= 0', "PositiveSmallIntegerField": '"%(column)s" >= 0', } data_types_suffix = { "AutoField": "AUTOINCREMENT", "BigAutoField": "AUTOINCREMENT", "SmallAutoField": "AUTOINCREMENT", } # SQLite requires LIKE statements to include an ESCAPE clause if the value # being escaped has a percent or underscore in it. # See https://www.sqlite.org/lang_expr.html for an explanation. 
operators = { "exact": "= %s", "iexact": "LIKE %s ESCAPE '\\'", "contains": "LIKE %s ESCAPE '\\'", "icontains": "LIKE %s ESCAPE '\\'", "regex": "REGEXP %s", "iregex": "REGEXP '(?i)' || %s", "gt": "> %s", "gte": ">= %s", "lt": "< %s", "lte": "<= %s", "startswith": "LIKE %s ESCAPE '\\'", "endswith": "LIKE %s ESCAPE '\\'", "istartswith": "LIKE %s ESCAPE '\\'", "iendswith": "LIKE %s ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { "contains": r"LIKE '%%' || {} || '%%' ESCAPE '\'", "icontains": r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'", "startswith": r"LIKE {} || '%%' ESCAPE '\'", "istartswith": r"LIKE UPPER({}) || '%%' ESCAPE '\'", "endswith": r"LIKE '%%' || {} ESCAPE '\'", "iendswith": r"LIKE '%%' || UPPER({}) ESCAPE '\'", } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations def get_connection_params(self): settings_dict = self.settings_dict if not settings_dict["NAME"]: raise ImproperlyConfigured( "settings.DATABASES is improperly configured. " "Please supply the NAME value." ) kwargs = { "database": settings_dict["NAME"], "detect_types": Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES, **settings_dict["OPTIONS"], } # Always allow the underlying SQLite connection to be shareable # between multiple threads. The safe-guarding will be handled at a # higher level by the `BaseDatabaseWrapper.allow_thread_sharing` # property. This is necessary as the shareability is disabled by # default in pysqlite and it cannot be changed once a connection is # opened. if "check_same_thread" in kwargs and kwargs["check_same_thread"]: warnings.warn( "The `check_same_thread` option was provided and set to " "True. It will be overridden with False. Use the " "`DatabaseWrapper.allow_thread_sharing` property instead " "for controlling thread shareability.", RuntimeWarning, ) kwargs.update({"check_same_thread": False, "uri": True}) return kwargs def get_database_version(self): return self.Database.sqlite_version_info @async_unsafe def get_new_connection(self, conn_params): conn = Database.connect(**conn_params) register_functions(conn) conn.execute("PRAGMA foreign_keys = ON") # The macOS bundled SQLite defaults legacy_alter_table ON, which # prevents atomic table renames (feature supports_atomic_references_rename) conn.execute("PRAGMA legacy_alter_table = OFF") return conn def create_cursor(self, name=None): return self.connection.cursor(factory=SQLiteCursorWrapper) @async_unsafe def close(self): self.validate_thread_sharing() # If database is in memory, closing the connection destroys the # database. To prevent accidental data loss, ignore close requests on # an in-memory db. if not self.is_in_memory_db(): BaseDatabaseWrapper.close(self) def _savepoint_allowed(self): # When 'isolation_level' is not None, sqlite3 commits before each # savepoint; it's a bug. 
When it is None, savepoints don't make sense # because autocommit is enabled. The only exception is inside 'atomic' # blocks. To work around that bug, on SQLite, 'atomic' starts a # transaction explicitly rather than simply disable autocommit. return self.in_atomic_block def _set_autocommit(self, autocommit): if autocommit: level = None else: # sqlite3's internal default is ''. It's different from None. # See Modules/_sqlite/connection.c. level = "" # 'isolation_level' is a misleading API. # SQLite always runs at the SERIALIZABLE isolation level. with self.wrap_database_errors: self.connection.isolation_level = level def disable_constraint_checking(self): with self.cursor() as cursor: cursor.execute("PRAGMA foreign_keys = OFF") # Foreign key constraints cannot be turned off while in a multi- # statement transaction. Fetch the current state of the pragma # to determine if constraints are effectively disabled. enabled = cursor.execute("PRAGMA foreign_keys").fetchone()[0] return not bool(enabled) def enable_constraint_checking(self): with self.cursor() as cursor: cursor.execute("PRAGMA foreign_keys = ON") def check_constraints(self, table_names=None): """ Check each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. """ if self.features.supports_pragma_foreign_key_check: with self.cursor() as cursor: if table_names is None: violations = cursor.execute("PRAGMA foreign_key_check").fetchall() else: violations = chain.from_iterable( cursor.execute( "PRAGMA foreign_key_check(%s)" % self.ops.quote_name(table_name) ).fetchall() for table_name in table_names ) # See https://www.sqlite.org/pragma.html#pragma_foreign_key_check for ( table_name, rowid, referenced_table_name, foreign_key_index, ) in violations: foreign_key = cursor.execute( "PRAGMA foreign_key_list(%s)" % self.ops.quote_name(table_name) ).fetchall()[foreign_key_index] column_name, referenced_column_name = foreign_key[3:5] primary_key_column_name = self.introspection.get_primary_key_column( cursor, table_name ) primary_key_value, bad_value = cursor.execute( "SELECT %s, %s FROM %s WHERE rowid = %%s" % ( self.ops.quote_name(primary_key_column_name), self.ops.quote_name(column_name), self.ops.quote_name(table_name), ), (rowid,), ).fetchone() raise IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." 
% ( table_name, primary_key_value, table_name, column_name, bad_value, referenced_table_name, referenced_column_name, ) ) else: with self.cursor() as cursor: if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column( cursor, table_name ) if not primary_key_column_name: continue relations = self.introspection.get_relations(cursor, table_name) for column_name, ( referenced_column_name, referenced_table_name, ) in relations.items(): cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." % ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): return True def _start_transaction_under_autocommit(self): """ Start a transaction explicitly in autocommit mode. Staying in autocommit mode works around a bug of sqlite3 that breaks savepoints when autocommit is disabled. """ self.cursor().execute("BEGIN") def is_in_memory_db(self): return self.creation.is_in_memory_db(self.settings_dict["NAME"]) FORMAT_QMARK_REGEX = _lazy_re_compile(r"(?<!%)%s") class SQLiteCursorWrapper(Database.Cursor): """ Django uses "format" style placeholders, but pysqlite2 uses "qmark" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". """ def execute(self, query, params=None): if params is None: return Database.Cursor.execute(self, query) query = self.convert_query(query) return Database.Cursor.execute(self, query, params) def executemany(self, query, param_list): query = self.convert_query(query) return Database.Cursor.executemany(self, query, param_list) def convert_query(self, query): return FORMAT_QMARK_REGEX.sub("?", query).replace("%%", "%")
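# Illustrative sketch, not part of Django: what convert_query() above does to
# "format"-style placeholders. The regex is re-declared locally so the snippet
# runs on its own; the query string is invented for the example.
import re

FORMAT_QMARK_REGEX = re.compile(r"(?<!%)%s")


def convert_query(query):
    return FORMAT_QMARK_REGEX.sub("?", query).replace("%%", "%")


# convert_query("SELECT * FROM t WHERE a = %s AND b LIKE '100%%'")
# -> "SELECT * FROM t WHERE a = ? AND b LIKE '100%'"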
import copy from decimal import Decimal from django.apps.registry import Apps from django.db import NotSupportedError from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.backends.ddl_references import Statement from django.db.backends.utils import strip_quotes from django.db.models import UniqueConstraint from django.db.transaction import atomic class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_delete_table = "DROP TABLE %(table)s" sql_create_fk = None sql_create_inline_fk = ( "REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED" ) sql_create_column_inline_fk = sql_create_inline_fk sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s" sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)" sql_delete_unique = "DROP INDEX %(name)s" def __enter__(self): # Some SQLite schema alterations need foreign key constraints to be # disabled. Enforce it here for the duration of the schema edition. if not self.connection.disable_constraint_checking(): raise NotSupportedError( "SQLite schema editor cannot be used while foreign key " "constraint checks are enabled. Make sure to disable them " "before entering a transaction.atomic() context because " "SQLite does not support disabling them in the middle of " "a multi-statement transaction." ) return super().__enter__() def __exit__(self, exc_type, exc_value, traceback): self.connection.check_constraints() super().__exit__(exc_type, exc_value, traceback) self.connection.enable_constraint_checking() def quote_value(self, value): # The backend "mostly works" without this function and there are use # cases for compiling Python without the sqlite3 libraries (e.g. # security hardening). try: import sqlite3 value = sqlite3.adapt(value) except ImportError: pass except sqlite3.ProgrammingError: pass # Manual emulation of SQLite parameter quoting if isinstance(value, bool): return str(int(value)) elif isinstance(value, (Decimal, float, int)): return str(value) elif isinstance(value, str): return "'%s'" % value.replace("'", "''") elif value is None: return "NULL" elif isinstance(value, (bytes, bytearray, memoryview)): # Bytes are only allowed for BLOB fields, encoded as string # literals containing hexadecimal data and preceded by a single "X" # character. return "X'%s'" % value.hex() else: raise ValueError( "Cannot quote parameter value %r of type %s" % (value, type(value)) ) def prepare_default(self, value): return self.quote_value(value) def _is_referenced_by_fk_constraint( self, table_name, column_name=None, ignore_self=False ): """ Return whether or not the provided table name is referenced by another one. If `column_name` is specified, only references pointing to that column are considered. If `ignore_self` is True, self-referential constraints are ignored. 
""" with self.connection.cursor() as cursor: for other_table in self.connection.introspection.get_table_list(cursor): if ignore_self and other_table.name == table_name: continue relations = self.connection.introspection.get_relations( cursor, other_table.name ) for constraint_column, constraint_table in relations.values(): if constraint_table == table_name and ( column_name is None or constraint_column == column_name ): return True return False def alter_db_table( self, model, old_db_table, new_db_table, disable_constraints=True ): if ( not self.connection.features.supports_atomic_references_rename and disable_constraints and self._is_referenced_by_fk_constraint(old_db_table) ): if self.connection.in_atomic_block: raise NotSupportedError( ( "Renaming the %r table while in a transaction is not " "supported on SQLite < 3.26 because it would break referential " "integrity. Try adding `atomic = False` to the Migration class." ) % old_db_table ) self.connection.enable_constraint_checking() super().alter_db_table(model, old_db_table, new_db_table) self.connection.disable_constraint_checking() else: super().alter_db_table(model, old_db_table, new_db_table) def alter_field(self, model, old_field, new_field, strict=False): if not self._field_should_be_altered(old_field, new_field): return old_field_name = old_field.name table_name = model._meta.db_table _, old_column_name = old_field.get_attname_column() if ( new_field.name != old_field_name and not self.connection.features.supports_atomic_references_rename and self._is_referenced_by_fk_constraint( table_name, old_column_name, ignore_self=True ) ): if self.connection.in_atomic_block: raise NotSupportedError( ( "Renaming the %r.%r column while in a transaction is not " "supported on SQLite < 3.26 because it would break referential " "integrity. Try adding `atomic = False` to the Migration class." ) % (model._meta.db_table, old_field_name) ) with atomic(self.connection.alias): super().alter_field(model, old_field, new_field, strict=strict) # Follow SQLite's documented procedure for performing changes # that don't affect the on-disk content. # https://sqlite.org/lang_altertable.html#otheralter with self.connection.cursor() as cursor: schema_version = cursor.execute("PRAGMA schema_version").fetchone()[ 0 ] cursor.execute("PRAGMA writable_schema = 1") references_template = ' REFERENCES "%s" ("%%s") ' % table_name new_column_name = new_field.get_attname_column()[1] search = references_template % old_column_name replacement = references_template % new_column_name cursor.execute( "UPDATE sqlite_master SET sql = replace(sql, %s, %s)", (search, replacement), ) cursor.execute("PRAGMA schema_version = %d" % (schema_version + 1)) cursor.execute("PRAGMA writable_schema = 0") # The integrity check will raise an exception and rollback # the transaction if the sqlite_master updates corrupt the # database. cursor.execute("PRAGMA integrity_check") # Perform a VACUUM to refresh the database representation from # the sqlite_master table. with self.connection.cursor() as cursor: cursor.execute("VACUUM") else: super().alter_field(model, old_field, new_field, strict=strict) def _remake_table( self, model, create_field=None, delete_field=None, alter_field=None ): """ Shortcut to transform a model from old_model into new_model This follows the correct procedure to perform non-rename or column addition operations based on SQLite's documentation https://www.sqlite.org/lang_altertable.html#caution The essential steps are: 1. 
Create a table with the updated definition called "new__app_model" 2. Copy the data from the existing "app_model" table to the new table 3. Drop the "app_model" table 4. Rename the "new__app_model" table to "app_model" 5. Restore any index of the previous "app_model" table. """ # Self-referential fields must be recreated rather than copied from # the old model to ensure their remote_field.field_name doesn't refer # to an altered field. def is_self_referential(f): return f.is_relation and f.remote_field.model is model # Work out the new fields dict / mapping body = { f.name: f.clone() if is_self_referential(f) else f for f in model._meta.local_concrete_fields } # Since mapping might mix column names and default values, # its values must be already quoted. mapping = { f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields } # This maps field names (not columns) for things like unique_together rename_mapping = {} # If any of the new or altered fields is introducing a new PK, # remove the old one restore_pk_field = None if getattr(create_field, "primary_key", False) or ( alter_field and getattr(alter_field[1], "primary_key", False) ): for name, field in list(body.items()): if field.primary_key and not ( # Do not remove the old primary key when an altered field # that introduces a primary key is the same field. alter_field and name == alter_field[1].name ): field.primary_key = False restore_pk_field = field if field.auto_created: del body[name] del mapping[field.column] # Add in any created fields if create_field: body[create_field.name] = create_field # Choose a default and insert it into the copy map if not create_field.many_to_many and create_field.concrete: mapping[create_field.column] = self.prepare_default( self.effective_default(create_field), ) # Add in any altered fields if alter_field: old_field, new_field = alter_field body.pop(old_field.name, None) mapping.pop(old_field.column, None) body[new_field.name] = new_field if old_field.null and not new_field.null: case_sql = "coalesce(%(col)s, %(default)s)" % { "col": self.quote_name(old_field.column), "default": self.prepare_default(self.effective_default(new_field)), } mapping[new_field.column] = case_sql else: mapping[new_field.column] = self.quote_name(old_field.column) rename_mapping[old_field.name] = new_field.name # Remove any deleted fields if delete_field: del body[delete_field.name] del mapping[delete_field.column] # Remove any implicit M2M tables if ( delete_field.many_to_many and delete_field.remote_field.through._meta.auto_created ): return self.delete_model(delete_field.remote_field.through) # Work inside a new app registry apps = Apps() # Work out the new value of unique_together, taking renames into # account unique_together = [ [rename_mapping.get(n, n) for n in unique] for unique in model._meta.unique_together ] # Work out the new value for index_together, taking renames into # account index_together = [ [rename_mapping.get(n, n) for n in index] for index in model._meta.index_together ] indexes = model._meta.indexes if delete_field: indexes = [ index for index in indexes if delete_field.name not in index.fields ] constraints = list(model._meta.constraints) # Provide isolated instances of the fields to the new model body so # that the existing model's internals aren't interfered with when # the dummy model is constructed. body_copy = copy.deepcopy(body) # Construct a new model with the new fields to allow self referential # primary key to resolve to. 
This model won't ever be materialized as a # table and solely exists for foreign key reference resolution purposes. # This wouldn't be required if the schema editor was operating on model # states instead of rendered models. meta_contents = { "app_label": model._meta.app_label, "db_table": model._meta.db_table, "unique_together": unique_together, "index_together": index_together, "indexes": indexes, "constraints": constraints, "apps": apps, } meta = type("Meta", (), meta_contents) body_copy["Meta"] = meta body_copy["__module__"] = model.__module__ type(model._meta.object_name, model.__bases__, body_copy) # Construct a model with a renamed table name. body_copy = copy.deepcopy(body) meta_contents = { "app_label": model._meta.app_label, "db_table": "new__%s" % strip_quotes(model._meta.db_table), "unique_together": unique_together, "index_together": index_together, "indexes": indexes, "constraints": constraints, "apps": apps, } meta = type("Meta", (), meta_contents) body_copy["Meta"] = meta body_copy["__module__"] = model.__module__ new_model = type("New%s" % model._meta.object_name, model.__bases__, body_copy) # Create a new table with the updated schema. self.create_model(new_model) # Copy data from the old table into the new table self.execute( "INSERT INTO %s (%s) SELECT %s FROM %s" % ( self.quote_name(new_model._meta.db_table), ", ".join(self.quote_name(x) for x in mapping), ", ".join(mapping.values()), self.quote_name(model._meta.db_table), ) ) # Delete the old table to make way for the new self.delete_model(model, handle_autom2m=False) # Rename the new table to take way for the old self.alter_db_table( new_model, new_model._meta.db_table, model._meta.db_table, disable_constraints=False, ) # Run deferred SQL on correct table for sql in self.deferred_sql: self.execute(sql) self.deferred_sql = [] # Fix any PK-removed field if restore_pk_field: restore_pk_field.primary_key = True def delete_model(self, model, handle_autom2m=True): if handle_autom2m: super().delete_model(model) else: # Delete the table (and only that) self.execute( self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), } ) # Remove all deferred statements referencing the deleted table. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_table( model._meta.db_table ): self.deferred_sql.remove(sql) def add_field(self, model, field): """Create a field on a model.""" if ( # Primary keys and unique fields are not supported in ALTER TABLE # ADD COLUMN. field.primary_key or field.unique or # Fields with default values cannot by handled by ALTER TABLE ADD # COLUMN statement because DROP DEFAULT is not supported in # ALTER TABLE. not field.null or self.effective_default(field) is not None ): self._remake_table(model, create_field=field) else: super().add_field(model, field) def remove_field(self, model, field): """ Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table. """ # M2M fields are a special case if field.many_to_many: # For implicit M2M tables, delete the auto-created table if field.remote_field.through._meta.auto_created: self.delete_model(field.remote_field.through) # For explicit "through" M2M fields, do nothing elif ( self.connection.features.can_alter_table_drop_column # Primary keys, unique fields, indexed fields, and foreign keys are # not supported in ALTER TABLE DROP COLUMN. 
and not field.primary_key and not field.unique and not field.db_index and not (field.remote_field and field.db_constraint) ): super().remove_field(model, field) # For everything else, remake. else: # It might not actually have a column behind it if field.db_parameters(connection=self.connection)["type"] is None: return self._remake_table(model, delete_field=field) def _alter_field( self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False, ): """Perform a "physical" (non-ManyToMany) field update.""" # Use "ALTER TABLE ... RENAME COLUMN" if only the column name # changed and there aren't any constraints. if ( self.connection.features.can_alter_table_rename_column and old_field.column != new_field.column and self.column_sql(model, old_field) == self.column_sql(model, new_field) and not ( old_field.remote_field and old_field.db_constraint or new_field.remote_field and new_field.db_constraint ) ): return self.execute( self._rename_field_sql( model._meta.db_table, old_field, new_field, new_type ) ) # Alter by remaking table self._remake_table(model, alter_field=(old_field, new_field)) # Rebuild tables with FKs pointing to this field. old_collation = old_db_params.get("collation") new_collation = new_db_params.get("collation") if new_field.unique and ( old_type != new_type or old_collation != new_collation ): related_models = set() opts = new_field.model._meta for remote_field in opts.related_objects: # Ignore self-relationship since the table was already rebuilt. if remote_field.related_model == model: continue if not remote_field.many_to_many: if remote_field.field_name == new_field.name: related_models.add(remote_field.related_model) elif new_field.primary_key and remote_field.through._meta.auto_created: related_models.add(remote_field.through) if new_field.primary_key: for many_to_many in opts.many_to_many: # Ignore self-relationship since the table was already rebuilt. if many_to_many.related_model == model: continue if many_to_many.remote_field.through._meta.auto_created: related_models.add(many_to_many.remote_field.through) for related_model in related_models: self._remake_table(related_model) def _alter_many_to_many(self, model, old_field, new_field, strict): """Alter M2Ms to repoint their to= endpoints.""" if ( old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table ): # The field name didn't change, but some options did, so we have to # propagate this altering. self._remake_table( old_field.remote_field.through, alter_field=( # The field that points to the target model is needed, so # we can tell alter_field to change it - this is # m2m_reverse_field_name() (as opposed to m2m_field_name(), # which points to our model). 
old_field.remote_field.through._meta.get_field( old_field.m2m_reverse_field_name() ), new_field.remote_field.through._meta.get_field( new_field.m2m_reverse_field_name() ), ), ) return # Make a new through table self.create_model(new_field.remote_field.through) # Copy the data across self.execute( "INSERT INTO %s (%s) SELECT %s FROM %s" % ( self.quote_name(new_field.remote_field.through._meta.db_table), ", ".join( [ "id", new_field.m2m_column_name(), new_field.m2m_reverse_name(), ] ), ", ".join( [ "id", old_field.m2m_column_name(), old_field.m2m_reverse_name(), ] ), self.quote_name(old_field.remote_field.through._meta.db_table), ) ) # Delete the old through table self.delete_model(old_field.remote_field.through) def add_constraint(self, model, constraint): if isinstance(constraint, UniqueConstraint) and ( constraint.condition or constraint.contains_expressions or constraint.include or constraint.deferrable ): super().add_constraint(model, constraint) else: self._remake_table(model) def remove_constraint(self, model, constraint): if isinstance(constraint, UniqueConstraint) and ( constraint.condition or constraint.contains_expressions or constraint.include or constraint.deferrable ): super().remove_constraint(model, constraint) else: self._remake_table(model) def _collate_sql(self, collation): return "COLLATE " + collation
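# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the schema editor): how the remake-table
# path above is typically reached. The `connection`, `model`, and `field`
# arguments are hypothetical placeholders used only for demonstration.
# ---------------------------------------------------------------------------


def _example_add_field_via_remake(connection, model, field):
    """
    Minimal sketch, assuming a SQLite connection: adding a NOT NULL field (or
    any field with an effective default) goes through add_field(), which falls
    back to _remake_table(model, create_field=field) because SQLite's
    ALTER TABLE ADD COLUMN cannot express the required default handling.
    """
    with connection.schema_editor() as editor:
        editor.add_field(model, field)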
7c65f6e47bde3ba4c909e153aa18acd2b41f55c0fc8227f817f8863de25d88a2
import copy from collections import defaultdict from django.conf import settings from django.template.backends.django import get_template_tag_modules from . import Error, Tags, Warning, register E001 = Error( "You have 'APP_DIRS': True in your TEMPLATES but also specify 'loaders' " "in OPTIONS. Either remove APP_DIRS or remove the 'loaders' option.", id="templates.E001", ) E002 = Error( "'string_if_invalid' in TEMPLATES OPTIONS must be a string but got: {} ({}).", id="templates.E002", ) W003 = Warning( "{} is used for multiple template tag modules: {}", id="templates.E003", ) @register(Tags.templates) def check_setting_app_dirs_loaders(app_configs, **kwargs): return ( [E001] if any( conf.get("APP_DIRS") and "loaders" in conf.get("OPTIONS", {}) for conf in settings.TEMPLATES ) else [] ) @register(Tags.templates) def check_string_if_invalid_is_string(app_configs, **kwargs): errors = [] for conf in settings.TEMPLATES: string_if_invalid = conf.get("OPTIONS", {}).get("string_if_invalid", "") if not isinstance(string_if_invalid, str): error = copy.copy(E002) error.msg = error.msg.format( string_if_invalid, type(string_if_invalid).__name__ ) errors.append(error) return errors @register(Tags.templates) def check_for_template_tags_with_the_same_name(app_configs, **kwargs): errors = [] libraries = defaultdict(set) for conf in settings.TEMPLATES: custom_libraries = conf.get("OPTIONS", {}).get("libraries", {}) for module_name, module_path in custom_libraries.items(): libraries[module_name].add(module_path) for module_name, module_path in get_template_tag_modules(): libraries[module_name].add(module_path) for library_name, items in libraries.items(): if len(items) > 1: errors.append( Warning( W003.msg.format( repr(library_name), ", ".join(repr(item) for item in sorted(items)), ), id=W003.id, ) ) return errors
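# ---------------------------------------------------------------------------
# Illustrative sketch: a hypothetical TEMPLATES value (module paths are
# assumptions) that would make check_for_template_tags_with_the_same_name()
# above report the W003 warning, because the library name "my_tags" maps to
# two different module paths across the configured backends.
# ---------------------------------------------------------------------------

_EXAMPLE_TEMPLATES_SETTING = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "OPTIONS": {
            "libraries": {"my_tags": "someapp.templatetags.my_tags"},
        },
    },
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "OPTIONS": {
            # Same library name, different path: collected into one set and
            # warned about once len(items) > 1.
            "libraries": {"my_tags": "otherapp.templatetags.my_tags"},
        },
    },
]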
b43f6aa1463097ebc7b716009260b37378e4ca68a95acc452ad24f36f2f547d0
import functools import os import pkgutil import sys from argparse import ( _AppendConstAction, _CountAction, _StoreConstAction, _SubParsersAction, ) from collections import defaultdict from difflib import get_close_matches from importlib import import_module import django from django.apps import apps from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.management.base import ( BaseCommand, CommandError, CommandParser, handle_default_options, ) from django.core.management.color import color_style from django.utils import autoreload def find_commands(management_dir): """ Given a path to a management directory, return a list of all the command names that are available. """ command_dir = os.path.join(management_dir, "commands") return [ name for _, name, is_pkg in pkgutil.iter_modules([command_dir]) if not is_pkg and not name.startswith("_") ] def load_command_class(app_name, name): """ Given a command name and an application name, return the Command class instance. Allow all errors raised by the import process (ImportError, AttributeError) to propagate. """ module = import_module("%s.management.commands.%s" % (app_name, name)) return module.Command() @functools.lru_cache(maxsize=None) def get_commands(): """ Return a dictionary mapping command names to their callback applications. Look for a management.commands package in django.core, and in each installed application -- if a commands package exists, register all commands in that package. Core commands are always included. If a settings module has been specified, also include user-defined commands. The dictionary is in the format {command_name: app_name}. Key-value pairs from this dictionary can then be used in calls to load_command_class(app_name, command_name) The dictionary is cached on the first call and reused on subsequent calls. """ commands = {name: "django.core" for name in find_commands(__path__[0])} if not settings.configured: return commands for app_config in reversed(apps.get_app_configs()): path = os.path.join(app_config.path, "management") commands.update({name: app_config.name for name in find_commands(path)}) return commands def call_command(command_name, *args, **options): """ Call the given command, with the given options and args/kwargs. This is the primary API you should use for calling specific commands. `command_name` may be a string or a command object. Using a string is preferred unless the command object is required for further processing or testing. Some examples: call_command('migrate') call_command('shell', plain=True) call_command('sqlmigrate', 'myapp') from django.core.management.commands import flush cmd = flush.Command() call_command(cmd, verbosity=0, interactive=False) # Do something with cmd ... """ if isinstance(command_name, BaseCommand): # Command object passed in. command = command_name command_name = command.__class__.__module__.split(".")[-1] else: # Load the command object by name. try: app_name = get_commands()[command_name] except KeyError: raise CommandError("Unknown command: %r" % command_name) if isinstance(app_name, BaseCommand): # If the command is already loaded, use it directly. command = app_name else: command = load_command_class(app_name, command_name) # Simulate argument parsing to get the option defaults (see #10080 for details). 
parser = command.create_parser("", command_name) # Use the `dest` option name from the parser option opt_mapping = { min(s_opt.option_strings).lstrip("-").replace("-", "_"): s_opt.dest for s_opt in parser._actions if s_opt.option_strings } arg_options = {opt_mapping.get(key, key): value for key, value in options.items()} parse_args = [] for arg in args: if isinstance(arg, (list, tuple)): parse_args += map(str, arg) else: parse_args.append(str(arg)) def get_actions(parser): # Parser actions and actions from sub-parser choices. for opt in parser._actions: if isinstance(opt, _SubParsersAction): for sub_opt in opt.choices.values(): yield from get_actions(sub_opt) else: yield opt parser_actions = list(get_actions(parser)) mutually_exclusive_required_options = { opt for group in parser._mutually_exclusive_groups for opt in group._group_actions if group.required } # Any required arguments which are passed in via **options must be passed # to parse_args(). for opt in parser_actions: if opt.dest in options and ( opt.required or opt in mutually_exclusive_required_options ): opt_dest_count = sum(v == opt.dest for v in opt_mapping.values()) if opt_dest_count > 1: raise TypeError( f"Cannot pass the dest {opt.dest!r} that matches multiple " f"arguments via **options." ) parse_args.append(min(opt.option_strings)) if isinstance(opt, (_AppendConstAction, _CountAction, _StoreConstAction)): continue value = arg_options[opt.dest] if isinstance(value, (list, tuple)): parse_args += map(str, value) else: parse_args.append(str(value)) defaults = parser.parse_args(args=parse_args) defaults = dict(defaults._get_kwargs(), **arg_options) # Raise an error if any unknown options were passed. stealth_options = set(command.base_stealth_options + command.stealth_options) dest_parameters = {action.dest for action in parser_actions} valid_options = (dest_parameters | stealth_options).union(opt_mapping) unknown_options = set(options) - valid_options if unknown_options: raise TypeError( "Unknown option(s) for %s command: %s. " "Valid options are: %s." % ( command_name, ", ".join(sorted(unknown_options)), ", ".join(sorted(valid_options)), ) ) # Move positional args out of options to mimic legacy optparse args = defaults.pop("args", ()) if "skip_checks" not in options: defaults["skip_checks"] = True return command.execute(*args, **defaults) class ManagementUtility: """ Encapsulate the logic of the django-admin and manage.py utilities. """ def __init__(self, argv=None): self.argv = argv or sys.argv[:] self.prog_name = os.path.basename(self.argv[0]) if self.prog_name == "__main__.py": self.prog_name = "python -m django" self.settings_exception = None def main_help_text(self, commands_only=False): """Return the script's main help text, as a string.""" if commands_only: usage = sorted(get_commands()) else: usage = [ "", "Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name, "", "Available subcommands:", ] commands_dict = defaultdict(lambda: []) for name, app in get_commands().items(): if app == "django.core": app = "django" else: app = app.rpartition(".")[-1] commands_dict[app].append(name) style = color_style() for app in sorted(commands_dict): usage.append("") usage.append(style.NOTICE("[%s]" % app)) for name in sorted(commands_dict[app]): usage.append(" %s" % name) # Output an extra note if settings are not properly configured if self.settings_exception is not None: usage.append( style.NOTICE( "Note that only Django core commands are listed " "as settings are not properly configured (error: %s)." 
% self.settings_exception ) ) return "\n".join(usage) def fetch_command(self, subcommand): """ Try to fetch the given subcommand, printing a message with the appropriate command called from the command line (usually "django-admin" or "manage.py") if it can't be found. """ # Get commands outside of try block to prevent swallowing exceptions commands = get_commands() try: app_name = commands[subcommand] except KeyError: if os.environ.get("DJANGO_SETTINGS_MODULE"): # If `subcommand` is missing due to misconfigured settings, the # following line will retrigger an ImproperlyConfigured exception # (get_commands() swallows the original one) so the user is # informed about it. settings.INSTALLED_APPS elif not settings.configured: sys.stderr.write("No Django settings specified.\n") possible_matches = get_close_matches(subcommand, commands) sys.stderr.write("Unknown command: %r" % subcommand) if possible_matches: sys.stderr.write(". Did you mean %s?" % possible_matches[0]) sys.stderr.write("\nType '%s help' for usage.\n" % self.prog_name) sys.exit(1) if isinstance(app_name, BaseCommand): # If the command is already loaded, use it directly. klass = app_name else: klass = load_command_class(app_name, subcommand) return klass def autocomplete(self): """ Output completion suggestions for BASH. The output of this function is passed to BASH's `COMREPLY` variable and treated as completion suggestions. `COMREPLY` expects a space separated string as the result. The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used to get information about the cli input. Please refer to the BASH man-page for more information about this variables. Subcommand options are saved as pairs. A pair consists of the long option string (e.g. '--exclude') and a boolean value indicating if the option requires arguments. When printing to stdout, an equal sign is appended to options which require arguments. Note: If debugging this function, it is recommended to write the debug output in a separate file. Otherwise the debug output will be treated and formatted as potential completion suggestions. """ # Don't complete if user hasn't sourced bash_completion file. if "DJANGO_AUTO_COMPLETE" not in os.environ: return cwords = os.environ["COMP_WORDS"].split()[1:] cword = int(os.environ["COMP_CWORD"]) try: curr = cwords[cword - 1] except IndexError: curr = "" subcommands = [*get_commands(), "help"] options = [("--help", False)] # subcommand if cword == 1: print(" ".join(sorted(filter(lambda x: x.startswith(curr), subcommands)))) # subcommand options # special case: the 'help' subcommand has no options elif cwords[0] in subcommands and cwords[0] != "help": subcommand_cls = self.fetch_command(cwords[0]) # special case: add the names of installed apps to options if cwords[0] in ("dumpdata", "sqlmigrate", "sqlsequencereset", "test"): try: app_configs = apps.get_app_configs() # Get the last part of the dotted path as the app name. options.extend((app_config.label, 0) for app_config in app_configs) except ImportError: # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The # user will find out once they execute the command. 
pass parser = subcommand_cls.create_parser("", cwords[0]) options.extend( (min(s_opt.option_strings), s_opt.nargs != 0) for s_opt in parser._actions if s_opt.option_strings ) # filter out previously specified options from available options prev_opts = {x.split("=")[0] for x in cwords[1 : cword - 1]} options = (opt for opt in options if opt[0] not in prev_opts) # filter options by current input options = sorted((k, v) for k, v in options if k.startswith(curr)) for opt_label, require_arg in options: # append '=' to options which require args if require_arg: opt_label += "=" print(opt_label) # Exit code of the bash completion function is never passed back to # the user, so it's safe to always exit with 0. # For more details see #25420. sys.exit(0) def execute(self): """ Given the command-line arguments, figure out which subcommand is being run, create a parser appropriate to that command, and run it. """ try: subcommand = self.argv[1] except IndexError: subcommand = "help" # Display help if no arguments were given. # Preprocess options to extract --settings and --pythonpath. # These options could affect the commands that are available, so they # must be processed early. parser = CommandParser( prog=self.prog_name, usage="%(prog)s subcommand [options] [args]", add_help=False, allow_abbrev=False, ) parser.add_argument("--settings") parser.add_argument("--pythonpath") parser.add_argument("args", nargs="*") # catch-all try: options, args = parser.parse_known_args(self.argv[2:]) handle_default_options(options) except CommandError: pass # Ignore any option errors at this point. try: settings.INSTALLED_APPS except ImproperlyConfigured as exc: self.settings_exception = exc except ImportError as exc: self.settings_exception = exc if settings.configured: # Start the auto-reloading dev server even if the code is broken. # The hardcoded condition is a code smell but we can't rely on a # flag on the command class because we haven't located it yet. if subcommand == "runserver" and "--noreload" not in self.argv: try: autoreload.check_errors(django.setup)() except Exception: # The exception will be raised later in the child process # started by the autoreloader. Pretend it didn't happen by # loading an empty list of applications. apps.all_models = defaultdict(dict) apps.app_configs = {} apps.apps_ready = apps.models_ready = apps.ready = True # Remove options not compatible with the built-in runserver # (e.g. options for the contrib.staticfiles' runserver). # Changes here require manually testing as described in # #27522. _parser = self.fetch_command("runserver").create_parser( "django", "runserver" ) _options, _args = _parser.parse_known_args(self.argv[2:]) for _arg in _args: self.argv.remove(_arg) # In all other cases, django.setup() is required to succeed. else: django.setup() self.autocomplete() if subcommand == "help": if "--commands" in args: sys.stdout.write(self.main_help_text(commands_only=True) + "\n") elif not options.args: sys.stdout.write(self.main_help_text() + "\n") else: self.fetch_command(options.args[0]).print_help( self.prog_name, options.args[0] ) # Special-cases: We want 'django-admin --version' and # 'django-admin --help' to work, for backwards compatibility. 
elif subcommand == "version" or self.argv[1:] == ["--version"]: sys.stdout.write(django.get_version() + "\n") elif self.argv[1:] in (["--help"], ["-h"]): sys.stdout.write(self.main_help_text() + "\n") else: self.fetch_command(subcommand).run_from_argv(self.argv) def execute_from_command_line(argv=None): """Run a ManagementUtility.""" utility = ManagementUtility(argv) utility.execute()
683010f00b444182b8237128f1a7510f8986c9640b262337a88fb06093bf256c
import fnmatch import os import shutil import subprocess from pathlib import Path from subprocess import run from django.apps import apps as installed_apps from django.utils.crypto import get_random_string from django.utils.encoding import DEFAULT_LOCALE_ENCODING from .base import CommandError, CommandParser def popen_wrapper(args, stdout_encoding="utf-8"): """ Friendly wrapper around Popen. Return stdout output, stderr output, and OS status code. """ try: p = run(args, capture_output=True, close_fds=os.name != "nt") except OSError as err: raise CommandError("Error executing %s" % args[0]) from err return ( p.stdout.decode(stdout_encoding), p.stderr.decode(DEFAULT_LOCALE_ENCODING, errors="replace"), p.returncode, ) def handle_extensions(extensions): """ Organize multiple extensions that are separated with commas or passed by using --extension/-e multiple times. For example: running 'django-admin makemessages -e js,txt -e xhtml -a' would result in an extension list: ['.js', '.txt', '.xhtml'] >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py']) {'.html', '.js', '.py'} >>> handle_extensions(['.html, txt,.tpl']) {'.html', '.tpl', '.txt'} """ ext_list = [] for ext in extensions: ext_list.extend(ext.replace(" ", "").split(",")) for i, ext in enumerate(ext_list): if not ext.startswith("."): ext_list[i] = ".%s" % ext_list[i] return set(ext_list) def find_command(cmd, path=None, pathext=None): if path is None: path = os.environ.get("PATH", "").split(os.pathsep) if isinstance(path, str): path = [path] # check if there are funny path extensions for executables, e.g. Windows if pathext is None: pathext = os.environ.get("PATHEXT", ".COM;.EXE;.BAT;.CMD").split(os.pathsep) # don't use extensions if the command ends with one of them for ext in pathext: if cmd.endswith(ext): pathext = [""] break # check if we find the command on PATH for p in path: f = os.path.join(p, cmd) if os.path.isfile(f): return f for ext in pathext: fext = f + ext if os.path.isfile(fext): return fext return None def get_random_secret_key(): """ Return a 50 character random string usable as a SECRET_KEY setting value. """ chars = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)" return get_random_string(50, chars) def parse_apps_and_model_labels(labels): """ Parse a list of "app_label.ModelName" or "app_label" strings into actual objects and return a two-element tuple: (set of model classes, set of app_configs). Raise a CommandError if some specified models or apps don't exist. """ apps = set() models = set() for label in labels: if "." in label: try: model = installed_apps.get_model(label) except LookupError: raise CommandError("Unknown model: %s" % label) models.add(model) else: try: app_config = installed_apps.get_app_config(label) except LookupError as e: raise CommandError(str(e)) apps.add(app_config) return models, apps def get_command_line_option(argv, option): """ Return the value of a command line option (which should include leading dashes, e.g. '--testrunner') from an argument list. Return None if the option wasn't passed or if the argument list couldn't be parsed. 
""" parser = CommandParser(add_help=False, allow_abbrev=False) parser.add_argument(option, dest="value") try: options, _ = parser.parse_known_args(argv[2:]) except CommandError: return None else: return options.value def normalize_path_patterns(patterns): """Normalize an iterable of glob style patterns based on OS.""" patterns = [os.path.normcase(p) for p in patterns] dir_suffixes = {"%s*" % path_sep for path_sep in {"/", os.sep}} norm_patterns = [] for pattern in patterns: for dir_suffix in dir_suffixes: if pattern.endswith(dir_suffix): norm_patterns.append(pattern[: -len(dir_suffix)]) break else: norm_patterns.append(pattern) return norm_patterns def is_ignored_path(path, ignore_patterns): """ Check if the given path should be ignored or not based on matching one of the glob style `ignore_patterns`. """ path = Path(path) def ignore(pattern): return fnmatch.fnmatchcase(path.name, pattern) or fnmatch.fnmatchcase( str(path), pattern ) return any(ignore(pattern) for pattern in normalize_path_patterns(ignore_patterns)) def find_formatters(): return {"black_path": shutil.which("black")} def run_formatters(written_files, black_path=(sentinel := object())): """ Run the black formatter on the specified files. """ # Use a sentinel rather than None, as which() returns None when not found. if black_path is sentinel: black_path = shutil.which("black") if black_path: subprocess.run( [black_path, "--fast", "--", *written_files], capture_output=True, )
df158fb49a5636c3588823e31a7dbf27f942807bde82657ab10ffb222124f4a9
import argparse import mimetypes import os import posixpath import shutil import stat import tempfile from importlib import import_module from urllib.request import build_opener import django from django.conf import settings from django.core.management.base import BaseCommand, CommandError from django.core.management.utils import ( find_formatters, handle_extensions, run_formatters, ) from django.template import Context, Engine from django.utils import archive from django.utils.http import parse_header_parameters from django.utils.version import get_docs_version class TemplateCommand(BaseCommand): """ Copy either a Django application layout template or a Django project layout template into the specified directory. :param style: A color style object (see django.core.management.color). :param app_or_project: The string 'app' or 'project'. :param name: The name of the application or project. :param directory: The directory to which the template should be copied. :param options: The additional variables passed to project or app templates """ requires_system_checks = [] # The supported URL schemes url_schemes = ["http", "https", "ftp"] # Rewrite the following suffixes when determining the target filename. rewrite_template_suffixes = ( # Allow shipping invalid .py files without byte-compilation. (".py-tpl", ".py"), ) def add_arguments(self, parser): parser.add_argument("name", help="Name of the application or project.") parser.add_argument( "directory", nargs="?", help="Optional destination directory" ) parser.add_argument( "--template", help="The path or URL to load the template from." ) parser.add_argument( "--extension", "-e", dest="extensions", action="append", default=["py"], help='The file extension(s) to render (default: "py"). ' "Separate multiple extensions with commas, or use " "-e multiple times.", ) parser.add_argument( "--name", "-n", dest="files", action="append", default=[], help="The file name(s) to render. Separate multiple file names " "with commas, or use -n multiple times.", ) parser.add_argument( "--exclude", "-x", action="append", default=argparse.SUPPRESS, nargs="?", const="", help=( "The directory name(s) to exclude, in addition to .git and " "__pycache__. Can be used multiple times." ), ) def handle(self, app_or_project, name, target=None, **options): self.written_files = [] self.app_or_project = app_or_project self.a_or_an = "an" if app_or_project == "app" else "a" self.paths_to_remove = [] self.verbosity = options["verbosity"] self.validate_name(name) # if some directory is given, make sure it's nicely expanded if target is None: top_dir = os.path.join(os.getcwd(), name) try: os.makedirs(top_dir) except FileExistsError: raise CommandError("'%s' already exists" % top_dir) except OSError as e: raise CommandError(e) else: top_dir = os.path.abspath(os.path.expanduser(target)) if app_or_project == "app": self.validate_name(os.path.basename(top_dir), "directory") if not os.path.exists(top_dir): raise CommandError( "Destination directory '%s' does not " "exist, please create it first." % top_dir ) # Find formatters, which are external executables, before input # from the templates can sneak into the path. 
formatter_paths = find_formatters() extensions = tuple(handle_extensions(options["extensions"])) extra_files = [] excluded_directories = [".git", "__pycache__"] for file in options["files"]: extra_files.extend(map(lambda x: x.strip(), file.split(","))) if exclude := options.get("exclude"): for directory in exclude: excluded_directories.append(directory.strip()) if self.verbosity >= 2: self.stdout.write( "Rendering %s template files with extensions: %s" % (app_or_project, ", ".join(extensions)) ) self.stdout.write( "Rendering %s template files with filenames: %s" % (app_or_project, ", ".join(extra_files)) ) base_name = "%s_name" % app_or_project base_subdir = "%s_template" % app_or_project base_directory = "%s_directory" % app_or_project camel_case_name = "camel_case_%s_name" % app_or_project camel_case_value = "".join(x for x in name.title() if x != "_") context = Context( { **options, base_name: name, base_directory: top_dir, camel_case_name: camel_case_value, "docs_version": get_docs_version(), "django_version": django.__version__, }, autoescape=False, ) # Setup a stub settings environment for template rendering if not settings.configured: settings.configure() django.setup() template_dir = self.handle_template(options["template"], base_subdir) prefix_length = len(template_dir) + 1 for root, dirs, files in os.walk(template_dir): path_rest = root[prefix_length:] relative_dir = path_rest.replace(base_name, name) if relative_dir: target_dir = os.path.join(top_dir, relative_dir) os.makedirs(target_dir, exist_ok=True) for dirname in dirs[:]: if "exclude" not in options: if dirname.startswith(".") or dirname == "__pycache__": dirs.remove(dirname) elif dirname in excluded_directories: dirs.remove(dirname) for filename in files: if filename.endswith((".pyo", ".pyc", ".py.class")): # Ignore some files as they cause various breakages. continue old_path = os.path.join(root, filename) new_path = os.path.join( top_dir, relative_dir, filename.replace(base_name, name) ) for old_suffix, new_suffix in self.rewrite_template_suffixes: if new_path.endswith(old_suffix): new_path = new_path[: -len(old_suffix)] + new_suffix break # Only rewrite once if os.path.exists(new_path): raise CommandError( "%s already exists. Overlaying %s %s into an existing " "directory won't replace conflicting files." % ( new_path, self.a_or_an, app_or_project, ) ) # Only render the Python files, as we don't want to # accidentally render Django templates files if new_path.endswith(extensions) or filename in extra_files: with open(old_path, encoding="utf-8") as template_file: content = template_file.read() template = Engine().from_string(content) content = template.render(context) with open(new_path, "w", encoding="utf-8") as new_file: new_file.write(content) else: shutil.copyfile(old_path, new_path) self.written_files.append(new_path) if self.verbosity >= 2: self.stdout.write("Creating %s" % new_path) try: self.apply_umask(old_path, new_path) self.make_writeable(new_path) except OSError: self.stderr.write( "Notice: Couldn't set permission bits on %s. You're " "probably using an uncommon filesystem setup. No " "problem." 
% new_path, self.style.NOTICE, ) if self.paths_to_remove: if self.verbosity >= 2: self.stdout.write("Cleaning up temporary files.") for path_to_remove in self.paths_to_remove: if os.path.isfile(path_to_remove): os.remove(path_to_remove) else: shutil.rmtree(path_to_remove) run_formatters(self.written_files, **formatter_paths) def handle_template(self, template, subdir): """ Determine where the app or project templates are. Use django.__path__[0] as the default because the Django install directory isn't known. """ if template is None: return os.path.join(django.__path__[0], "conf", subdir) else: if template.startswith("file://"): template = template[7:] expanded_template = os.path.expanduser(template) expanded_template = os.path.normpath(expanded_template) if os.path.isdir(expanded_template): return expanded_template if self.is_url(template): # downloads the file and returns the path absolute_path = self.download(template) else: absolute_path = os.path.abspath(expanded_template) if os.path.exists(absolute_path): return self.extract(absolute_path) raise CommandError( "couldn't handle %s template %s." % (self.app_or_project, template) ) def validate_name(self, name, name_or_dir="name"): if name is None: raise CommandError( "you must provide {an} {app} name".format( an=self.a_or_an, app=self.app_or_project, ) ) # Check it's a valid directory name. if not name.isidentifier(): raise CommandError( "'{name}' is not a valid {app} {type}. Please make sure the " "{type} is a valid identifier.".format( name=name, app=self.app_or_project, type=name_or_dir, ) ) # Check it cannot be imported. try: import_module(name) except ImportError: pass else: raise CommandError( "'{name}' conflicts with the name of an existing Python " "module and cannot be used as {an} {app} {type}. Please try " "another {type}.".format( name=name, an=self.a_or_an, app=self.app_or_project, type=name_or_dir, ) ) def download(self, url): """ Download the given URL and return the file name. 
""" def cleanup_url(url): tmp = url.rstrip("/") filename = tmp.split("/")[-1] if url.endswith("/"): display_url = tmp + "/" else: display_url = url return filename, display_url prefix = "django_%s_template_" % self.app_or_project tempdir = tempfile.mkdtemp(prefix=prefix, suffix="_download") self.paths_to_remove.append(tempdir) filename, display_url = cleanup_url(url) if self.verbosity >= 2: self.stdout.write("Downloading %s" % display_url) the_path = os.path.join(tempdir, filename) opener = build_opener() opener.addheaders = [("User-Agent", f"Django/{django.__version__}")] try: with opener.open(url) as source, open(the_path, "wb") as target: headers = source.info() target.write(source.read()) except OSError as e: raise CommandError( "couldn't download URL %s to %s: %s" % (url, filename, e) ) used_name = the_path.split("/")[-1] # Trying to get better name from response headers content_disposition = headers["content-disposition"] if content_disposition: _, params = parse_header_parameters(content_disposition) guessed_filename = params.get("filename") or used_name else: guessed_filename = used_name # Falling back to content type guessing ext = self.splitext(guessed_filename)[1] content_type = headers["content-type"] if not ext and content_type: ext = mimetypes.guess_extension(content_type) if ext: guessed_filename += ext # Move the temporary file to a filename that has better # chances of being recognized by the archive utils if used_name != guessed_filename: guessed_path = os.path.join(tempdir, guessed_filename) shutil.move(the_path, guessed_path) return guessed_path # Giving up return the_path def splitext(self, the_path): """ Like os.path.splitext, but takes off .tar, too """ base, ext = posixpath.splitext(the_path) if base.lower().endswith(".tar"): ext = base[-4:] + ext base = base[:-4] return base, ext def extract(self, filename): """ Extract the given file to a temporary directory and return the path of the directory with the extracted content. """ prefix = "django_%s_template_" % self.app_or_project tempdir = tempfile.mkdtemp(prefix=prefix, suffix="_extract") self.paths_to_remove.append(tempdir) if self.verbosity >= 2: self.stdout.write("Extracting %s" % filename) try: archive.extract(filename, tempdir) return tempdir except (archive.ArchiveException, OSError) as e: raise CommandError( "couldn't extract file %s to %s: %s" % (filename, tempdir, e) ) def is_url(self, template): """Return True if the name looks like a URL.""" if ":" not in template: return False scheme = template.split(":", 1)[0].lower() return scheme in self.url_schemes def apply_umask(self, old_path, new_path): current_umask = os.umask(0) os.umask(current_umask) current_mode = stat.S_IMODE(os.stat(old_path).st_mode) os.chmod(new_path, current_mode & ~current_umask) def make_writeable(self, filename): """ Make sure that the file is writeable. Useful if our source is read-only. """ if not os.access(filename, os.W_OK): st = os.stat(filename) new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR os.chmod(filename, new_permissions)
acd47e53d9bb28dc15512b52ee3a245dcc6f01372b1db32e7d815cf22c3d1848
""" A Python "serializer". Doesn't do much serializing per se -- just converts to and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for other serializers. """ from django.apps import apps from django.core.serializers import base from django.db import DEFAULT_DB_ALIAS, models from django.utils.encoding import is_protected_type class Serializer(base.Serializer): """ Serialize a QuerySet to basic Python objects. """ internal_use_only = True def start_serialization(self): self._current = None self.objects = [] def end_serialization(self): pass def start_object(self, obj): self._current = {} def end_object(self, obj): self.objects.append(self.get_dump_object(obj)) self._current = None def get_dump_object(self, obj): data = {"model": str(obj._meta)} if not self.use_natural_primary_keys or not hasattr(obj, "natural_key"): data["pk"] = self._value_from_field(obj, obj._meta.pk) data["fields"] = self._current return data def _value_from_field(self, obj, field): value = field.value_from_object(obj) # Protected types (i.e., primitives like None, numbers, dates, # and Decimals) are passed through as is. All other values are # converted to string first. return value if is_protected_type(value) else field.value_to_string(obj) def handle_field(self, obj, field): self._current[field.name] = self._value_from_field(obj, field) def handle_fk_field(self, obj, field): if self.use_natural_foreign_keys and hasattr( field.remote_field.model, "natural_key" ): related = getattr(obj, field.name) if related: value = related.natural_key() else: value = None else: value = self._value_from_field(obj, field) self._current[field.name] = value def handle_m2m_field(self, obj, field): if field.remote_field.through._meta.auto_created: if self.use_natural_foreign_keys and hasattr( field.remote_field.model, "natural_key" ): def m2m_value(value): return value.natural_key() def queryset_iterator(obj, field): return getattr(obj, field.name).iterator() else: def m2m_value(value): return self._value_from_field(value, value._meta.pk) def queryset_iterator(obj, field): return getattr(obj, field.name).only("pk").iterator() m2m_iter = getattr(obj, "_prefetched_objects_cache", {}).get( field.name, queryset_iterator(obj, field), ) self._current[field.name] = [m2m_value(related) for related in m2m_iter] def getvalue(self): return self.objects def Deserializer( object_list, *, using=DEFAULT_DB_ALIAS, ignorenonexistent=False, **options ): """ Deserialize simple Python objects back into Django ORM instances. It's expected that you pass the Python objects themselves (instead of a stream or a string) to the constructor """ handle_forward_references = options.pop("handle_forward_references", False) field_names_cache = {} # Model: <list of field_names> for d in object_list: # Look up the model and starting build a dict of data for it. 
try: Model = _get_model(d["model"]) except base.DeserializationError: if ignorenonexistent: continue else: raise data = {} if "pk" in d: try: data[Model._meta.pk.attname] = Model._meta.pk.to_python(d.get("pk")) except Exception as e: raise base.DeserializationError.WithData( e, d["model"], d.get("pk"), None ) m2m_data = {} deferred_fields = {} if Model not in field_names_cache: field_names_cache[Model] = {f.name for f in Model._meta.get_fields()} field_names = field_names_cache[Model] # Handle each field for (field_name, field_value) in d["fields"].items(): if ignorenonexistent and field_name not in field_names: # skip fields no longer on model continue field = Model._meta.get_field(field_name) # Handle M2M relations if field.remote_field and isinstance( field.remote_field, models.ManyToManyRel ): try: values = base.deserialize_m2m_values( field, field_value, using, handle_forward_references ) except base.M2MDeserializationError as e: raise base.DeserializationError.WithData( e.original_exc, d["model"], d.get("pk"), e.pk ) if values == base.DEFER_FIELD: deferred_fields[field] = field_value else: m2m_data[field.name] = values # Handle FK fields elif field.remote_field and isinstance( field.remote_field, models.ManyToOneRel ): try: value = base.deserialize_fk_value( field, field_value, using, handle_forward_references ) except Exception as e: raise base.DeserializationError.WithData( e, d["model"], d.get("pk"), field_value ) if value == base.DEFER_FIELD: deferred_fields[field] = field_value else: data[field.attname] = value # Handle all other fields else: try: data[field.name] = field.to_python(field_value) except Exception as e: raise base.DeserializationError.WithData( e, d["model"], d.get("pk"), field_value ) obj = base.build_instance(Model, data, using) yield base.DeserializedObject(obj, m2m_data, deferred_fields) def _get_model(model_identifier): """Look up a model from an "app_label.model_name" string.""" try: return apps.get_model(model_identifier) except (LookupError, TypeError): raise base.DeserializationError( "Invalid model identifier: '%s'" % model_identifier )
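# ---------------------------------------------------------------------------
# Illustrative sketch (not executed on import): round-tripping data through
# this serializer via the public django.core.serializers API rather than
# using Serializer/Deserializer directly. The queryset argument is a
# hypothetical placeholder.
# ---------------------------------------------------------------------------


def _example_python_serializer_roundtrip(queryset):
    from django.core import serializers

    # format="python" resolves to the Serializer above and returns a list of
    # dicts: [{"model": ..., "pk": ..., "fields": {...}}, ...].
    data = serializers.serialize("python", queryset)
    # deserialize() yields DeserializedObject wrappers; .save() persists them.
    for deserialized in serializers.deserialize("python", data):
        deserialized.save()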
5fd05c943a245d674cfc7809a05576e2f9770a95affdf5093652114845b0abad
""" XML serializer. """ import json from xml.dom import pulldom from xml.sax import handler from xml.sax.expatreader import ExpatParser as _ExpatParser from django.apps import apps from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.core.serializers import base from django.db import DEFAULT_DB_ALIAS, models from django.utils.xmlutils import SimplerXMLGenerator, UnserializableContentError class Serializer(base.Serializer): """Serialize a QuerySet to XML.""" def indent(self, level): if self.options.get("indent") is not None: self.xml.ignorableWhitespace( "\n" + " " * self.options.get("indent") * level ) def start_serialization(self): """ Start serialization -- open the XML document and the root element. """ self.xml = SimplerXMLGenerator( self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET) ) self.xml.startDocument() self.xml.startElement("django-objects", {"version": "1.0"}) def end_serialization(self): """ End serialization -- end the document. """ self.indent(0) self.xml.endElement("django-objects") self.xml.endDocument() def start_object(self, obj): """ Called as each object is handled. """ if not hasattr(obj, "_meta"): raise base.SerializationError( "Non-model object (%s) encountered during serialization" % type(obj) ) self.indent(1) attrs = {"model": str(obj._meta)} if not self.use_natural_primary_keys or not hasattr(obj, "natural_key"): obj_pk = obj.pk if obj_pk is not None: attrs["pk"] = str(obj_pk) self.xml.startElement("object", attrs) def end_object(self, obj): """ Called after handling all fields for an object. """ self.indent(1) self.xml.endElement("object") def handle_field(self, obj, field): """ Handle each field on an object (except for ForeignKeys and ManyToManyFields). """ self.indent(2) self.xml.startElement( "field", { "name": field.name, "type": field.get_internal_type(), }, ) # Get a "string version" of the object's data. if getattr(obj, field.name) is not None: value = field.value_to_string(obj) if field.get_internal_type() == "JSONField": # Dump value since JSONField.value_to_string() doesn't output # strings. value = json.dumps(value, cls=field.encoder) try: self.xml.characters(value) except UnserializableContentError: raise ValueError( "%s.%s (pk:%s) contains unserializable characters" % (obj.__class__.__name__, field.name, obj.pk) ) else: self.xml.addQuickElement("None") self.xml.endElement("field") def handle_fk_field(self, obj, field): """ Handle a ForeignKey (they need to be treated slightly differently from regular fields). """ self._start_relational_field(field) related_att = getattr(obj, field.get_attname()) if related_att is not None: if self.use_natural_foreign_keys and hasattr( field.remote_field.model, "natural_key" ): related = getattr(obj, field.name) # If related object has a natural key, use it related = related.natural_key() # Iterable natural keys are rolled out as subelements for key_value in related: self.xml.startElement("natural", {}) self.xml.characters(str(key_value)) self.xml.endElement("natural") else: self.xml.characters(str(related_att)) else: self.xml.addQuickElement("None") self.xml.endElement("field") def handle_m2m_field(self, obj, field): """ Handle a ManyToManyField. Related objects are only serialized as references to the object's PK (i.e. the related *data* is not dumped, just the relation). 
""" if field.remote_field.through._meta.auto_created: self._start_relational_field(field) if self.use_natural_foreign_keys and hasattr( field.remote_field.model, "natural_key" ): # If the objects in the m2m have a natural key, use it def handle_m2m(value): natural = value.natural_key() # Iterable natural keys are rolled out as subelements self.xml.startElement("object", {}) for key_value in natural: self.xml.startElement("natural", {}) self.xml.characters(str(key_value)) self.xml.endElement("natural") self.xml.endElement("object") def queryset_iterator(obj, field): return getattr(obj, field.name).iterator() else: def handle_m2m(value): self.xml.addQuickElement("object", attrs={"pk": str(value.pk)}) def queryset_iterator(obj, field): return getattr(obj, field.name).only("pk").iterator() m2m_iter = getattr(obj, "_prefetched_objects_cache", {}).get( field.name, queryset_iterator(obj, field), ) for relobj in m2m_iter: handle_m2m(relobj) self.xml.endElement("field") def _start_relational_field(self, field): """Output the <field> element for relational fields.""" self.indent(2) self.xml.startElement( "field", { "name": field.name, "rel": field.remote_field.__class__.__name__, "to": str(field.remote_field.model._meta), }, ) class Deserializer(base.Deserializer): """Deserialize XML.""" def __init__( self, stream_or_string, *, using=DEFAULT_DB_ALIAS, ignorenonexistent=False, **options, ): super().__init__(stream_or_string, **options) self.handle_forward_references = options.pop("handle_forward_references", False) self.event_stream = pulldom.parse(self.stream, self._make_parser()) self.db = using self.ignore = ignorenonexistent def _make_parser(self): """Create a hardened XML parser (no custom/external entities).""" return DefusedExpatParser() def __next__(self): for event, node in self.event_stream: if event == "START_ELEMENT" and node.nodeName == "object": self.event_stream.expandNode(node) return self._handle_object(node) raise StopIteration def _handle_object(self, node): """Convert an <object> node to a DeserializedObject.""" # Look up the model using the model loading mechanism. If this fails, # bail. Model = self._get_model_from_node(node, "model") # Start building a data dictionary from the object. data = {} if node.hasAttribute("pk"): data[Model._meta.pk.attname] = Model._meta.pk.to_python( node.getAttribute("pk") ) # Also start building a dict of m2m data (this is saved as # {m2m_accessor_attribute : [list_of_related_objects]}) m2m_data = {} deferred_fields = {} field_names = {f.name for f in Model._meta.get_fields()} # Deserialize each field. for field_node in node.getElementsByTagName("field"): # If the field is missing the name attribute, bail (are you # sensing a pattern here?) field_name = field_node.getAttribute("name") if not field_name: raise base.DeserializationError( "<field> node is missing the 'name' attribute" ) # Get the field from the Model. This will raise a # FieldDoesNotExist if, well, the field doesn't exist, which will # be propagated correctly unless ignorenonexistent=True is used. if self.ignore and field_name not in field_names: continue field = Model._meta.get_field(field_name) # As is usually the case, relation fields get the special treatment. 
if field.remote_field and isinstance( field.remote_field, models.ManyToManyRel ): value = self._handle_m2m_field_node(field_node, field) if value == base.DEFER_FIELD: deferred_fields[field] = [ [ getInnerText(nat_node).strip() for nat_node in obj_node.getElementsByTagName("natural") ] for obj_node in field_node.getElementsByTagName("object") ] else: m2m_data[field.name] = value elif field.remote_field and isinstance( field.remote_field, models.ManyToOneRel ): value = self._handle_fk_field_node(field_node, field) if value == base.DEFER_FIELD: deferred_fields[field] = [ getInnerText(k).strip() for k in field_node.getElementsByTagName("natural") ] else: data[field.attname] = value else: if field_node.getElementsByTagName("None"): value = None else: value = field.to_python(getInnerText(field_node).strip()) # Load value since JSONField.to_python() outputs strings. if field.get_internal_type() == "JSONField": value = json.loads(value, cls=field.decoder) data[field.name] = value obj = base.build_instance(Model, data, self.db) # Return a DeserializedObject so that the m2m data has a place to live. return base.DeserializedObject(obj, m2m_data, deferred_fields) def _handle_fk_field_node(self, node, field): """ Handle a <field> node for a ForeignKey """ # Check if there is a child node named 'None', returning None if so. if node.getElementsByTagName("None"): return None else: model = field.remote_field.model if hasattr(model._default_manager, "get_by_natural_key"): keys = node.getElementsByTagName("natural") if keys: # If there are 'natural' subelements, it must be a natural key field_value = [getInnerText(k).strip() for k in keys] try: obj = model._default_manager.db_manager( self.db ).get_by_natural_key(*field_value) except ObjectDoesNotExist: if self.handle_forward_references: return base.DEFER_FIELD else: raise obj_pk = getattr(obj, field.remote_field.field_name) # If this is a natural foreign key to an object that # has a FK/O2O as the foreign key, use the FK value if field.remote_field.model._meta.pk.remote_field: obj_pk = obj_pk.pk else: # Otherwise, treat like a normal PK field_value = getInnerText(node).strip() obj_pk = model._meta.get_field( field.remote_field.field_name ).to_python(field_value) return obj_pk else: field_value = getInnerText(node).strip() return model._meta.get_field(field.remote_field.field_name).to_python( field_value ) def _handle_m2m_field_node(self, node, field): """ Handle a <field> node for a ManyToManyField. """ model = field.remote_field.model default_manager = model._default_manager if hasattr(default_manager, "get_by_natural_key"): def m2m_convert(n): keys = n.getElementsByTagName("natural") if keys: # If there are 'natural' subelements, it must be a natural key field_value = [getInnerText(k).strip() for k in keys] obj_pk = ( default_manager.db_manager(self.db) .get_by_natural_key(*field_value) .pk ) else: # Otherwise, treat like a normal PK value. obj_pk = model._meta.pk.to_python(n.getAttribute("pk")) return obj_pk else: def m2m_convert(n): return model._meta.pk.to_python(n.getAttribute("pk")) values = [] try: for c in node.getElementsByTagName("object"): values.append(m2m_convert(c)) except Exception as e: if isinstance(e, ObjectDoesNotExist) and self.handle_forward_references: return base.DEFER_FIELD else: raise base.M2MDeserializationError(e, c) else: return values def _get_model_from_node(self, node, attr): """ Look up a model from a <object model=...> or a <field rel=... to=...> node. 
""" model_identifier = node.getAttribute(attr) if not model_identifier: raise base.DeserializationError( "<%s> node is missing the required '%s' attribute" % (node.nodeName, attr) ) try: return apps.get_model(model_identifier) except (LookupError, TypeError): raise base.DeserializationError( "<%s> node has invalid model identifier: '%s'" % (node.nodeName, model_identifier) ) def getInnerText(node): """Get all the inner text of a DOM node (recursively).""" # inspired by https://mail.python.org/pipermail/xml-sig/2005-March/011022.html inner_text = [] for child in node.childNodes: if ( child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE ): inner_text.append(child.data) elif child.nodeType == child.ELEMENT_NODE: inner_text.extend(getInnerText(child)) else: pass return "".join(inner_text) # Below code based on Christian Heimes' defusedxml class DefusedExpatParser(_ExpatParser): """ An expat parser hardened against XML bomb attacks. Forbid DTDs, external entity references """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.setFeature(handler.feature_external_ges, False) self.setFeature(handler.feature_external_pes, False) def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): raise DTDForbidden(name, sysid, pubid) def entity_decl( self, name, is_parameter_entity, value, base, sysid, pubid, notation_name ): raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name) def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): # expat 1.2 raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) def external_entity_ref_handler(self, context, base, sysid, pubid): raise ExternalReferenceForbidden(context, base, sysid, pubid) def reset(self): _ExpatParser.reset(self) parser = self._parser parser.StartDoctypeDeclHandler = self.start_doctype_decl parser.EntityDeclHandler = self.entity_decl parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl parser.ExternalEntityRefHandler = self.external_entity_ref_handler class DefusedXmlException(ValueError): """Base exception.""" def __repr__(self): return str(self) class DTDForbidden(DefusedXmlException): """Document type definition is forbidden.""" def __init__(self, name, sysid, pubid): super().__init__() self.name = name self.sysid = sysid self.pubid = pubid def __str__(self): tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})" return tpl.format(self.name, self.sysid, self.pubid) class EntitiesForbidden(DefusedXmlException): """Entity definition is forbidden.""" def __init__(self, name, value, base, sysid, pubid, notation_name): super().__init__() self.name = name self.value = value self.base = base self.sysid = sysid self.pubid = pubid self.notation_name = notation_name def __str__(self): tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})" return tpl.format(self.name, self.sysid, self.pubid) class ExternalReferenceForbidden(DefusedXmlException): """Resolving an external reference is forbidden.""" def __init__(self, context, base, sysid, pubid): super().__init__() self.context = context self.base = base self.sysid = sysid self.pubid = pubid def __str__(self): tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})" return tpl.format(self.sysid, self.pubid)
fa67ccffaf635b5c22b2103c2c5cbbb7c52518124f9d7d86d01fb29004a7002e
from io import BytesIO from django.conf import settings from django.core import signals from django.core.handlers import base from django.http import HttpRequest, QueryDict, parse_cookie from django.urls import set_script_prefix from django.utils.encoding import repercent_broken_unicode from django.utils.functional import cached_property from django.utils.regex_helper import _lazy_re_compile _slashes_re = _lazy_re_compile(rb"/+") class LimitedStream: """Wrap another stream to disallow reading it past a number of bytes.""" def __init__(self, stream, limit): self.stream = stream self.remaining = limit self.buffer = b"" def _read_limited(self, size=None): if size is None or size > self.remaining: size = self.remaining if size == 0: return b"" result = self.stream.read(size) self.remaining -= len(result) return result def read(self, size=None): if size is None: result = self.buffer + self._read_limited() self.buffer = b"" elif size < len(self.buffer): result = self.buffer[:size] self.buffer = self.buffer[size:] else: # size >= len(self.buffer) result = self.buffer + self._read_limited(size - len(self.buffer)) self.buffer = b"" return result def readline(self, size=None): while b"\n" not in self.buffer and (size is None or len(self.buffer) < size): if size: # since size is not None here, len(self.buffer) < size chunk = self._read_limited(size - len(self.buffer)) else: chunk = self._read_limited() if not chunk: break self.buffer += chunk sio = BytesIO(self.buffer) if size: line = sio.readline(size) else: line = sio.readline() self.buffer = sio.read() return line def close(self): pass class WSGIRequest(HttpRequest): non_picklable_attrs = HttpRequest.non_picklable_attrs | frozenset(["environ"]) meta_non_picklable_attrs = frozenset(["wsgi.errors", "wsgi.input"]) def __init__(self, environ): script_name = get_script_name(environ) # If PATH_INFO is empty (e.g. accessing the SCRIPT_NAME URL without a # trailing slash), operate as if '/' was requested. path_info = get_path_info(environ) or "/" self.environ = environ self.path_info = path_info # be careful to only replace the first slash in the path because of # http://test/something and http://test//something being different as # stated in https://www.ietf.org/rfc/rfc2396.txt self.path = "%s/%s" % (script_name.rstrip("/"), path_info.replace("/", "", 1)) self.META = environ self.META["PATH_INFO"] = path_info self.META["SCRIPT_NAME"] = script_name self.method = environ["REQUEST_METHOD"].upper() # Set content_type, content_params, and encoding. self._set_content_type_params(environ) try: content_length = int(environ.get("CONTENT_LENGTH")) except (ValueError, TypeError): content_length = 0 self._stream = LimitedStream(self.environ["wsgi.input"], content_length) self._read_started = False self.resolver_match = None def __getstate__(self): state = super().__getstate__() for attr in self.meta_non_picklable_attrs: if attr in state["META"]: del state["META"][attr] return state def _get_scheme(self): return self.environ.get("wsgi.url_scheme") @cached_property def GET(self): # The WSGI spec says 'QUERY_STRING' may be absent. 
raw_query_string = get_bytes_from_wsgi(self.environ, "QUERY_STRING", "") return QueryDict(raw_query_string, encoding=self._encoding) def _get_post(self): if not hasattr(self, "_post"): self._load_post_and_files() return self._post def _set_post(self, post): self._post = post @cached_property def COOKIES(self): raw_cookie = get_str_from_wsgi(self.environ, "HTTP_COOKIE", "") return parse_cookie(raw_cookie) @property def FILES(self): if not hasattr(self, "_files"): self._load_post_and_files() return self._files POST = property(_get_post, _set_post) class WSGIHandler(base.BaseHandler): request_class = WSGIRequest def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.load_middleware() def __call__(self, environ, start_response): set_script_prefix(get_script_name(environ)) signals.request_started.send(sender=self.__class__, environ=environ) request = self.request_class(environ) response = self.get_response(request) response._handler_class = self.__class__ status = "%d %s" % (response.status_code, response.reason_phrase) response_headers = [ *response.items(), *(("Set-Cookie", c.output(header="")) for c in response.cookies.values()), ] start_response(status, response_headers) if getattr(response, "file_to_stream", None) is not None and environ.get( "wsgi.file_wrapper" ): # If `wsgi.file_wrapper` is used the WSGI server does not call # .close on the response, but on the file wrapper. Patch it to use # response.close instead which takes care of closing all files. response.file_to_stream.close = response.close response = environ["wsgi.file_wrapper"]( response.file_to_stream, response.block_size ) return response def get_path_info(environ): """Return the HTTP request's PATH_INFO as a string.""" path_info = get_bytes_from_wsgi(environ, "PATH_INFO", "/") return repercent_broken_unicode(path_info).decode() def get_script_name(environ): """ Return the equivalent of the HTTP request's SCRIPT_NAME environment variable. If Apache mod_rewrite is used, return what would have been the script name prior to any rewriting (so it's the script name as seen from the client's perspective), unless the FORCE_SCRIPT_NAME setting is set (to anything). """ if settings.FORCE_SCRIPT_NAME is not None: return settings.FORCE_SCRIPT_NAME # If Apache's mod_rewrite had a whack at the URL, Apache set either # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any # rewrites. Unfortunately not every web server (lighttpd!) passes this # information through all the time, so FORCE_SCRIPT_NAME, above, is still # needed. script_url = get_bytes_from_wsgi(environ, "SCRIPT_URL", "") or get_bytes_from_wsgi( environ, "REDIRECT_URL", "" ) if script_url: if b"//" in script_url: # mod_wsgi squashes multiple successive slashes in PATH_INFO, # do the same with script_url before manipulating paths (#17133). script_url = _slashes_re.sub(b"/", script_url) path_info = get_bytes_from_wsgi(environ, "PATH_INFO", "") script_name = script_url[: -len(path_info)] if path_info else script_url else: script_name = get_bytes_from_wsgi(environ, "SCRIPT_NAME", "") return script_name.decode() def get_bytes_from_wsgi(environ, key, default): """ Get a value from the WSGI environ dictionary as bytes. key and default should be strings. """ value = environ.get(key, default) # Non-ASCII values in the WSGI environ are arbitrarily decoded with # ISO-8859-1. This is wrong for Django websites where UTF-8 is the default. # Re-encode to recover the original bytestring. 
return value.encode("iso-8859-1") def get_str_from_wsgi(environ, key, default): """ Get a value from the WSGI environ dictionary as str. key and default should be str objects. """ value = get_bytes_from_wsgi(environ, key, default) return value.decode(errors="replace")
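# --- Illustrative usage sketch (not part of Django): LimitedStream in isolation.
# WSGIRequest.__init__ wraps environ["wsgi.input"] this way so a request body can
# never be read past the CONTENT_LENGTH the client declared. The byte strings and
# the 23-byte limit below are made up for the demonstration.
if __name__ == "__main__":
    demo = LimitedStream(BytesIO(b"first line\nsecond line\nsurplus bytes"), 23)
    assert demo.readline() == b"first line\n"
    assert demo.read() == b"second line\n"  # reading stops at the 23-byte limit
    assert demo.read() == b""               # the surplus bytes are never returned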
1ace763df1bec3c2abbfac69ac062fb787dc243335109be259c5a1d1a9a55c62
import sys import time from importlib import import_module from django.apps import apps from django.core.management.base import BaseCommand, CommandError, no_translations from django.core.management.sql import emit_post_migrate_signal, emit_pre_migrate_signal from django.db import DEFAULT_DB_ALIAS, connections, router from django.db.migrations.autodetector import MigrationAutodetector from django.db.migrations.executor import MigrationExecutor from django.db.migrations.loader import AmbiguityError from django.db.migrations.state import ModelState, ProjectState from django.utils.module_loading import module_has_submodule from django.utils.text import Truncator class Command(BaseCommand): help = ( "Updates database schema. Manages both apps with migrations and those without." ) requires_system_checks = [] def add_arguments(self, parser): parser.add_argument( "--skip-checks", action="store_true", help="Skip system checks.", ) parser.add_argument( "app_label", nargs="?", help="App label of an application to synchronize the state.", ) parser.add_argument( "migration_name", nargs="?", help="Database state will be brought to the state after that " 'migration. Use the name "zero" to unapply all migrations.', ) parser.add_argument( "--noinput", "--no-input", action="store_false", dest="interactive", help="Tells Django to NOT prompt the user for input of any kind.", ) parser.add_argument( "--database", default=DEFAULT_DB_ALIAS, help=( 'Nominates a database to synchronize. Defaults to the "default" ' "database." ), ) parser.add_argument( "--fake", action="store_true", help="Mark migrations as run without actually running them.", ) parser.add_argument( "--fake-initial", action="store_true", help=( "Detect if tables already exist and fake-apply initial migrations if " "so. Make sure that the current database schema matches your initial " "migration before using this flag. Django will only check for an " "existing table name." ), ) parser.add_argument( "--plan", action="store_true", help="Shows a list of the migration actions that will be performed.", ) parser.add_argument( "--run-syncdb", action="store_true", help="Creates tables for apps without migrations.", ) parser.add_argument( "--check", action="store_true", dest="check_unapplied", help=( "Exits with a non-zero status if unapplied migrations exist and does " "not actually apply migrations." ), ) parser.add_argument( "--prune", action="store_true", dest="prune", help="Delete nonexistent migrations from the django_migrations table.", ) @no_translations def handle(self, *args, **options): database = options["database"] if not options["skip_checks"]: self.check(databases=[database]) self.verbosity = options["verbosity"] self.interactive = options["interactive"] # Import the 'management' module within each installed app, to register # dispatcher events. for app_config in apps.get_app_configs(): if module_has_submodule(app_config.module, "management"): import_module(".management", app_config.name) # Get the database we're operating from connection = connections[database] # Hook for backends needing any database preparation connection.prepare_database() # Work out which apps have migrations and which do not executor = MigrationExecutor(connection, self.migration_progress_callback) # Raise an error if any migrations are applied before their dependencies. 
executor.loader.check_consistent_history(connection) # Before anything else, see if there's conflicting apps and drop out # hard if there are any conflicts = executor.loader.detect_conflicts() if conflicts: name_str = "; ".join( "%s in %s" % (", ".join(names), app) for app, names in conflicts.items() ) raise CommandError( "Conflicting migrations detected; multiple leaf nodes in the " "migration graph: (%s).\nTo fix them run " "'python manage.py makemigrations --merge'" % name_str ) # If they supplied command line arguments, work out what they mean. run_syncdb = options["run_syncdb"] target_app_labels_only = True if options["app_label"]: # Validate app_label. app_label = options["app_label"] try: apps.get_app_config(app_label) except LookupError as err: raise CommandError(str(err)) if run_syncdb: if app_label in executor.loader.migrated_apps: raise CommandError( "Can't use run_syncdb with app '%s' as it has migrations." % app_label ) elif app_label not in executor.loader.migrated_apps: raise CommandError("App '%s' does not have migrations." % app_label) if options["app_label"] and options["migration_name"]: migration_name = options["migration_name"] if migration_name == "zero": targets = [(app_label, None)] else: try: migration = executor.loader.get_migration_by_prefix( app_label, migration_name ) except AmbiguityError: raise CommandError( "More than one migration matches '%s' in app '%s'. " "Please be more specific." % (migration_name, app_label) ) except KeyError: raise CommandError( "Cannot find a migration matching '%s' from app '%s'." % (migration_name, app_label) ) target = (app_label, migration.name) # Partially applied squashed migrations are not included in the # graph, use the last replacement instead. if ( target not in executor.loader.graph.nodes and target in executor.loader.replacements ): incomplete_migration = executor.loader.replacements[target] target = incomplete_migration.replaces[-1] targets = [target] target_app_labels_only = False elif options["app_label"]: targets = [ key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label ] else: targets = executor.loader.graph.leaf_nodes() if options["prune"]: if not options["app_label"]: raise CommandError( "Migrations can be pruned only when an app is specified." ) if self.verbosity > 0: self.stdout.write("Pruning migrations:", self.style.MIGRATE_HEADING) to_prune = set(executor.loader.applied_migrations) - set( executor.loader.disk_migrations ) squashed_migrations_with_deleted_replaced_migrations = [ migration_key for migration_key, migration_obj in executor.loader.replacements.items() if any(replaced in to_prune for replaced in migration_obj.replaces) ] if squashed_migrations_with_deleted_replaced_migrations: self.stdout.write( self.style.NOTICE( " Cannot use --prune because the following squashed " "migrations have their 'replaces' attributes and may not " "be recorded as applied:" ) ) for migration in squashed_migrations_with_deleted_replaced_migrations: app, name = migration self.stdout.write(f" {app}.{name}") self.stdout.write( self.style.NOTICE( " Re-run 'manage.py migrate' if they are not marked as " "applied, and remove 'replaces' attributes in their " "Migration classes." 
) ) else: to_prune = sorted( migration for migration in to_prune if migration[0] == app_label ) if to_prune: for migration in to_prune: app, name = migration if self.verbosity > 0: self.stdout.write( self.style.MIGRATE_LABEL(f" Pruning {app}.{name}"), ending="", ) executor.recorder.record_unapplied(app, name) if self.verbosity > 0: self.stdout.write(self.style.SUCCESS(" OK")) elif self.verbosity > 0: self.stdout.write(" No migrations to prune.") plan = executor.migration_plan(targets) if options["plan"]: self.stdout.write("Planned operations:", self.style.MIGRATE_LABEL) if not plan: self.stdout.write(" No planned migration operations.") else: for migration, backwards in plan: self.stdout.write(str(migration), self.style.MIGRATE_HEADING) for operation in migration.operations: message, is_error = self.describe_operation( operation, backwards ) style = self.style.WARNING if is_error else None self.stdout.write(" " + message, style) if options["check_unapplied"]: sys.exit(1) return if options["check_unapplied"]: if plan: sys.exit(1) return if options["prune"]: return # At this point, ignore run_syncdb if there aren't any apps to sync. run_syncdb = options["run_syncdb"] and executor.loader.unmigrated_apps # Print some useful info if self.verbosity >= 1: self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:")) if run_syncdb: if options["app_label"]: self.stdout.write( self.style.MIGRATE_LABEL( " Synchronize unmigrated app: %s" % app_label ) ) else: self.stdout.write( self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") + (", ".join(sorted(executor.loader.unmigrated_apps))) ) if target_app_labels_only: self.stdout.write( self.style.MIGRATE_LABEL(" Apply all migrations: ") + (", ".join(sorted({a for a, n in targets})) or "(none)") ) else: if targets[0][1] is None: self.stdout.write( self.style.MIGRATE_LABEL(" Unapply all migrations: ") + str(targets[0][0]) ) else: self.stdout.write( self.style.MIGRATE_LABEL(" Target specific migration: ") + "%s, from %s" % (targets[0][1], targets[0][0]) ) pre_migrate_state = executor._create_project_state(with_applied_migrations=True) pre_migrate_apps = pre_migrate_state.apps emit_pre_migrate_signal( self.verbosity, self.interactive, connection.alias, stdout=self.stdout, apps=pre_migrate_apps, plan=plan, ) # Run the syncdb phase. if run_syncdb: if self.verbosity >= 1: self.stdout.write( self.style.MIGRATE_HEADING("Synchronizing apps without migrations:") ) if options["app_label"]: self.sync_apps(connection, [app_label]) else: self.sync_apps(connection, executor.loader.unmigrated_apps) # Migrate! if self.verbosity >= 1: self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:")) if not plan: if self.verbosity >= 1: self.stdout.write(" No migrations to apply.") # If there's changes that aren't in migrations yet, tell them # how to fix it. autodetector = MigrationAutodetector( executor.loader.project_state(), ProjectState.from_apps(apps), ) changes = autodetector.changes(graph=executor.loader.graph) if changes: self.stdout.write( self.style.NOTICE( " Your models in app(s): %s have changes that are not " "yet reflected in a migration, and so won't be " "applied." % ", ".join(repr(app) for app in sorted(changes)) ) ) self.stdout.write( self.style.NOTICE( " Run 'manage.py makemigrations' to make new " "migrations, and then re-run 'manage.py migrate' to " "apply them." 
) ) fake = False fake_initial = False else: fake = options["fake"] fake_initial = options["fake_initial"] post_migrate_state = executor.migrate( targets, plan=plan, state=pre_migrate_state.clone(), fake=fake, fake_initial=fake_initial, ) # post_migrate signals have access to all models. Ensure that all models # are reloaded in case any are delayed. post_migrate_state.clear_delayed_apps_cache() post_migrate_apps = post_migrate_state.apps # Re-render models of real apps to include relationships now that # we've got a final state. This wouldn't be necessary if real apps # models were rendered with relationships in the first place. with post_migrate_apps.bulk_update(): model_keys = [] for model_state in post_migrate_apps.real_models: model_key = model_state.app_label, model_state.name_lower model_keys.append(model_key) post_migrate_apps.unregister_model(*model_key) post_migrate_apps.render_multiple( [ModelState.from_model(apps.get_model(*model)) for model in model_keys] ) # Send the post_migrate signal, so individual apps can do whatever they need # to do at this point. emit_post_migrate_signal( self.verbosity, self.interactive, connection.alias, stdout=self.stdout, apps=post_migrate_apps, plan=plan, ) def migration_progress_callback(self, action, migration=None, fake=False): if self.verbosity >= 1: compute_time = self.verbosity > 1 if action == "apply_start": if compute_time: self.start = time.monotonic() self.stdout.write(" Applying %s..." % migration, ending="") self.stdout.flush() elif action == "apply_success": elapsed = ( " (%.3fs)" % (time.monotonic() - self.start) if compute_time else "" ) if fake: self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed)) else: self.stdout.write(self.style.SUCCESS(" OK" + elapsed)) elif action == "unapply_start": if compute_time: self.start = time.monotonic() self.stdout.write(" Unapplying %s..." % migration, ending="") self.stdout.flush() elif action == "unapply_success": elapsed = ( " (%.3fs)" % (time.monotonic() - self.start) if compute_time else "" ) if fake: self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed)) else: self.stdout.write(self.style.SUCCESS(" OK" + elapsed)) elif action == "render_start": if compute_time: self.start = time.monotonic() self.stdout.write(" Rendering model states...", ending="") self.stdout.flush() elif action == "render_success": elapsed = ( " (%.3fs)" % (time.monotonic() - self.start) if compute_time else "" ) self.stdout.write(self.style.SUCCESS(" DONE" + elapsed)) def sync_apps(self, connection, app_labels): """Run the old syncdb-style operation on a list of app_labels.""" with connection.cursor() as cursor: tables = connection.introspection.table_names(cursor) # Build the manifest of apps and models that are to be synchronized. 
all_models = [ ( app_config.label, router.get_migratable_models( app_config, connection.alias, include_auto_created=False ), ) for app_config in apps.get_app_configs() if app_config.models_module is not None and app_config.label in app_labels ] def model_installed(model): opts = model._meta converter = connection.introspection.identifier_converter return not ( (converter(opts.db_table) in tables) or ( opts.auto_created and converter(opts.auto_created._meta.db_table) in tables ) ) manifest = { app_name: list(filter(model_installed, model_list)) for app_name, model_list in all_models } # Create the tables for each model if self.verbosity >= 1: self.stdout.write(" Creating tables...") with connection.schema_editor() as editor: for app_name, model_list in manifest.items(): for model in model_list: # Never install unmanaged models, etc. if not model._meta.can_migrate(connection): continue if self.verbosity >= 3: self.stdout.write( " Processing %s.%s model" % (app_name, model._meta.object_name) ) if self.verbosity >= 1: self.stdout.write( " Creating table %s" % model._meta.db_table ) editor.create_model(model) # Deferred SQL is executed when exiting the editor's context. if self.verbosity >= 1: self.stdout.write(" Running deferred SQL...") @staticmethod def describe_operation(operation, backwards): """Return a string that describes a migration operation for --plan.""" prefix = "" is_error = False if hasattr(operation, "code"): code = operation.reverse_code if backwards else operation.code action = (code.__doc__ or "") if code else None elif hasattr(operation, "sql"): action = operation.reverse_sql if backwards else operation.sql else: action = "" if backwards: prefix = "Undo " if action is not None: action = str(action).replace("\n", "") elif backwards: action = "IRREVERSIBLE" is_error = True if action: action = " -> " + action truncated = Truncator(action) return prefix + operation.describe() + truncated.chars(40), is_error
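# --- Illustrative usage sketch (not part of the migrate command): what
# describe_operation() feeds into the --plan output. The RunPython function and
# the settings.configure(USE_I18N=False) call are stand-ins so the snippet can
# run outside a real project; under manage.py the settings are already there.
if __name__ == "__main__":
    from django.conf import settings

    if not settings.configured:
        settings.configure(USE_I18N=False)  # Truncator needs settings access

    from django.db import migrations

    def forwards(apps, schema_editor):
        """Backfill the slug column."""

    op = migrations.RunPython(forwards, migrations.RunPython.noop)
    print(Command.describe_operation(op, backwards=False))
    # roughly: ('Raw Python operation -> Backfill the slug column.', False)
    print(Command.describe_operation(op, backwards=True))
    # roughly: ('Undo Raw Python operation', False) -- noop has no docstring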